hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7154188c1409f7ad80c6acf7c69384da06e644f | 4,384 | py | Python | jobChomper/graph.py | bhautikj/jobChomper | 09b50b3e14ab580a93376e4882214c18a8da34d5 | [
"MIT"
] | 1 | 2018-03-16T03:16:49.000Z | 2018-03-16T03:16:49.000Z | jobChomper/graph.py | bhautikj/jobChomper | 09b50b3e14ab580a93376e4882214c18a8da34d5 | [
"MIT"
] | null | null | null | jobChomper/graph.py | bhautikj/jobChomper | 09b50b3e14ab580a93376e4882214c18a8da34d5 | [
"MIT"
] | null | null | null | ##
## Weirdo Tree Graph that powers jobChomper
## --
##
## Assertions:
## * DAG is made up of named edges
## * Each edge is a triple (A, B, NEEDSPREVIOUSTOPASS)
## A, B are the named nodes
## B will execute after A has evaluated
## NEEDSPREVIOUSTOPASS is True or False; if it is True then A _must_ evaluate as True for B to run
## * There's a special node called STARTNODE from where execution starts
## * Comment lines in graph file start with #
## * Elements in graph lines separated by ',' - for example:
## A, B, True
##
import jobChomper.node
import logging
STARTNODENAME = "STARTNODE"
RUNONLYONPASS = "onlyOnPass"
RUNONFAIL = "onFail"
def findCycle(graph):
  """Return one cycle found in *graph* as a list of nodes, or None.

  *graph* maps a node name to the list of its successors.  The search is
  an iterative depth-first walk; when a successor already sits on the
  current path, the suffix of the path starting at that successor is the
  cycle and is returned as-is.
  """
  unvisited = set(graph.keys())
  while unvisited:
    path = [unvisited.pop()]
    while path:
      current = path[-1]
      for successor in graph[current]:
        if successor in path:
          # Back-edge: the cycle is the path suffix from the successor.
          return path[path.index(successor):]
        if successor in unvisited:
          unvisited.remove(successor)
          path.append(successor)
          break
      else:
        # Every successor explored; backtrack.
        path.pop()
  return None
class Graph(object):
  """DAG of named job nodes that powers jobChomper.

  Edges are triples ``(A, B, needsPreviousToPass)``: node B executes after
  node A has evaluated, and when the flag is True only if A evaluated as
  True.  Execution starts from the special pseudo node STARTNODE.
  """
  def __init__(self):
    self.init = True
    # Set of (nodeA, nodeB, priorSuccess) triples.
    self.edges = set()
    # nodeName -> {RUNONLYONPASS: [...], RUNONFAIL: [...]}
    self.runDict = {}
    # Every node name appearing in any edge.
    self.nodeSet = set()
  def buildRunDict(self):
    """Build per-node successor lists (pass/fail buckets) from the edges."""
    self.runDict = {}
    for nodeA, nodeB, priorSuccess in self.edges:
      self.nodeSet.add(nodeA)
      self.nodeSet.add(nodeB)
      if nodeA not in self.runDict:
        self.runDict[nodeA] = {RUNONLYONPASS: [], RUNONFAIL: []}
      if priorSuccess:
        self.runDict[nodeA][RUNONLYONPASS].append(nodeB)
      else:
        self.runDict[nodeA][RUNONFAIL].append(nodeB)
    # Nodes with no outgoing edges still get empty successor lists.
    for node in self.nodeSet:
      if node not in self.runDict:
        self.runDict[node] = {RUNONLYONPASS: [], RUNONFAIL: []}
  def findCycles(self):
    """Return a cycle in the edge set as a list of nodes, or None."""
    connectivity = {}
    for nodeA, nodeB, _ in self.edges:
      connectivity.setdefault(nodeA, []).append(nodeB)
    return findCycle(connectivity)
  def checkEdgeNodesValid(self):
    """Raise ValueError if any edge references a node that does not exist.

    STARTNODE is a pseudo node and is never looked up, but the targets of
    its edges must still exist (the original code skipped them entirely).
    """
    for nodeA, nodeB, _ in self.edges:
      if nodeA != STARTNODENAME and not jobChomper.node.nodeExists(nodeA):
        raise ValueError("[Graph] no such node as: " + nodeA)
      if not jobChomper.node.nodeExists(nodeB):
        raise ValueError("[Graph] no such node as: " + nodeB)
  def loadGraphFromFile(self, filename):
    """Parse a graph file into ``self.edges`` and validate the result.

    Each non-empty, non-comment line must be a triple ``A, B, True|False``.
    Raises ValueError on malformed lines, a missing or duplicated
    STARTNODE, cycles, or unknown node names.
    """
    foundStart = False
    with open(filename) as graphBody:
      data = graphBody.read()
    for line in data.split('\n'):
      line = line.strip()
      # Skip empty lines and comment lines.
      if line == '' or line.startswith('#'):
        continue
      spl = line.split(',')
      if len(spl) != 3:
        logging.error("Problem parsing: " + filename + " file has invalid triple: " + line)
        raise ValueError("[Graph] Problem parsing: " + filename + " file has invalid triple: " + line)
      nodeA = spl[0].strip()
      nodeB = spl[1].strip()
      # Anything other than (case-insensitive) "true" means False.
      prevEval = spl[2].lower().strip() == 'true'
      if nodeA == STARTNODENAME:
        if foundStart:
          logging.error("Problem parsing: " + filename + " start node defined again: " + line)
          raise ValueError("[Graph] Problem parsing: " + filename + " start node defined again: " + line)
        foundStart = True
      self.edges.add((nodeA, nodeB, prevEval))
    if not foundStart:
      logging.error("Problem parsing: " + filename + " could not find " + STARTNODENAME)
      raise ValueError("[Graph] Problem parsing: " + filename + " could not find " + STARTNODENAME)
    self.buildRunDict()
    cycles = self.findCycles()
    if cycles is not None:
      logging.error("Problem parsing: " + filename + " cycle detected:" + str(cycles))
      raise ValueError("[Graph] Problem parsing: " + filename + " cycle detected:" + str(cycles))
    self.checkEdgeNodesValid()
| 28.842105 | 107 | 0.580748 | [-1]
for node in graph[top]:
if node in stack:
return stack[stack.index(node):]
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
return None
class Graph(object):
def __init__(self):
self.init = True
self.edges = set()
self.runDict = {}
self.nodeSet = set()
def buildRunDict(self):
self.runDict = {}
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
self.nodeSet.add(nodeA)
self.nodeSet.add(nodeB)
priorSuccess = edge[2]
if nodeA not in self.runDict.keys():
self.runDict[nodeA] = {}
self.runDict[nodeA][RUNONLYONPASS]=[]
self.runDict[nodeA][RUNONFAIL]=[]
if priorSuccess == True:
self.runDict[nodeA][RUNONLYONPASS].append(nodeB)
else:
self.runDict[nodeA][RUNONFAIL].append(nodeB)
for node in self.nodeSet:
if node not in self.runDict.keys():
self.runDict[node]={}
self.runDict[node][RUNONLYONPASS]=[]
self.runDict[node][RUNONFAIL]=[]
def findCycles(self):
connectivity = {}
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
if nodeA not in connectivity.keys():
connectivity[nodeA] = []
connectivity[nodeA].append(nodeB)
return findCycle(connectivity)
def checkEdgeNodesValid(self):
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
if nodeA == STARTNODENAME:
continue
if not jobChomper.node.nodeExists(nodeA):
raise ValueError("[Graph] no such node as: " + nodeA)
if not jobChomper.node.nodeExists(nodeB):
raise ValueError("[Graph] no such node as: " + nodeB)
def loadGraphFromFile(self, filename):
foundStart = False
with open(filename) as graphBody:
data = graphBody.read()
for line in data.split('\n'):
line = line.strip()
# Empty line
if line == '':
continue
# Comment line
if line[0] == '
continue
spl = line.split(',')
# Not a triple
if len(spl) != 3:
logging.error("Problem parsing: " + filename + " file has invalid triple: " + line)
raise ValueError("[Graph] Problem parsing: " + filename + " file has invalid triple: " + line)
nodeA = spl[0].strip()
nodeB = spl[1].strip()
prevEval = False
if spl[2].lower().strip() == 'true':
prevEval = True
if nodeA == STARTNODENAME:
if foundStart == True:
logging.error("Problem parsing: " + filename + " start node defined again: " + line)
raise ValueError("[Graph] Problem parsing: " + filename + " start node defined again: " + line)
else:
foundStart = True
triple = (nodeA, nodeB, prevEval)
self.edges.add(triple)
if foundStart == False:
logging.error("Problem parsing: " + filename + " cound not find " + STARTNODENAME)
raise ValueError("[Graph] Problem parsing: " + filename + " cound not find " + STARTNODENAME)
self.buildRunDict()
cycles = self.findCycles()
if cycles != None:
logging.error("Problem parsing: " + filename + " cycle detected:" + str(cycles))
raise ValueError("[Graph] Problem parsing: " + filename + " cycle detected:" + str(cycles))
self.checkEdgeNodesValid()
| true | true |
f715418cf642bd95568448dcaef9e2cf8c16dcc4 | 1,446 | py | Python | tests/00_unit/test_base.py | wolcomm/eos-prefix-list-agent | a1ec37494048f0f0524ca5ff985838d844c84e4e | [
"MIT"
] | 8 | 2019-06-02T23:47:38.000Z | 2021-08-24T07:30:08.000Z | tests/00_unit/test_base.py | wolcomm/eos-prefix-list-agent | a1ec37494048f0f0524ca5ff985838d844c84e4e | [
"MIT"
] | 39 | 2019-04-09T06:21:56.000Z | 2022-01-29T10:00:37.000Z | tests/00_unit/test_base.py | wolcomm/eos-prefix-list-agent | a1ec37494048f0f0524ca5ff985838d844c84e4e | [
"MIT"
] | null | null | null | # Copyright (c) 2019 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the MIT License
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for prefix_list_agent.agent module."""
from __future__ import print_function
import pytest
from prefix_list_agent.base import PrefixListBase
class TestPrefixListAgent(object):
    """Test cases for PrefixListBase object."""

    def test_init(self, sdk, mocker):
        """PrefixListBase should construct cleanly with a mocked tracer."""
        mocker.patch("eossdk.Tracer", autospec=True)
        instance = PrefixListBase()
        assert isinstance(instance, PrefixListBase)

    @pytest.mark.parametrize("level", ("emerg", "alert", "crit", "err",
                                       "warning", "notice", "info", "debug"))
    def test_tracing(self, mocker, level):
        """Each trace-level helper should emit exactly one trace call."""
        mocker.patch("eossdk.Tracer", autospec=True)
        instance = PrefixListBase()
        getattr(instance, level)("message")
        assert instance.tracer.trace.call_count == 1
| 37.076923 | 79 | 0.69018 |
from __future__ import print_function
import pytest
from prefix_list_agent.base import PrefixListBase
class TestPrefixListAgent(object):
def test_init(self, sdk, mocker):
mocker.patch("eossdk.Tracer", autospec=True)
base = PrefixListBase()
assert isinstance(base, PrefixListBase)
@pytest.mark.parametrize("level", ("emerg", "alert", "crit", "err",
"warning", "notice", "info", "debug"))
def test_tracing(self, mocker, level):
mocker.patch("eossdk.Tracer", autospec=True)
base = PrefixListBase()
method = getattr(base, level)
method("message")
assert base.tracer.trace.call_count == 1
| true | true |
f71541e82fcb35f2b0c32b4abe7a90cb5afb6096 | 4,982 | py | Python | homeassistant/components/august/gateway.py | bg1000/core | 4ee4d674d8931927eae5222e3bf8dd6e26f3c6e5 | [
"Apache-2.0"
] | 1 | 2021-03-20T12:25:26.000Z | 2021-03-20T12:25:26.000Z | homeassistant/components/august/gateway.py | bg1000/core | 4ee4d674d8931927eae5222e3bf8dd6e26f3c6e5 | [
"Apache-2.0"
] | 51 | 2020-08-03T07:30:44.000Z | 2022-03-22T06:02:42.000Z | homeassistant/components/august/gateway.py | bg1000/core | 4ee4d674d8931927eae5222e3bf8dd6e26f3c6e5 | [
"Apache-2.0"
] | null | null | null | """Handle August connection setup and authentication."""
import asyncio
import logging
import os
from aiohttp import ClientError, ClientResponseError
from august.api_async import ApiAsync
from august.authenticator_async import AuthenticationState, AuthenticatorAsync
from homeassistant.const import (
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers import aiohttp_client
from .const import (
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DEFAULT_AUGUST_CONFIG_FILE,
DEFAULT_TIMEOUT,
VERIFICATION_CODE_KEY,
)
from .exceptions import CannotConnect, InvalidAuth, RequireValidation
_LOGGER = logging.getLogger(__name__)
class AugustGateway:
    """Handle the connection to August.

    Owns the August API client and authenticator pair, caches the access
    token on disk, and refreshes the token on demand.
    """
    def __init__(self, hass):
        """Init the connection."""
        self._aiohttp_session = aiohttp_client.async_get_clientsession(hass)
        # Serializes concurrent token refreshes (see
        # async_refresh_access_token_if_needed).
        self._token_refresh_lock = asyncio.Lock()
        self._access_token_cache_file = None
        self._hass = hass
        self._config = None
        self.api = None
        self.authenticator = None
        self.authentication = None
    @property
    def access_token(self):
        """Access token for the api."""
        return self.authentication.access_token
    def config_entry(self):
        """Return the config-entry data describing this gateway's setup."""
        return {
            CONF_LOGIN_METHOD: self._config[CONF_LOGIN_METHOD],
            CONF_USERNAME: self._config[CONF_USERNAME],
            CONF_INSTALL_ID: self._config.get(CONF_INSTALL_ID),
            CONF_ACCESS_TOKEN_CACHE_FILE: self._access_token_cache_file,
        }
    async def async_setup(self, conf):
        """Create the api and authenticator objects."""
        if conf.get(VERIFICATION_CODE_KEY):
            # A pending verification code means we are mid-validation;
            # do not recreate the api/authenticator.
            return
        # Default cache file name is derived from the username.
        self._access_token_cache_file = conf.get(
            CONF_ACCESS_TOKEN_CACHE_FILE,
            f".{conf[CONF_USERNAME]}{DEFAULT_AUGUST_CONFIG_FILE}",
        )
        self._config = conf
        self.api = ApiAsync(
            self._aiohttp_session,
            timeout=self._config.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
        )
        self.authenticator = AuthenticatorAsync(
            self.api,
            self._config[CONF_LOGIN_METHOD],
            self._config[CONF_USERNAME],
            self._config.get(CONF_PASSWORD, ""),
            install_id=self._config.get(CONF_INSTALL_ID),
            access_token_cache_file=self._hass.config.path(
                self._access_token_cache_file
            ),
        )
        await self.authenticator.async_setup_authentication()
    async def async_authenticate(self):
        """Authenticate with the details provided to setup.

        Raises InvalidAuth, RequireValidation or CannotConnect on failure;
        returns the authentication object on success.
        """
        self.authentication = None
        try:
            self.authentication = await self.authenticator.async_authenticate()
            if self.authentication.state == AuthenticationState.AUTHENTICATED:
                # Call the locks api to verify we are actually
                # authenticated, because we can be authenticated
                # but have no access.
                await self.api.async_get_operable_locks(self.access_token)
        except ClientResponseError as ex:
            # HTTP 401 means bad credentials; any other HTTP error is
            # treated as a connectivity problem.
            if ex.status == HTTP_UNAUTHORIZED:
                raise InvalidAuth from ex
            raise CannotConnect from ex
        except ClientError as ex:
            _LOGGER.error("Unable to connect to August service: %s", str(ex))
            raise CannotConnect from ex
        if self.authentication.state == AuthenticationState.BAD_PASSWORD:
            raise InvalidAuth
        if self.authentication.state == AuthenticationState.REQUIRES_VALIDATION:
            raise RequireValidation
        if self.authentication.state != AuthenticationState.AUTHENTICATED:
            _LOGGER.error("Unknown authentication state: %s", self.authentication.state)
            raise InvalidAuth
        return self.authentication
    async def async_reset_authentication(self):
        """Remove the token cache file (runs the blocking work off-loop)."""
        await self._hass.async_add_executor_job(self._reset_authentication)
    def _reset_authentication(self):
        """Remove the cache file.  Blocking; only call via executor job."""
        if os.path.exists(self._access_token_cache_file):
            os.unlink(self._access_token_cache_file)
    async def async_refresh_access_token_if_needed(self):
        """Refresh the august access token if needed."""
        if not self.authenticator.should_refresh():
            return
        # Lock so only one coroutine performs the refresh at a time.
        async with self._token_refresh_lock:
            refreshed_authentication = (
                await self.authenticator.async_refresh_access_token(force=False)
            )
            _LOGGER.info(
                "Refreshed august access token. The old token expired at %s, and the new token expires at %s",
                self.authentication.access_token_expires,
                refreshed_authentication.access_token_expires,
            )
            self.authentication = refreshed_authentication
| 34.839161 | 110 | 0.66399 |
import asyncio
import logging
import os
from aiohttp import ClientError, ClientResponseError
from august.api_async import ApiAsync
from august.authenticator_async import AuthenticationState, AuthenticatorAsync
from homeassistant.const import (
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers import aiohttp_client
from .const import (
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DEFAULT_AUGUST_CONFIG_FILE,
DEFAULT_TIMEOUT,
VERIFICATION_CODE_KEY,
)
from .exceptions import CannotConnect, InvalidAuth, RequireValidation
_LOGGER = logging.getLogger(__name__)
class AugustGateway:
def __init__(self, hass):
self._aiohttp_session = aiohttp_client.async_get_clientsession(hass)
self._token_refresh_lock = asyncio.Lock()
self._access_token_cache_file = None
self._hass = hass
self._config = None
self.api = None
self.authenticator = None
self.authentication = None
@property
def access_token(self):
return self.authentication.access_token
def config_entry(self):
return {
CONF_LOGIN_METHOD: self._config[CONF_LOGIN_METHOD],
CONF_USERNAME: self._config[CONF_USERNAME],
CONF_INSTALL_ID: self._config.get(CONF_INSTALL_ID),
CONF_ACCESS_TOKEN_CACHE_FILE: self._access_token_cache_file,
}
async def async_setup(self, conf):
if conf.get(VERIFICATION_CODE_KEY):
return
self._access_token_cache_file = conf.get(
CONF_ACCESS_TOKEN_CACHE_FILE,
f".{conf[CONF_USERNAME]}{DEFAULT_AUGUST_CONFIG_FILE}",
)
self._config = conf
self.api = ApiAsync(
self._aiohttp_session,
timeout=self._config.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
)
self.authenticator = AuthenticatorAsync(
self.api,
self._config[CONF_LOGIN_METHOD],
self._config[CONF_USERNAME],
self._config.get(CONF_PASSWORD, ""),
install_id=self._config.get(CONF_INSTALL_ID),
access_token_cache_file=self._hass.config.path(
self._access_token_cache_file
),
)
await self.authenticator.async_setup_authentication()
async def async_authenticate(self):
self.authentication = None
try:
self.authentication = await self.authenticator.async_authenticate()
if self.authentication.state == AuthenticationState.AUTHENTICATED:
await self.api.async_get_operable_locks(self.access_token)
except ClientResponseError as ex:
if ex.status == HTTP_UNAUTHORIZED:
raise InvalidAuth from ex
raise CannotConnect from ex
except ClientError as ex:
_LOGGER.error("Unable to connect to August service: %s", str(ex))
raise CannotConnect from ex
if self.authentication.state == AuthenticationState.BAD_PASSWORD:
raise InvalidAuth
if self.authentication.state == AuthenticationState.REQUIRES_VALIDATION:
raise RequireValidation
if self.authentication.state != AuthenticationState.AUTHENTICATED:
_LOGGER.error("Unknown authentication state: %s", self.authentication.state)
raise InvalidAuth
return self.authentication
async def async_reset_authentication(self):
await self._hass.async_add_executor_job(self._reset_authentication)
def _reset_authentication(self):
if os.path.exists(self._access_token_cache_file):
os.unlink(self._access_token_cache_file)
async def async_refresh_access_token_if_needed(self):
if not self.authenticator.should_refresh():
return
async with self._token_refresh_lock:
refreshed_authentication = (
await self.authenticator.async_refresh_access_token(force=False)
)
_LOGGER.info(
"Refreshed august access token. The old token expired at %s, and the new token expires at %s",
self.authentication.access_token_expires,
refreshed_authentication.access_token_expires,
)
self.authentication = refreshed_authentication
| true | true |
f715431153040fc9d72aca7e7a4ab69f64467305 | 8,101 | py | Python | mv_gaussian/low_dim_w_summary_stats/run_script_snpla.py | SamuelWiqvist/snpla | 9d586c5d09de3eecd2536485af6fc28a915443e4 | [
"MIT"
] | 2 | 2021-02-17T14:13:54.000Z | 2021-06-01T08:29:35.000Z | mv_gaussian/low_dim_w_summary_stats/run_script_snpla.py | SamuelWiqvist/snpla | 9d586c5d09de3eecd2536485af6fc28a915443e4 | [
"MIT"
] | null | null | null | mv_gaussian/low_dim_w_summary_stats/run_script_snpla.py | SamuelWiqvist/snpla | 9d586c5d09de3eecd2536485af6fc28a915443e4 | [
"MIT"
] | null | null | null | # Imports
import sys
import torch
import os
import time
import numpy as np
from torch.distributions.multivariate_normal import MultivariateNormal
# Initial set up
# Command-line arguments; lunarc == 1 selects the LUNARC cluster paths below.
lunarc = int(sys.argv[1])
dim = int(sys.argv[2])
seed = int(sys.argv[3])
seed_data = int(sys.argv[4])
hp_tuning = int(sys.argv[5])  # if hp_tuning = 0, no hyper-param tuning, else hp_tuning for that sample of the hp
lambda_val = float(sys.argv[6])  # if > 0, overrides the last (gamma) hyper-parameter below
print("Input args:")
print("Dim: " + str(dim))
print("seed: " + str(seed))
print("seed_data: " + str(seed_data))
# Job identifier used in every output file name.
id_job = str(dim) + '_' + str(seed) + '_' + str(seed_data)
if hp_tuning > 0:
    id_job = id_job + "_" + str(hp_tuning)
if lambda_val > 0:
    id_job = id_job + "_" + str(lambda_val)
# Set wd
print(os.getcwd())
# set the wd to the base folder for the project
if lunarc == 1:
    os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev')
else:
    os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev')
sys.path.append('./')
print(os.getcwd())
# Load all utility functions for all methods
import mv_gaussian.low_dim_w_summary_stats.functions as func
import algorithms.snpla as snpla
# Set model and generate data
x_o, conj_model, analytical_posterior = func.set_up_model(seed)
# set up posterior network
flow_lik, flow_post = func.set_up_networks()
## Generate test data (prior-predictive draws)
N_prior_pred_test = 1000
x_test, theta_test = func.run_model_sim(N_prior_pred_test, seed + 2, conj_model, analytical_posterior,
                                        conj_model.model.covariance_matrix, dim, True)
# Generate test data for obs data set
print(conj_model.model_sim(theta_test).shape)
N_test_obs_data = 1000
x_test_obs_data = torch.zeros(N_test_obs_data, 5)
theta_test_obs_data = torch.zeros(N_test_obs_data, dim)
# Every row repeats the 5 summary statistics of the observed data and the
# true model location, so the test set conditions on the observed data.
for i in range(N_test_obs_data):
    x_test_obs_data[i, :] = func.calc_summary_stats(x_o)
    theta_test_obs_data[i, :] = conj_model.model.loc
# Set up networks for the likelihood model
# Base dist for posterior model
flow_lik, flow_post = func.set_up_networks()
hyper_params = [0.001, 0.002, 0.95, 0.7]  # lr_like, lr_post, gamma_post, gamma
if lambda_val > 0:
    hyper_params[-1] = lambda_val
if hp_tuning >= 2:
    hyper_params = func.sample_hp("snpla", hp_tuning)
optimizer_lik = torch.optim.Adam(flow_lik.parameters(), lr=hyper_params[0])
optimizer_post = torch.optim.Adam(flow_post.parameters(), lr=hyper_params[1])
decay_rate_post = hyper_params[2]  # no adaptation of Adam's base rate
nbr_rounds = 10
prob_prior_decay_rate = hyper_params[3]
prob_prior = snpla.calc_prob_prior(nbr_rounds, prob_prior_decay_rate)
print(prob_prior)
#nbr_lik = [2000, 2000, 2000, 2000]
#nbr_epochs_lik = [25, 25, 25, 25]
#batch_size = 50
#batch_size_post = 50
#nbr_post = [10000, 10000, 10000, 10000]
#nbr_epochs_post = [25, 25, 25, 25]
nbr_lik = [2500 for _ in range(nbr_rounds)]  # [1000, 1000, 1000, 1000, 1000] # , 2000, 2000]
nbr_epochs_lik = [75 for _ in range(nbr_rounds)]  # [100, 100, 100, 100, 100]
batch_size = 50
batch_size_post = 1000
nbr_post = [10000 for _ in range(nbr_rounds)]  # [10000, 10000, 10000, 10000, 10000] # , 10000, 10000]
nbr_epochs_post = [75 for _ in range(nbr_rounds)]  # [50, 50, 50, 50, 50, 50]
# Batch of the observed summary statistics used when training the posterior.
x_o_batch_post = torch.zeros(batch_size_post, 5)
for i in range(batch_size_post):
    x_o_batch_post[i, :] = func.calc_summary_stats(x_o)
# Seed everything for reproducible training.
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
start = time.time()
# TODO check prior and simulator
# Run the full SNPLA training loop; returns the per-round likelihood and
# posterior flow models.
models_lik, models_post = snpla.inference_snpla(flow_lik,
                                                flow_post,
                                                conj_model.prior,
                                                conj_model.model_sim,
                                                optimizer_lik,
                                                optimizer_post,
                                                decay_rate_post,
                                                func.calc_summary_stats(x_o),
                                                x_o_batch_post,
                                                dim,
                                                prob_prior,
                                                nbr_lik,
                                                nbr_epochs_lik,
                                                nbr_post,
                                                nbr_epochs_post,
                                                batch_size,
                                                batch_size_post)
end = time.time()
run_time = end - start
print("")
print("Runtime:" + str(round(run_time, 2)))
# Evaluate each round's posterior: sample it, compute the KL divergence to
# the analytical posterior, and dump the samples to the directory matching
# the run mode (lambda_val sweep / plain run / hyper-parameter tuning).
kl_divs_trained = []
start = time.time()
torch.manual_seed(seed)
for i in range(nbr_rounds):
    print(i)
    posterior_sample = models_post[i].sample(1000, context=func.calc_summary_stats(x_o))
    posterior_sample = posterior_sample.reshape((1000, 2))
    kl_divs_trained.append(conj_model.kl_div(analytical_posterior, posterior_sample))
    if hp_tuning == 0 and lambda_val > 0:
        np.savetxt('mv_gaussian/low_dim_w_summary_stats/lambda_val/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
                   posterior_sample.detach().numpy(), delimiter=",")
    elif hp_tuning == 0:
        np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
                   posterior_sample.detach().numpy(), delimiter=",")
    else:
        np.savetxt('mv_gaussian/low_dim_w_summary_stats/hp_tuning/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
                   posterior_sample.detach().numpy(), delimiter=",")
end = time.time()
# Average wall-clock time per evaluation round.
run_time_inference = (end - start) / nbr_rounds
# Write the summary file for this run; the directory and the header fields
# depend on the run mode (lambda_val sweep / plain run / hp tuning).
if hp_tuning == 0 and lambda_val > 0:
    with open('mv_gaussian/low_dim_w_summary_stats/lambda_val/snpla_' + id_job + '.txt', 'w') as f:
        for h in hyper_params:
            f.write('%.6f\n' % h)
        for p in prob_prior:
            f.write('%.6f\n' % p)
        f.write('%.4f\n' % run_time)
        f.write('%.4f\n' % run_time_inference)
        for i in range(nbr_rounds):
            f.write('%.4f\n' % kl_divs_trained[i])
elif hp_tuning == 0:
    with open('mv_gaussian/low_dim_w_summary_stats/results/snpla_' + id_job + '.txt', 'w') as f:
        f.write('%.4f\n' % run_time)
        f.write('%.4f\n' % run_time_inference)
        for i in range(nbr_rounds):
            f.write('%.4f\n' % kl_divs_trained[i])
else:
    with open('mv_gaussian/low_dim_w_summary_stats/hp_tuning/snpla_' + id_job + '.txt', 'w') as f:
        f.write('%.4f\n' % hp_tuning)
        for h in hyper_params:
            f.write('%.6f\n' % h)
        f.write('%.4f\n' % run_time)
        f.write('%.4f\n' % run_time_inference)
        for i in range(nbr_rounds):
            f.write('%.4f\n' % kl_divs_trained[i])
if hp_tuning == 0:
    # Inference
    # Sample data from post pred
    N_post_pred_test = 1000
    x_post_pred, theta_post_pred = func.run_model_sim(N_post_pred_test, seed + 3, conj_model, analytical_posterior,
                                                      conj_model.model.covariance_matrix, dim, False)
    torch.manual_seed(seed)
    # Reconstruct data through the learned likelihood flow for the prior,
    # observed-data, and posterior-predictive parameter sets.
    x_prior = flow_lik.sample(1, context=theta_test)
    x_theta_true = flow_lik.sample(1, context=theta_test_obs_data)
    x_post = flow_lik.sample(1, context=theta_post_pred)
    x_prior = x_prior.reshape(x_test.shape)
    x_theta_true = x_theta_true.reshape(x_test_obs_data.shape)
    x_post = x_post.reshape(x_post_pred.shape)
    # Write results
    np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_snpla_' + id_job +
               '.csv', x_theta_true.detach().numpy(), delimiter=",")
    np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_prior_snpla_' + id_job + '.csv',
               x_prior.detach().numpy(), delimiter=",")
    np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_post_snpla_' + id_job + '.csv',
               x_post.detach().numpy(), delimiter=",")
| 33.754167 | 125 | 0.627453 |
import sys
import torch
import os
import time
import numpy as np
from torch.distributions.multivariate_normal import MultivariateNormal
lunarc = int(sys.argv[1])
dim = int(sys.argv[2])
seed = int(sys.argv[3])
seed_data = int(sys.argv[4])
hp_tuning = int(sys.argv[5])
lambda_val = float(sys.argv[6])
print("Input args:")
print("Dim: " + str(dim))
print("seed: " + str(seed))
print("seed_data: " + str(seed_data))
id_job = str(dim) + '_' + str(seed) + '_' + str(seed_data)
if hp_tuning > 0:
id_job = id_job + "_" + str(hp_tuning)
if lambda_val > 0:
id_job = id_job + "_" + str(lambda_val)
print(os.getcwd())
if lunarc == 1:
os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev')
else:
os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev')
sys.path.append('./')
print(os.getcwd())
import mv_gaussian.low_dim_w_summary_stats.functions as func
import algorithms.snpla as snpla
x_o, conj_model, analytical_posterior = func.set_up_model(seed)
flow_lik, flow_post = func.set_up_networks()
= 1000
x_test, theta_test = func.run_model_sim(N_prior_pred_test, seed + 2, conj_model, analytical_posterior,
conj_model.model.covariance_matrix, dim, True)
print(conj_model.model_sim(theta_test).shape)
N_test_obs_data = 1000
x_test_obs_data = torch.zeros(N_test_obs_data, 5)
theta_test_obs_data = torch.zeros(N_test_obs_data, dim)
for i in range(N_test_obs_data):
x_test_obs_data[i, :] = func.calc_summary_stats(x_o)
theta_test_obs_data[i, :] = conj_model.model.loc
flow_lik, flow_post = func.set_up_networks()
hyper_params = [0.001, 0.002, 0.95, 0.7]
if lambda_val > 0:
hyper_params[-1] = lambda_val
if hp_tuning >= 2:
hyper_params = func.sample_hp("snpla", hp_tuning)
optimizer_lik = torch.optim.Adam(flow_lik.parameters(), lr=hyper_params[0])
optimizer_post = torch.optim.Adam(flow_post.parameters(), lr=hyper_params[1])
decay_rate_post = hyper_params[2]
nbr_rounds = 10
prob_prior_decay_rate = hyper_params[3]
prob_prior = snpla.calc_prob_prior(nbr_rounds, prob_prior_decay_rate)
print(prob_prior)
#nbr_lik = [2000, 2000, 2000, 2000]
#nbr_epochs_lik = [25, 25, 25, 25]
#batch_size = 50
#batch_size_post = 50
#nbr_post = [10000, 10000, 10000, 10000]
#nbr_epochs_post = [25, 25, 25, 25]
nbr_lik = [2500 for _ in range(nbr_rounds)] # [1000, 1000, 1000, 1000, 1000] # , 2000, 2000]
nbr_epochs_lik = [75 for _ in range(nbr_rounds)] # [100, 100, 100, 100, 100]
batch_size = 50
batch_size_post = 1000
nbr_post = [10000 for _ in range(nbr_rounds)] # [10000, 10000, 10000, 10000, 10000] # , 10000, 10000]
nbr_epochs_post = [75 for _ in range(nbr_rounds)] # [50, 50, 50, 50, 50, 50]
x_o_batch_post = torch.zeros(batch_size_post, 5)
for i in range(batch_size_post):
x_o_batch_post[i, :] = func.calc_summary_stats(x_o)
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
start = time.time()
# TODO check prior and simulator
models_lik, models_post = snpla.inference_snpla(flow_lik,
flow_post,
conj_model.prior,
conj_model.model_sim,
optimizer_lik,
optimizer_post,
decay_rate_post,
func.calc_summary_stats(x_o),
x_o_batch_post,
dim,
prob_prior,
nbr_lik,
nbr_epochs_lik,
nbr_post,
nbr_epochs_post,
batch_size,
batch_size_post)
end = time.time()
run_time = end - start
print("")
print("Runtime:" + str(round(run_time, 2)))
kl_divs_trained = []
start = time.time()
torch.manual_seed(seed)
for i in range(nbr_rounds):
print(i)
posterior_sample = models_post[i].sample(1000, context=func.calc_summary_stats(x_o))
posterior_sample = posterior_sample.reshape((1000, 2))
kl_divs_trained.append(conj_model.kl_div(analytical_posterior, posterior_sample))
if hp_tuning == 0 and lambda_val > 0:
np.savetxt('mv_gaussian/low_dim_w_summary_stats/lambda_val/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
elif hp_tuning == 0:
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
else:
np.savetxt('mv_gaussian/low_dim_w_summary_stats/hp_tuning/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
end = time.time()
run_time_inference = (end - start) / nbr_rounds
if hp_tuning == 0 and lambda_val > 0:
with open('mv_gaussian/low_dim_w_summary_stats/lambda_val/snpla_' + id_job + '.txt', 'w') as f:
for h in hyper_params:
f.write('%.6f\n' % h)
for p in prob_prior:
f.write('%.6f\n' % p)
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(nbr_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
elif hp_tuning == 0:
with open('mv_gaussian/low_dim_w_summary_stats/results/snpla_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(nbr_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
else:
with open('mv_gaussian/low_dim_w_summary_stats/hp_tuning/snpla_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % hp_tuning)
for h in hyper_params:
f.write('%.6f\n' % h)
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(nbr_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
if hp_tuning == 0:
# Inference
# Sample data from post pred
N_post_pred_test = 1000
x_post_pred, theta_post_pred = func.run_model_sim(N_post_pred_test, seed + 3, conj_model, analytical_posterior,
conj_model.model.covariance_matrix, dim, False)
torch.manual_seed(seed)
x_prior = flow_lik.sample(1, context=theta_test)
x_theta_true = flow_lik.sample(1, context=theta_test_obs_data)
x_post = flow_lik.sample(1, context=theta_post_pred)
x_prior = x_prior.reshape(x_test.shape)
x_theta_true = x_theta_true.reshape(x_test_obs_data.shape)
x_post = x_post.reshape(x_post_pred.shape)
# Write results
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_snpla_' + id_job +
'.csv', x_theta_true.detach().numpy(), delimiter=",")
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_prior_snpla_' + id_job + '.csv',
x_prior.detach().numpy(), delimiter=",")
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_post_snpla_' + id_job + '.csv',
x_post.detach().numpy(), delimiter=",")
| true | true |
f715435ca91863f52909480c8b9b5ef1a9fa028f | 3,048 | py | Python | alipay/aop/api/domain/AlipayUserCertDocIDCard.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserCertDocIDCard.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserCertDocIDCard.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserCertDocIDCard(object):
    """ID-card certification document for the Alipay OpenAPI.

    Holds the encoded images of both card sides plus the holder's name,
    card number and expiry date, and converts to/from the wire dict format.
    """

    # Field names shared by to_alipay_dict / from_alipay_dict; the wire keys
    # are identical to the attribute names.
    _FIELDS = ('encoded_img_emblem', 'encoded_img_identity', 'expire_date',
               'name', 'number')

    def __init__(self):
        # Backing storage for the properties below; everything starts unset.
        self._encoded_img_emblem = None
        self._encoded_img_identity = None
        self._expire_date = None
        self._name = None
        self._number = None

    @property
    def encoded_img_emblem(self):
        return self._encoded_img_emblem

    @encoded_img_emblem.setter
    def encoded_img_emblem(self, value):
        self._encoded_img_emblem = value

    @property
    def encoded_img_identity(self):
        return self._encoded_img_identity

    @encoded_img_identity.setter
    def encoded_img_identity(self, value):
        self._encoded_img_identity = value

    @property
    def expire_date(self):
        return self._expire_date

    @expire_date.setter
    def expire_date(self, value):
        self._expire_date = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def number(self):
        return self._number

    @number.setter
    def number(self, value):
        self._number = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, skipping unset/falsy fields.

        Nested values exposing their own ``to_alipay_dict`` are serialized
        recursively.
        """
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a wire dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayUserCertDocIDCard()
        for field in AlipayUserCertDocIDCard._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| 30.178218 | 91 | 0.607612 |
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserCertDocIDCard(object):
    """ID-card certification document (comment-stripped duplicate copy):
    encoded images of both card sides plus the holder's name, number and
    expiry date, with dict (de)serialization for the Alipay OpenAPI."""
    def __init__(self):
        # All fields start unset; exposed via the properties below.
        self._encoded_img_emblem = None
        self._encoded_img_identity = None
        self._expire_date = None
        self._name = None
        self._number = None
    @property
    def encoded_img_emblem(self):
        return self._encoded_img_emblem
    @encoded_img_emblem.setter
    def encoded_img_emblem(self, value):
        self._encoded_img_emblem = value
    @property
    def encoded_img_identity(self):
        return self._encoded_img_identity
    @encoded_img_identity.setter
    def encoded_img_identity(self, value):
        self._encoded_img_identity = value
    @property
    def expire_date(self):
        return self._expire_date
    @expire_date.setter
    def expire_date(self, value):
        self._expire_date = value
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
    @property
    def number(self):
        return self._number
    @number.setter
    def number(self, value):
        self._number = value
    def to_alipay_dict(self):
        """Serialize to a dict, skipping falsy fields; nested objects with a
        to_alipay_dict method are serialized recursively."""
        params = dict()
        if self.encoded_img_emblem:
            if hasattr(self.encoded_img_emblem, 'to_alipay_dict'):
                params['encoded_img_emblem'] = self.encoded_img_emblem.to_alipay_dict()
            else:
                params['encoded_img_emblem'] = self.encoded_img_emblem
        if self.encoded_img_identity:
            if hasattr(self.encoded_img_identity, 'to_alipay_dict'):
                params['encoded_img_identity'] = self.encoded_img_identity.to_alipay_dict()
            else:
                params['encoded_img_identity'] = self.encoded_img_identity
        if self.expire_date:
            if hasattr(self.expire_date, 'to_alipay_dict'):
                params['expire_date'] = self.expire_date.to_alipay_dict()
            else:
                params['expire_date'] = self.expire_date
        if self.name:
            if hasattr(self.name, 'to_alipay_dict'):
                params['name'] = self.name.to_alipay_dict()
            else:
                params['name'] = self.name
        if self.number:
            if hasattr(self.number, 'to_alipay_dict'):
                params['number'] = self.number.to_alipay_dict()
            else:
                params['number'] = self.number
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a wire dict; None for empty/missing input."""
        if not d:
            return None
        o = AlipayUserCertDocIDCard()
        if 'encoded_img_emblem' in d:
            o.encoded_img_emblem = d['encoded_img_emblem']
        if 'encoded_img_identity' in d:
            o.encoded_img_identity = d['encoded_img_identity']
        if 'expire_date' in d:
            o.expire_date = d['expire_date']
        if 'name' in d:
            o.name = d['name']
        if 'number' in d:
            o.number = d['number']
        return o
| true | true |
f71543f1283b285219a186f659bb0b4f1109d5be | 387 | py | Python | boost/inception/migrations/0006_auto_20190723_1445.py | igorMIA/bus_com_parser | 07de65f3106c302d96b5fd9dad89562de44ec63f | [
"MIT"
] | null | null | null | boost/inception/migrations/0006_auto_20190723_1445.py | igorMIA/bus_com_parser | 07de65f3106c302d96b5fd9dad89562de44ec63f | [
"MIT"
] | 12 | 2020-02-12T01:09:12.000Z | 2022-03-11T23:54:05.000Z | boost/inception/migrations/0006_auto_20190723_1445.py | igorMIA/bus_com_parser | 07de65f3106c302d96b5fd9dad89562de44ec63f | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2019-07-23 14:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make BusStation.cost nullable."""
    # Must run after migration 0005 of the 'inception' app.
    dependencies = [
        ('inception', '0005_auto_20190723_0810'),
    ]
    operations = [
        # Relax BusStation.cost from required to an optional float.
        migrations.AlterField(
            model_name='busstation',
            name='cost',
            field=models.FloatField(null=True),
        ),
    ]
| 20.368421 | 49 | 0.599483 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Duplicate copy: makes BusStation.cost a nullable FloatField."""
    dependencies = [
        ('inception', '0005_auto_20190723_0810'),
    ]
    operations = [
        migrations.AlterField(
            model_name='busstation',
            name='cost',
            field=models.FloatField(null=True),
        ),
    ]
| true | true |
f715452e7dcf6d35a6ee975f8b79b14024d1e44c | 1,053 | py | Python | ProjectEulerPython/problems/problem_039.py | geo-desic/project-euler | 8065ee082a6948447ef961c9aa960c90a815a3ab | [
"MIT"
] | null | null | null | ProjectEulerPython/problems/problem_039.py | geo-desic/project-euler | 8065ee082a6948447ef961c9aa960c90a815a3ab | [
"MIT"
] | null | null | null | ProjectEulerPython/problems/problem_039.py | geo-desic/project-euler | 8065ee082a6948447ef961c9aa960c90a815a3ab | [
"MIT"
] | null | null | null | from problems.problem import Problem
def generate_pythagorean_triples(ub: int) -> list:
    """Generate all Pythagorean triples whose sides are all <= ub.

    For fixed a and increasing b, the required hypotenuse c is
    non-decreasing, so c only ever advances forward within one a-loop.
    https://en.wikipedia.org/wiki/Pythagorean_triple

    :param ub: inclusive upper bound on the side lengths a < b < c.
    :return: list of [perimeter, a, b, c] entries, ordered by a, then b.
        (Fixed return annotation: ``-> []`` was a list literal, not a type.)
    """
    result = []
    for a in range(1, ub):
        aa = a * a
        b = a + 1
        c = b + 1
        while c <= ub:
            cc = aa + b * b
            # Advance c until c*c >= a*a + b*b; cc grows with b, so c never
            # needs to move backwards.
            while c * c < cc:
                c += 1
            if c * c == cc and c <= ub:
                result.append([a + b + c, a, b, c])
            b += 1
    return result
class Problem039(Problem):
    """Project Euler 39: find the perimeter p <= 1000 that admits the most
    integral right triangles with sides {a, b, c} and a + b + c = p."""
    def calculate_answer(self) -> int:
        """Return the perimeter <= 1000 with the most Pythagorean solutions.

        Counts triples per perimeter and tracks the running maximum.
        """
        answer = 0
        max_perimeter = 1000
        # solution_counts[p] = number of triples seen with perimeter p.
        solution_counts = [0] * (max_perimeter + 1)
        # Every side is < p/2, so p/2 + 1 bounds the generator safely.
        triangles = generate_pythagorean_triples(max_perimeter // 2 + 1)
        max_solutions = 0
        for triangle in triangles:
            p = triangle[0]  # entries are [perimeter, a, b, c]
            if p <= max_perimeter:
                solution_counts[p] += 1
                if solution_counts[p] > max_solutions:
                    max_solutions = solution_counts[p]
                    answer = p
        # Bug fix: report the winning perimeter's solution count
        # (max_solutions), not the loop-local count from the final iteration,
        # which also raised NameError when no triangles were generated.
        self.print_detail(f"p = {answer}; solutions = {max_solutions}")
        return answer
| 27 | 69 | 0.576448 | from problems.problem import Problem
def generate_pythagorean_triples(ub: int) -> []:
    """Collect every Pythagorean triple with all sides <= ub.

    Each result entry is [perimeter, a, b, c] with a < b < c; entries are
    produced in ascending order of a, then b.
    """
    triples = []
    for leg_a in range(1, ub):
        leg_a_sq = leg_a * leg_a
        leg_b = leg_a + 1
        hyp = leg_b + 1
        while hyp <= ub:
            target = leg_a_sq + leg_b * leg_b
            # The needed hypotenuse only grows with leg_b, so just advance it.
            while hyp * hyp < target:
                hyp += 1
            if hyp <= ub and hyp * hyp == target:
                triples.append([leg_a + leg_b + hyp, leg_a, leg_b, hyp])
            leg_b += 1
    return triples
class Problem039(Problem):
    # Project Euler 39 (duplicate copy): perimeter p <= 1000 maximising the
    # number of integral right triangles with a + b + c = p.
    def calculate_answer(self) -> int:
        """Return the perimeter <= 1000 with the most Pythagorean solutions."""
        answer = 0
        max_perimeter = 1000
        # solution_counts[p] = number of triples seen with perimeter p.
        solution_counts = [0 for i in range(max_perimeter + 1)]
        # Every side is < p/2, so p/2 + 1 bounds the generator.
        triangles = generate_pythagorean_triples(max_perimeter // 2 + 1)
        max_solutions = 0
        for triangle in triangles:
            p = triangle[0]  # entries are [perimeter, a, b, c]
            if p <= max_perimeter:
                solution_counts[p] += 1
                solutions = solution_counts[p]
                if (solutions > max_solutions):
                    max_solutions = solutions
                    answer = p
        # NOTE(review): 'solutions' is the count from the *last* iteration, not
        # necessarily the count for 'answer', and it raises NameError when
        # 'triangles' is empty — this probably should be max_solutions.
        self.print_detail(f"p = {answer}; solutions = {solutions}")
        return answer
| true | true |
f7154628c62314b11aa471a6abfc977dd15376a2 | 1,130 | py | Python | scripts/install_nightly.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 62 | 2018-03-30T15:46:56.000Z | 2021-12-08T23:30:24.000Z | scripts/install_nightly.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 114 | 2018-03-21T01:12:43.000Z | 2021-07-05T12:29:54.000Z | scripts/install_nightly.py | CDAT/uvcdat | 5133560c0c049b5c93ee321ba0af494253b44f91 | [
"BSD-3-Clause"
] | 14 | 2018-06-06T02:42:47.000Z | 2021-11-26T03:27:00.000Z | import sys
import os
import argparse
# Make the sibling 'modules' directory importable before pulling in helpers.
this_dir = os.path.abspath(os.path.dirname(__file__))
modules_dir = os.path.join(this_dir, '..', 'modules')
sys.path.append(modules_dir)
from Const import *
from Util import *
from CondaUtils import *
from CDATSetupUtils import *
valid_py_vers = PYTHON_VERSIONS
parser = argparse.ArgumentParser(description="install nightly",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-w", "--workdir",
                    help="working directory -- miniconda will be installed here")
parser.add_argument("-p", "--py_ver", choices=valid_py_vers,
                    help="python version, 'py2.7' or 'py3.6' or 'py3.7'")
args = parser.parse_args()
workdir = args.workdir
py_ver = args.py_ver
# Install miniconda into the working directory, then a nightly env on top.
status, conda_path = install_miniconda(workdir, py_ver)
if status != SUCCESS:
    sys.exit(FAILURE)
print("xxx conda_path: {p}".format(p=conda_path))
status, env_name = install_nightly(workdir, conda_path, 'nightly', py_ver)
if status != SUCCESS:
    sys.exit(FAILURE)
# List the packages in the freshly created env; exit with its status code.
status = conda_list(conda_path, env_name)
sys.exit(status)
| 23.061224 | 88 | 0.7 | import sys
import os
import argparse
# Duplicate copy of the nightly-install script: installs miniconda plus a
# nightly env into --workdir for the given --py_ver, then lists the env.
this_dir = os.path.abspath(os.path.dirname(__file__))
modules_dir = os.path.join(this_dir, '..', 'modules')
sys.path.append(modules_dir)
from Const import *
from Util import *
from CondaUtils import *
from CDATSetupUtils import *
valid_py_vers = PYTHON_VERSIONS
parser = argparse.ArgumentParser(description="install nightly",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-w", "--workdir",
                    help="working directory -- miniconda will be installed here")
parser.add_argument("-p", "--py_ver", choices=valid_py_vers,
                    help="python version, 'py2.7' or 'py3.6' or 'py3.7'")
args = parser.parse_args()
workdir = args.workdir
py_ver = args.py_ver
status, conda_path = install_miniconda(workdir, py_ver)
if status != SUCCESS:
    sys.exit(FAILURE)
print("xxx conda_path: {p}".format(p=conda_path))
status, env_name = install_nightly(workdir, conda_path, 'nightly', py_ver)
if status != SUCCESS:
    sys.exit(FAILURE)
status = conda_list(conda_path, env_name)
sys.exit(status)
| true | true |
f715466767fcc3856809b3d312d0e597406be25b | 1,717 | py | Python | model/summaries.py | victor-estrade/SystGradDescent | 822e7094290301ec47a99433381a8d6406798aff | [
"MIT"
] | 2 | 2019-03-20T09:05:02.000Z | 2019-03-20T15:23:44.000Z | model/summaries.py | victor-estrade/SystGradDescent | 822e7094290301ec47a99433381a8d6406798aff | [
"MIT"
] | null | null | null | model/summaries.py | victor-estrade/SystGradDescent | 822e7094290301ec47a99433381a8d6406798aff | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
# Default number of histogram bins shared by all summary helpers.
DEFAULT_N_BINS = 10
def compute_summaries(clf, X, W, n_bins=DEFAULT_N_BINS):
    """Weighted histogram (over [0, 1]) of clf's positive-class probability.

    :param clf: classifier exposing predict_proba(X).
    :param X: samples to score.
    :param W: per-sample weights.
    :param n_bins: number of equal-width bins.
    :return: array of n_bins weighted counts.
    """
    positive_proba = clf.predict_proba(X)[:, 1]
    counts, _ = np.histogram(positive_proba, range=(0., 1.), weights=W, bins=n_bins)
    return counts
class ClassifierSummaryComputer():
    """Callable that summarises (X, W) as a weighted histogram of the wrapped
    classifier's positive-class probability over [0, 1]."""

    def __init__(self, clf, n_bins=DEFAULT_N_BINS):
        self.clf = clf          # must expose predict_proba(X)
        self.n_bins = n_bins    # number of equal-width bins

    def __call__(self, X, W):
        """Return the n_bins weighted counts for samples X with weights W."""
        positive_proba = self.clf.predict_proba(X)[:, 1]
        counts, _ = np.histogram(positive_proba, range=(0., 1.), weights=W, bins=self.n_bins)
        return counts
class HistogramSummaryComputer():
    """Per-feature weighted histograms whose bin edges are learned from data."""

    def __init__(self, n_bins=DEFAULT_N_BINS):
        self.n_bins = n_bins

    def fit(self, X):
        """Learn, per column of X, n_bins equal-width edges widened by one bin
        width on each side so values near the extremes still fall inside."""
        self.edges_list = []
        for i in range(X.shape[1]):
            column = X[:, i]
            lo = np.min(column)
            hi = np.max(column)
            margin = (hi - lo) / self.n_bins  # be a bit more inclusive
            _, edges = np.histogram(column, range=(lo - margin, hi + margin), bins=self.n_bins)
            self.edges_list.append(edges)
        return self

    def predict(self, X, W):
        """Concatenated per-column weighted bin counts using the fitted edges."""
        counts = []
        for i, edges in enumerate(self.edges_list):
            column_counts, _ = np.histogram(X[:, i], bins=edges, weights=W)
            counts.extend(column_counts)
        return counts

    def __call__(self, X, W):
        return np.array(self.predict(X, W))
| 30.122807 | 90 | 0.610367 |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
# Default bin count shared by the summary computers (duplicate copy).
DEFAULT_N_BINS = 10
def compute_summaries(clf, X, W, n_bins=DEFAULT_N_BINS):
    # Weighted histogram of the positive-class probability over [0, 1].
    proba = clf.predict_proba(X)
    count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=n_bins)
    return count
class ClassifierSummaryComputer():
    """Callable wrapper: weighted histogram of clf's positive-class probability."""
    def __init__(self, clf, n_bins=DEFAULT_N_BINS):
        self.clf = clf
        self.n_bins = n_bins
    def __call__(self, X, W):
        proba = self.clf.predict_proba(X)
        count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=self.n_bins)
        return count
class HistogramSummaryComputer():
    """Per-feature weighted histograms; bin edges are learned in fit()."""
    def __init__(self, n_bins=DEFAULT_N_BINS):
        self.n_bins = n_bins
    def fit(self, X):
        """Learn n_bins equal-width edges per column of X, widened by one bin
        width on each side so future values near the extremes fit inside."""
        self.edges_list = []
        for i in range(X.shape[1]):
            x = X[:, i]
            maximum = np.max(x)
            minimum = np.min(x)
            diff = maximum - minimum
            maximum = maximum + diff / self.n_bins
            minimum = minimum - diff / self.n_bins
            count, bin_edges = np.histogram(x, range=(minimum, maximum), bins=self.n_bins)
            self.edges_list.append(bin_edges)
        return self
    def predict(self, X, W):
        """Concatenated weighted per-column counts using the fitted edges."""
        counts = []
        for i, bin_edges in enumerate(self.edges_list):
            x = X[:, i]
            count, _ = np.histogram(x, bins=bin_edges, weights=W)
            counts.extend(count)
        return counts
    def __call__(self, X, W):
        counts = self.predict(X, W)
        return np.array(counts)
| true | true |
f71546b9ddaa47e3907f339c6e8a2f21aac12fe0 | 1,566 | py | Python | 14_Modulos_e_pacotes/ex110/moeda.py | TheCarvalho/Curso-Em-Video-Python | 8bd5128023ddf8b0f59eab46463c95e47569da73 | [
"Unlicense"
] | null | null | null | 14_Modulos_e_pacotes/ex110/moeda.py | TheCarvalho/Curso-Em-Video-Python | 8bd5128023ddf8b0f59eab46463c95e47569da73 | [
"Unlicense"
] | null | null | null | 14_Modulos_e_pacotes/ex110/moeda.py | TheCarvalho/Curso-Em-Video-Python | 8bd5128023ddf8b0f59eab46463c95e47569da73 | [
"Unlicense"
def metade(valor=0, formato=False):
    """Return half of *valor*; currency-formatted when *formato* is True."""
    half = valor / 2
    if formato is False:
        return half
    return moeda(half)
def dobro(valor=0, formato=False):
    """Return twice *valor*; currency-formatted when *formato* is True."""
    doubled = valor * 2
    return moeda(doubled) if formato is not False else doubled
def aumentar(valor=0, porcentagem=0, formato=False):
    """Increase *valor* by *porcentagem* percent; optionally format as currency."""
    # Keep the additive form (value + delta) for float-identical results.
    delta = valor * porcentagem / 100
    result = valor + delta
    if formato is False:
        return result
    return moeda(result)
def diminuir(valor=0, porcentagem=0, formato=False):
    """Decrease *valor* by *porcentagem* percent; optionally format as currency."""
    discount = valor * porcentagem / 100
    result = valor - discount
    if not formato:
        return result
    return moeda(result)
# Reminder: `if formato:` runs when formato is truthy (True); `if not formato:` runs when it is falsy (False).
# In moeda() below the currency symbol is the *second* parameter, so the first positional argument stays the value.
def moeda(valor=0, moeda='R$'):
    """Format *valor* as a currency string, e.g. 3.5 -> 'R$3,50'.

    The symbol can be overridden via the *moeda* parameter.
    """
    texto = f'{moeda}{valor:.2f}'
    return texto.replace('.', ',')
def resumo(p=0, por1=10, por2=5):
    """Print a currency-formatted summary table for price *p*: its double,
    its half, a *por1*% increase and a *por2*% reduction."""
    print('-'*40)
    # str.center(40) pads the title on both sides ( .rjust() => right, .ljust() => left ).
    print('RESUMO DO VALOR'.center(40))
    print('-'*40)
    print(f'Preço Analisado:\t{moeda(p)}')
    print(f'Dobro do preço:\t\t{dobro(p,True)}')
    print(f'Metade do preço:\t{metade(p,True)}')
    print(f'{por1}% de aumento:\t\t{aumentar(p,por1,True)}')
    print(f'{por2}% de Redução:\t\t{diminuir(p,por2,True)}')
    print('-'*40)
'''
print(f'\nA metade de {moeda.moeda(p, "US$")} é {moeda.metade(p, True )}')
print(f'O dobro de {moeda.moeda(p)} é {moeda.dobro(p, True)}')
print(f'Aumentando 10% temos {moeda.aumentar(p, 10, True)}')
print(f'Reduzindo 13% temos {moeda.diminuir(p, 13, True)}')
'''
def metade(valor=0, formato=False):
    """Return half of *valor*, currency-formatted when *formato* is True."""
    metade_do_valor = valor / 2
    return metade_do_valor if formato is False else moeda(metade_do_valor)
def dobro(valor=0, formato=False):
    # Double the value; format as currency only when requested.
    res = valor*2
    return res if formato is False else moeda(res)
def aumentar(valor=0, porcentagem=0, formato=False):
    # Add porcentagem% of the value; format as currency only when requested.
    res = valor+(valor * porcentagem/100)
    return res if formato is False else moeda(res)
def diminuir(valor=0, porcentagem=0, formato=False):
    # Subtract porcentagem% of the value; format as currency only when requested.
    res = valor-(valor * porcentagem/100)
    return res if not formato else moeda(res)
def moeda(valor=0, moeda='R$'):
    # Format as currency with two decimals and a comma separator, e.g. 'R$3,50'.
    return f'{moeda}{valor:.2f}'.replace('.', ',')
def resumo(p=0, por1=10, por2=5):
    """Print a currency-formatted summary (double, half, +por1%, -por2%) of price p."""
    print('-'*40)
    print('RESUMO DO VALOR'.center(40))
    print('-'*40)
    print(f'Preço Analisado:\t{moeda(p)}')
    print(f'Dobro do preço:\t\t{dobro(p,True)}')
    print(f'Metade do preço:\t{metade(p,True)}')
    print(f'{por1}% de aumento:\t\t{aumentar(p,por1,True)}')
    print(f'{por2}% de Redução:\t\t{diminuir(p,por2,True)}')
    print('-'*40)
| true | true |
f71548f4a246b57a8868bfef6d1910128b7621d9 | 4,245 | py | Python | tools/ci_build/op_registration_validator.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 669 | 2018-12-03T22:00:31.000Z | 2019-05-06T19:42:49.000Z | tools/ci_build/op_registration_validator.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 440 | 2018-12-03T21:09:56.000Z | 2019-05-06T20:47:23.000Z | tools/ci_build/op_registration_validator.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 140 | 2018-12-03T21:15:28.000Z | 2019-05-06T18:02:36.000Z | # !/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Validate ORT kernel registrations.
"""
import argparse
import os
import sys
import typing
import op_registration_utils
from logger import get_logger
log = get_logger("op_registration_validator")
# deprecated ops where the last registration should have an end version.
# value for each entry is the opset when it was deprecated. end version of last registration should equal value - 1.
# Keys are "<domain>:<operator>", matching RegistrationValidator's key format.
deprecated_ops = {
    "kOnnxDomain:Scatter": 11,
    "kOnnxDomain:Upsample": 10,
    # MeanVarianceNormalization and ThresholdedRelu were in contrib ops and incorrectly registered using
    # kOnnxDomain. They became official ONNX operators later and are registered there now. That leaves
    # entries in the contrib ops registrations with end versions for when the contrib op was 'deprecated'
    # and became an official op.
    "kOnnxDomain:MeanVarianceNormalization": 9,
    "kOnnxDomain:ThresholdedRelu": 10,
}
class RegistrationValidator(op_registration_utils.RegistrationProcessor):
    """Checks that each operator's kernel registrations form a contiguous,
    correctly terminated sequence of opset version ranges."""

    def __init__(self):
        # Maps "<domain>:<operator>" -> (start_version, end_version) of the
        # most recently processed registration for that op.
        self.last_op_registrations = {}
        self.failed = False

    def process_registration(
        self,
        lines: typing.List[str],
        domain: str,
        operator: str,
        start_version: int,
        end_version: typing.Optional[int] = None,
        type: typing.Optional[str] = None,
    ):
        """Validate one registration against the previous one for the same op.

        Flags a previous registration that was left open-ended (no end
        version) or whose end version is not adjacent to this start version.
        """
        key = domain + ":" + operator
        # Idiom: dict.get with a default replaces the conditional double lookup.
        prev_start, prev_end = self.last_op_registrations.get(key, (None, None))
        if prev_start:
            # a typed registration where the to/from matches for each entry so nothing to update
            if prev_start == start_version and prev_end == end_version:
                return

            # previous registration was unversioned but should have been if we are seeing another registration
            if not prev_end:
                log.error(
                    "Invalid registration for {}. Registration for opset {} has no end version but was "
                    "superceeded by version {}.".format(key, prev_start, start_version)
                )
                self.failed = True
                return

            # previous registration end opset is not adjacent to the start of the next registration
            if prev_end != start_version - 1:
                log.error(
                    "Invalid registration for {}. Registration for opset {} should have end version of {}".format(
                        key, prev_start, start_version - 1
                    )
                )
                self.failed = True
                return

        self.last_op_registrations[key] = (start_version, end_version)

    def ok(self):
        """True while no validation error has been recorded."""
        return not self.failed

    def validate_last_registrations(self):
        """Ensure the final registration of every op is unversioned, unless the
        op is known-deprecated at exactly that end version."""
        # make sure we have an unversioned last entry for each operator unless it's deprecated
        # (unpack directly; the start version is unused here)
        for key, (_, opset_to) in self.last_op_registrations.items():
            deprecated = key in deprecated_ops and opset_to == deprecated_ops[key] - 1

            if opset_to and not deprecated:
                log.error("Missing unversioned registration for {}".format(key))
                self.failed = True
if __name__ == "__main__":
    # Validate every kernel-registration file; exit non-zero on first failure.
    parser = argparse.ArgumentParser(description="Script to validate operator kernel registrations.")
    parser.add_argument(
        "--ort_root",
        type=str,
        help="Path to ONNXRuntime repository root. " "Inferred from the location of this script if not provided.",
    )

    args = parser.parse_args()
    ort_root = os.path.abspath(args.ort_root) if args.ort_root else ""
    include_cuda = True  # validate CPU and CUDA EP registrations
    registration_files = op_registration_utils.get_kernel_registration_files(ort_root, include_cuda)

    for file in registration_files:
        log.info("Processing {}".format(file))

        processor = RegistrationValidator()
        op_registration_utils.process_kernel_registration_file(file, processor)
        processor.validate_last_registrations()

        if not processor.ok():
            sys.exit(-1)
| 36.594828 | 117 | 0.659128 |
import argparse
import os
import sys
import typing
import op_registration_utils
from logger import get_logger
log = get_logger("op_registration_validator")
# Ops deprecated from ONNX (duplicate copy): the final registration's end
# version must equal (deprecation opset - 1) instead of being open-ended.
deprecated_ops = {
    "kOnnxDomain:Scatter": 11,
    "kOnnxDomain:Upsample": 10,
    "kOnnxDomain:MeanVarianceNormalization": 9,
    "kOnnxDomain:ThresholdedRelu": 10,
}
class RegistrationValidator(op_registration_utils.RegistrationProcessor):
    """Validates that each operator's kernel registrations form contiguous,
    correctly terminated opset version ranges (duplicate copy)."""
    def __init__(self):
        # "<domain>:<operator>" -> (start_version, end_version) last seen.
        self.last_op_registrations = {}
        self.failed = False
    def process_registration(
        self,
        lines: typing.List[str],
        domain: str,
        operator: str,
        start_version: int,
        end_version: typing.Optional[int] = None,
        type: typing.Optional[str] = None,
    ):
        """Check one registration against the previous one for the same op."""
        key = domain + ":" + operator
        prev_start, prev_end = self.last_op_registrations[key] if key in self.last_op_registrations else (None, None)
        if prev_start:
            # Identical range seen again (typed registrations): nothing to do.
            if prev_start == start_version and prev_end == end_version:
                return
            # Previous entry was open-ended yet a newer registration exists.
            if not prev_end:
                log.error(
                    "Invalid registration for {}. Registration for opset {} has no end version but was "
                    "superceeded by version {}.".format(key, prev_start, start_version)
                )
                self.failed = True
                return
            # Ranges must be adjacent: previous end == next start - 1.
            if prev_end != start_version - 1:
                log.error(
                    "Invalid registration for {}. Registration for opset {} should have end version of {}".format(
                        key, prev_start, start_version - 1
                    )
                )
                self.failed = True
                return
        self.last_op_registrations[key] = (start_version, end_version)
    def ok(self):
        """True while no validation error has been recorded."""
        return not self.failed
    def validate_last_registrations(self):
        """Final registration per op must be unversioned unless deprecated."""
        for entry in self.last_op_registrations.items():
            key, value = entry
            opset_from, opset_to = value
            deprecated = key in deprecated_ops and opset_to == deprecated_ops[key] - 1
            if opset_to and not deprecated:
                log.error("Missing unversioned registration for {}".format(key))
                self.failed = True
parser = argparse.ArgumentParser(description="Script to validate operator kernel registrations.")
parser.add_argument(
"--ort_root",
type=str,
help="Path to ONNXRuntime repository root. " "Inferred from the location of this script if not provided.",
)
args = parser.parse_args()
ort_root = os.path.abspath(args.ort_root) if args.ort_root else ""
include_cuda = True # validate CPU and CUDA EP registrations
registration_files = op_registration_utils.get_kernel_registration_files(ort_root, include_cuda)
for file in registration_files:
log.info("Processing {}".format(file))
processor = RegistrationValidator()
op_registration_utils.process_kernel_registration_file(file, processor)
processor.validate_last_registrations()
if not processor.ok():
sys.exit(-1)
| true | true |
f7154930de7d5ffc6f439d636c13d83d490d8d16 | 1,853 | py | Python | src/users/models/microsoftgraphcalendar_permission.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/microsoftgraphcalendar_permission.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/microsoftgraphcalendar_permission.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftgraphcalendarPermission(Model):
    """MicrosoftgraphcalendarPermission.
    :param id:
    :type id: str
    :param email_address:
    :type email_address: ~users.models.MicrosoftgraphemailAddress
    :param is_removable:
    :type is_removable: bool
    :param is_inside_organization:
    :type is_inside_organization: bool
    :param role: Possible values include: 'none', 'freeBusyRead',
     'limitedRead', 'read', 'write', 'delegateWithoutPrivateEventAccess',
     'delegateWithPrivateEventAccess', 'custom'
    :type role: str or ~users.models.enum
    :param allowed_roles:
    :type allowed_roles: list[str]
    """
    # Attribute name -> JSON key / msrest type, consumed by the Model base
    # class for (de)serialization. Auto-generated: edit the generator instead.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'email_address': {'key': 'emailAddress', 'type': 'MicrosoftgraphemailAddress'},
        'is_removable': {'key': 'isRemovable', 'type': 'bool'},
        'is_inside_organization': {'key': 'isInsideOrganization', 'type': 'bool'},
        'role': {'key': 'role', 'type': 'bool'} if False else {'key': 'role', 'type': 'str'},
        'allowed_roles': {'key': 'allowedRoles', 'type': '[str]'},
    }
    def __init__(self, id=None, email_address=None, is_removable=None, is_inside_organization=None, role=None, allowed_roles=None):
        """All fields default to None (absent on the wire)."""
        super(MicrosoftgraphcalendarPermission, self).__init__()
        self.id = id
        self.email_address = email_address
        self.is_removable = is_removable
        self.is_inside_organization = is_inside_organization
        self.role = role
        self.allowed_roles = allowed_roles
| 39.425532 | 131 | 0.622774 |
from msrest.serialization import Model
class MicrosoftgraphcalendarPermission(Model):
    """Auto-generated msrest model for a Microsoft Graph calendarPermission
    (duplicate, docstring-stripped copy)."""
    # Attribute name -> JSON key / msrest type, used by the Model base class.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'email_address': {'key': 'emailAddress', 'type': 'MicrosoftgraphemailAddress'},
        'is_removable': {'key': 'isRemovable', 'type': 'bool'},
        'is_inside_organization': {'key': 'isInsideOrganization', 'type': 'bool'},
        'role': {'key': 'role', 'type': 'str'},
        'allowed_roles': {'key': 'allowedRoles', 'type': '[str]'},
    }
    def __init__(self, id=None, email_address=None, is_removable=None, is_inside_organization=None, role=None, allowed_roles=None):
        """All fields default to None (absent on the wire)."""
        super(MicrosoftgraphcalendarPermission, self).__init__()
        self.id = id
        self.email_address = email_address
        self.is_removable = is_removable
        self.is_inside_organization = is_inside_organization
        self.role = role
        self.allowed_roles = allowed_roles
| true | true |
f7154c78addf3f568945d3b47a10b93101d1a781 | 4,877 | py | Python | xadmin/plugins/passwords.py | jneight/django-xadmin | d5fca423e3ae10e3ca086e5ae9ea7068872f0a29 | [
"BSD-3-Clause"
] | null | null | null | xadmin/plugins/passwords.py | jneight/django-xadmin | d5fca423e3ae10e3ca086e5ae9ea7068872f0a29 | [
"BSD-3-Clause"
] | null | null | null | xadmin/plugins/passwords.py | jneight/django-xadmin | d5fca423e3ae10e3ca086e5ae9ea7068872f0a29 | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.views import password_reset_confirm
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.views.base import BaseAdminPlugin, BaseAdminView, csrf_protect_m
from xadmin.views.website import LoginView
class ResetPasswordSendView(BaseAdminView):
    """Admin-site view that emails a password-reset link.

    GET renders the reset-request form; POST validates it and sends the
    reset email via Django's PasswordResetForm.
    """
    # Reachable without a logged-in session (the user forgot the password).
    need_site_permission = False

    password_reset_form = PasswordResetForm
    password_reset_template = 'xadmin/auth/password_reset/form.html'
    password_reset_done_template = 'xadmin/auth/password_reset/done.html'
    password_reset_token_generator = default_token_generator
    # Optional overrides for the outgoing email; None means Django defaults.
    password_reset_from_email = None
    password_reset_email_template = 'xadmin/auth/password_reset/email.html'
    password_reset_subject_template = None

    def get(self, request, *args, **kwargs):
        """Render the reset form (a bound form may be passed via kwargs)."""
        context = super(ResetPasswordSendView, self).get_context()
        context['form'] = kwargs.get('form', self.password_reset_form())
        return TemplateResponse(request, self.password_reset_template, context,
                                current_app=self.admin_site.name)

    @csrf_protect_m
    def post(self, request, *args, **kwargs):
        """Send the reset email on valid input; re-render the form otherwise."""
        form = self.password_reset_form(request.POST)
        if form.is_valid():
            opts = {
                'use_https': request.is_secure(),
                'token_generator': self.password_reset_token_generator,
                'email_template_name': self.password_reset_email_template,
                'request': request,
                'domain_override': request.get_host()
            }
            if self.password_reset_from_email:
                opts['from_email'] = self.password_reset_from_email
            if self.password_reset_subject_template:
                opts['subject_template_name'] = self.password_reset_subject_template
            form.save(**opts)
            context = super(ResetPasswordSendView, self).get_context()
            return TemplateResponse(request, self.password_reset_done_template, context,
                                    current_app=self.admin_site.name)
        else:
            # Invalid input: show the form again with validation errors bound.
            return self.get(request, form=form)
site.register_view(r'^xadmin/password_reset/$', ResetPasswordSendView, name='xadmin_password_reset')
class ResetLinkPlugin(BaseAdminPlugin):
    """Login-page plugin appending a 'forgotten password' link below the form."""
    def block_form_bottom(self, context, nodes):
        reset_link = self.get_admin_url('xadmin_password_reset')
        return '<div class="text-info" style="margin-top:15px;"><a href="%s"><i class="icon-question-sign"></i> %s</a></div>' % (reset_link, _('Forgotten your password or username?'))
site.register_plugin(ResetLinkPlugin, LoginView)
class ResetPasswordComfirmView(BaseAdminView):
    """Handles the emailed reset link and lets the user set a new password.

    NOTE(review): class name keeps the historical 'Comfirm' typo; renaming
    would break external references to this view.
    """
    need_site_permission = False

    password_reset_set_form = SetPasswordForm
    password_reset_confirm_template = 'xadmin/auth/password_reset/confirm.html'
    password_reset_token_generator = default_token_generator

    def do_view(self, request, uidb36, token, *args, **kwargs):
        """Delegate to Django's password_reset_confirm with our templates/forms."""
        context = super(ResetPasswordComfirmView, self).get_context()
        return password_reset_confirm(request, uidb36, token,
                                      template_name=self.password_reset_confirm_template,
                                      token_generator=self.password_reset_token_generator,
                                      set_password_form=self.password_reset_set_form,
                                      post_reset_redirect=self.get_admin_url('xadmin_password_reset_complete'),
                                      current_app=self.admin_site.name, extra_context=context)

    def get(self, request, uidb36, token, *args, **kwargs):
        return self.do_view(request, uidb36, token)

    def post(self, request, uidb36, token, *args, **kwargs):
        return self.do_view(request, uidb36, token)

    def get_media(self):
        # Extra JS/CSS needed by the password form page.
        return super(ResetPasswordComfirmView, self).get_media() + \
               self.vendor('xadmin.page.form.js', 'xadmin.form.css')
site.register_view(r'^xadmin/password_reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
                   ResetPasswordComfirmView, name='xadmin_password_reset_confirm')
class ResetPasswordCompleteView(BaseAdminView):
    """Final 'password changed' page with a link back to the login screen."""
    need_site_permission = False

    password_reset_complete_template = 'xadmin/auth/password_reset/complete.html'

    def get(self, request, *args, **kwargs):
        context = super(ResetPasswordCompleteView, self).get_context()
        context['login_url'] = self.get_admin_url('index')
        return TemplateResponse(request, self.password_reset_complete_template, context,
                                current_app=self.admin_site.name)
site.register_view(r'^xadmin/password_reset/complete/$', ResetPasswordCompleteView, name='xadmin_password_reset_complete')
| 42.408696 | 183 | 0.710478 |
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.views import password_reset_confirm
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.views.base import BaseAdminPlugin, BaseAdminView, csrf_protect_m
from xadmin.views.website import LoginView
class ResetPasswordSendView(BaseAdminView):
need_site_permission = False
password_reset_form = PasswordResetForm
password_reset_template = 'xadmin/auth/password_reset/form.html'
password_reset_done_template = 'xadmin/auth/password_reset/done.html'
password_reset_token_generator = default_token_generator
password_reset_from_email = None
password_reset_email_template = 'xadmin/auth/password_reset/email.html'
password_reset_subject_template = None
def get(self, request, *args, **kwargs):
context = super(ResetPasswordSendView, self).get_context()
context['form'] = kwargs.get('form', self.password_reset_form())
return TemplateResponse(request, self.password_reset_template, context,
current_app=self.admin_site.name)
@csrf_protect_m
def post(self, request, *args, **kwargs):
form = self.password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': self.password_reset_token_generator,
'email_template_name': self.password_reset_email_template,
'request': request,
'domain_override': request.get_host()
}
if self.password_reset_from_email:
opts['from_email'] = self.password_reset_from_email
if self.password_reset_subject_template:
opts['subject_template_name'] = self.password_reset_subject_template
form.save(**opts)
context = super(ResetPasswordSendView, self).get_context()
return TemplateResponse(request, self.password_reset_done_template, context,
current_app=self.admin_site.name)
else:
return self.get(request, form=form)
site.register_view(r'^xadmin/password_reset/$', ResetPasswordSendView, name='xadmin_password_reset')
class ResetLinkPlugin(BaseAdminPlugin):
def block_form_bottom(self, context, nodes):
reset_link = self.get_admin_url('xadmin_password_reset')
return '<div class="text-info" style="margin-top:15px;"><a href="%s"><i class="icon-question-sign"></i> %s</a></div>' % (reset_link, _('Forgotten your password or username?'))
site.register_plugin(ResetLinkPlugin, LoginView)
class ResetPasswordComfirmView(BaseAdminView):
need_site_permission = False
password_reset_set_form = SetPasswordForm
password_reset_confirm_template = 'xadmin/auth/password_reset/confirm.html'
password_reset_token_generator = default_token_generator
def do_view(self, request, uidb36, token, *args, **kwargs):
context = super(ResetPasswordComfirmView, self).get_context()
return password_reset_confirm(request, uidb36, token,
template_name=self.password_reset_confirm_template,
token_generator=self.password_reset_token_generator,
set_password_form=self.password_reset_set_form,
post_reset_redirect=self.get_admin_url('xadmin_password_reset_complete'),
current_app=self.admin_site.name, extra_context=context)
def get(self, request, uidb36, token, *args, **kwargs):
return self.do_view(request, uidb36, token)
def post(self, request, uidb36, token, *args, **kwargs):
return self.do_view(request, uidb36, token)
def get_media(self):
return super(ResetPasswordComfirmView, self).get_media() + \
self.vendor('xadmin.page.form.js', 'xadmin.form.css')
site.register_view(r'^xadmin/password_reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
ResetPasswordComfirmView, name='xadmin_password_reset_confirm')
class ResetPasswordCompleteView(BaseAdminView):
need_site_permission = False
password_reset_complete_template = 'xadmin/auth/password_reset/complete.html'
def get(self, request, *args, **kwargs):
context = super(ResetPasswordCompleteView, self).get_context()
context['login_url'] = self.get_admin_url('index')
return TemplateResponse(request, self.password_reset_complete_template, context,
current_app=self.admin_site.name)
site.register_view(r'^xadmin/password_reset/complete/$', ResetPasswordCompleteView, name='xadmin_password_reset_complete')
| true | true |
f7154e6ded9574e14e9389f664090a3155de4514 | 18,588 | py | Python | Lib/idlelib/idle_test/test_pyparse.py | fongchinghinunsw/cpython | 19926d058dc33856631c6c6b3fcb45b04fcab666 | [
"CNRI-Python-GPL-Compatible"
] | 120 | 2019-11-12T19:22:44.000Z | 2020-05-17T12:17:25.000Z | Lib/idlelib/idle_test/test_pyparse.py | fongchinghinunsw/cpython | 19926d058dc33856631c6c6b3fcb45b04fcab666 | [
"CNRI-Python-GPL-Compatible"
] | 19 | 2021-02-18T05:59:03.000Z | 2022-01-13T01:00:52.000Z | Lib/idlelib/idle_test/test_pyparse.py | fongchinghinunsw/cpython | 19926d058dc33856631c6c6b3fcb45b04fcab666 | [
"CNRI-Python-GPL-Compatible"
] | 18 | 2021-02-22T13:32:56.000Z | 2022-01-22T12:38:29.000Z | "Test pyparse, coverage 96%."
from idlelib import pyparse
import unittest
from collections import namedtuple
class ParseMapTest(unittest.TestCase):
def test_parsemap(self):
keepwhite = {ord(c): ord(c) for c in ' \t\n\r'}
mapping = pyparse.ParseMap(keepwhite)
self.assertEqual(mapping[ord('\t')], ord('\t'))
self.assertEqual(mapping[ord('a')], ord('x'))
self.assertEqual(mapping[1000], ord('x'))
def test_trans(self):
# trans is the production instance of ParseMap, used in _study1
parser = pyparse.Parser(4, 4)
self.assertEqual('\t a([{b}])b"c\'d\n'.translate(pyparse.trans),
'xxx(((x)))x"x\'x\n')
class PyParseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = pyparse.Parser(indentwidth=4, tabwidth=4)
@classmethod
def tearDownClass(cls):
del cls.parser
def test_init(self):
self.assertEqual(self.parser.indentwidth, 4)
self.assertEqual(self.parser.tabwidth, 4)
def test_set_code(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
# Not empty and doesn't end with newline.
with self.assertRaises(AssertionError):
setcode('a')
tests = ('',
'a\n')
for string in tests:
with self.subTest(string=string):
setcode(string)
eq(p.code, string)
eq(p.study_level, 0)
def test_find_good_parse_start(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
start = p.find_good_parse_start
# Split def across lines.
setcode('"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a,\n'
' b=True):\n'
' pass\n'
)
# No value sent for is_char_in_string().
self.assertIsNone(start())
# Make text look like a string. This returns pos as the start
# position, but it's set to None.
self.assertIsNone(start(is_char_in_string=lambda index: True))
# Make all text look like it's not in a string. This means that it
# found a good start position.
eq(start(is_char_in_string=lambda index: False), 44)
# If the beginning of the def line is not in a string, then it
# returns that as the index.
eq(start(is_char_in_string=lambda index: index > 44), 44)
# If the beginning of the def line is in a string, then it
# looks for a previous index.
eq(start(is_char_in_string=lambda index: index >= 44), 33)
# If everything before the 'def' is in a string, then returns None.
# The non-continuation def line returns 44 (see below).
eq(start(is_char_in_string=lambda index: index < 44), None)
# Code without extra line break in def line - mostly returns the same
# values.
setcode('"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a, b=True):\n'
' pass\n'
)
eq(start(is_char_in_string=lambda index: False), 44)
eq(start(is_char_in_string=lambda index: index > 44), 44)
eq(start(is_char_in_string=lambda index: index >= 44), 33)
# When the def line isn't split, this returns which doesn't match the
# split line test.
eq(start(is_char_in_string=lambda index: index < 44), 44)
def test_set_lo(self):
code = (
'"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a,\n'
' b=True):\n'
' pass\n'
)
p = self.parser
p.set_code(code)
# Previous character is not a newline.
with self.assertRaises(AssertionError):
p.set_lo(5)
# A value of 0 doesn't change self.code.
p.set_lo(0)
self.assertEqual(p.code, code)
# An index that is preceded by a newline.
p.set_lo(44)
self.assertEqual(p.code, code[44:])
def test_study1(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
study = p._study1
(NONE, BACKSLASH, FIRST, NEXT, BRACKET) = range(5)
TestInfo = namedtuple('TestInfo', ['string', 'goodlines',
'continuation'])
tests = (
TestInfo('', [0], NONE),
# Docstrings.
TestInfo('"""This is a complete docstring."""\n', [0, 1], NONE),
TestInfo("'''This is a complete docstring.'''\n", [0, 1], NONE),
TestInfo('"""This is a continued docstring.\n', [0, 1], FIRST),
TestInfo("'''This is a continued docstring.\n", [0, 1], FIRST),
TestInfo('"""Closing quote does not match."\n', [0, 1], FIRST),
TestInfo('"""Bracket in docstring [\n', [0, 1], FIRST),
TestInfo("'''Incomplete two line docstring.\n\n", [0, 2], NEXT),
# Single-quoted strings.
TestInfo('"This is a complete string."\n', [0, 1], NONE),
TestInfo('"This is an incomplete string.\n', [0, 1], NONE),
TestInfo("'This is more incomplete.\n\n", [0, 1, 2], NONE),
# Comment (backslash does not continue comments).
TestInfo('# Comment\\\n', [0, 1], NONE),
# Brackets.
TestInfo('("""Complete string in bracket"""\n', [0, 1], BRACKET),
TestInfo('("""Open string in bracket\n', [0, 1], FIRST),
TestInfo('a = (1 + 2) - 5 *\\\n', [0, 1], BACKSLASH), # No bracket.
TestInfo('\n def function1(self, a,\n b):\n',
[0, 1, 3], NONE),
TestInfo('\n def function1(self, a,\\\n', [0, 1, 2], BRACKET),
TestInfo('\n def function1(self, a,\n', [0, 1, 2], BRACKET),
TestInfo('())\n', [0, 1], NONE), # Extra closer.
TestInfo(')(\n', [0, 1], BRACKET), # Extra closer.
# For the mismatched example, it doesn't look like continuation.
TestInfo('{)(]\n', [0, 1], NONE), # Mismatched.
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string) # resets study_level
study()
eq(p.study_level, 1)
eq(p.goodlines, test.goodlines)
eq(p.continuation, test.continuation)
# Called again, just returns without reprocessing.
self.assertIsNone(study())
def test_get_continuation_type(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
gettype = p.get_continuation_type
(NONE, BACKSLASH, FIRST, NEXT, BRACKET) = range(5)
TestInfo = namedtuple('TestInfo', ['string', 'continuation'])
tests = (
TestInfo('', NONE),
TestInfo('"""This is a continuation docstring.\n', FIRST),
TestInfo("'''This is a multiline-continued docstring.\n\n", NEXT),
TestInfo('a = (1 + 2) - 5 *\\\n', BACKSLASH),
TestInfo('\n def function1(self, a,\\\n', BRACKET)
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(gettype(), test.continuation)
def test_study2(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
study = p._study2
TestInfo = namedtuple('TestInfo', ['string', 'start', 'end', 'lastch',
'openbracket', 'bracketing'])
tests = (
TestInfo('', 0, 0, '', None, ((0, 0),)),
TestInfo("'''This is a multiline continuation docstring.\n\n",
0, 48, "'", None, ((0, 0), (0, 1), (48, 0))),
TestInfo(' # Comment\\\n',
0, 12, '', None, ((0, 0), (1, 1), (12, 0))),
# A comment without a space is a special case
TestInfo(' #Comment\\\n',
0, 0, '', None, ((0, 0),)),
# Backslash continuation.
TestInfo('a = (1 + 2) - 5 *\\\n',
0, 19, '*', None, ((0, 0), (4, 1), (11, 0))),
# Bracket continuation with close.
TestInfo('\n def function1(self, a,\n b):\n',
1, 48, ':', None, ((1, 0), (17, 1), (46, 0))),
# Bracket continuation with unneeded backslash.
TestInfo('\n def function1(self, a,\\\n',
1, 28, ',', 17, ((1, 0), (17, 1))),
# Bracket continuation.
TestInfo('\n def function1(self, a,\n',
1, 27, ',', 17, ((1, 0), (17, 1))),
# Bracket continuation with comment at end of line with text.
TestInfo('\n def function1(self, a, # End of line comment.\n',
1, 51, ',', 17, ((1, 0), (17, 1), (28, 2), (51, 1))),
# Multi-line statement with comment line in between code lines.
TestInfo(' a = ["first item",\n # Comment line\n "next item",\n',
0, 55, ',', 6, ((0, 0), (6, 1), (7, 2), (19, 1),
(23, 2), (38, 1), (42, 2), (53, 1))),
TestInfo('())\n',
0, 4, ')', None, ((0, 0), (0, 1), (2, 0), (3, 0))),
TestInfo(')(\n', 0, 3, '(', 1, ((0, 0), (1, 0), (1, 1))),
# Wrong closers still decrement stack level.
TestInfo('{)(]\n',
0, 5, ']', None, ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
# Character after backslash.
TestInfo(':\\a\n', 0, 4, '\\a', None, ((0, 0),)),
TestInfo('\n', 0, 0, '', None, ((0, 0),)),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
study()
eq(p.study_level, 2)
eq(p.stmt_start, test.start)
eq(p.stmt_end, test.end)
eq(p.lastch, test.lastch)
eq(p.lastopenbracketpos, test.openbracket)
eq(p.stmt_bracketing, test.bracketing)
# Called again, just returns without reprocessing.
self.assertIsNone(study())
def test_get_num_lines_in_stmt(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
getlines = p.get_num_lines_in_stmt
TestInfo = namedtuple('TestInfo', ['string', 'lines'])
tests = (
TestInfo('[x for x in a]\n', 1), # Closed on one line.
TestInfo('[x\nfor x in a\n', 2), # Not closed.
TestInfo('[x\\\nfor x in a\\\n', 2), # "", uneeded backslashes.
TestInfo('[x\nfor x in a\n]\n', 3), # Closed on multi-line.
TestInfo('\n"""Docstring comment L1"""\nL2\nL3\nL4\n', 1),
TestInfo('\n"""Docstring comment L1\nL2"""\nL3\nL4\n', 1),
TestInfo('\n"""Docstring comment L1\\\nL2\\\nL3\\\nL4\\\n', 4),
TestInfo('\n\n"""Docstring comment L1\\\nL2\\\nL3\\\nL4\\\n"""\n', 5)
)
# Blank string doesn't have enough elements in goodlines.
setcode('')
with self.assertRaises(IndexError):
getlines()
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(getlines(), test.lines)
def test_compute_bracket_indent(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
indent = p.compute_bracket_indent
TestInfo = namedtuple('TestInfo', ['string', 'spaces'])
tests = (
TestInfo('def function1(self, a,\n', 14),
# Characters after bracket.
TestInfo('\n def function1(self, a,\n', 18),
TestInfo('\n\tdef function1(self, a,\n', 18),
# No characters after bracket.
TestInfo('\n def function1(\n', 8),
TestInfo('\n\tdef function1(\n', 8),
TestInfo('\n def function1( \n', 8), # Ignore extra spaces.
TestInfo('[\n"first item",\n # Comment line\n "next item",\n', 0),
TestInfo('[\n "first item",\n # Comment line\n "next item",\n', 2),
TestInfo('["first item",\n # Comment line\n "next item",\n', 1),
TestInfo('(\n', 4),
TestInfo('(a\n', 1),
)
# Must be C_BRACKET continuation type.
setcode('def function1(self, a, b):\n')
with self.assertRaises(AssertionError):
indent()
for test in tests:
setcode(test.string)
eq(indent(), test.spaces)
def test_compute_backslash_indent(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
indent = p.compute_backslash_indent
# Must be C_BACKSLASH continuation type.
errors = (('def function1(self, a, b\\\n'), # Bracket.
(' """ (\\\n'), # Docstring.
('a = #\\\n'), # Inline comment.
)
for string in errors:
with self.subTest(string=string):
setcode(string)
with self.assertRaises(AssertionError):
indent()
TestInfo = namedtuple('TestInfo', ('string', 'spaces'))
tests = (TestInfo('a = (1 + 2) - 5 *\\\n', 4),
TestInfo('a = 1 + 2 - 5 *\\\n', 4),
TestInfo(' a = 1 + 2 - 5 *\\\n', 8),
TestInfo(' a = "spam"\\\n', 6),
TestInfo(' a = \\\n"a"\\\n', 4),
TestInfo(' a = #\\\n"a"\\\n', 5),
TestInfo('a == \\\n', 2),
TestInfo('a != \\\n', 2),
# Difference between containing = and those not.
TestInfo('\\\n', 2),
TestInfo(' \\\n', 6),
TestInfo('\t\\\n', 6),
TestInfo('a\\\n', 3),
TestInfo('{}\\\n', 4),
TestInfo('(1 + 2) - 5 *\\\n', 3),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(indent(), test.spaces)
def test_get_base_indent_string(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
baseindent = p.get_base_indent_string
TestInfo = namedtuple('TestInfo', ['string', 'indent'])
tests = (TestInfo('', ''),
TestInfo('def a():\n', ''),
TestInfo('\tdef a():\n', '\t'),
TestInfo(' def a():\n', ' '),
TestInfo(' def a(\n', ' '),
TestInfo('\t\n def a(\n', ' '),
TestInfo('\t\n # Comment.\n', ' '),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(baseindent(), test.indent)
def test_is_block_opener(self):
yes = self.assertTrue
no = self.assertFalse
p = self.parser
setcode = p.set_code
opener = p.is_block_opener
TestInfo = namedtuple('TestInfo', ['string', 'assert_'])
tests = (
TestInfo('def a():\n', yes),
TestInfo('\n def function1(self, a,\n b):\n', yes),
TestInfo(':\n', yes),
TestInfo('a:\n', yes),
TestInfo('):\n', yes),
TestInfo('(:\n', yes),
TestInfo('":\n', no),
TestInfo('\n def function1(self, a,\n', no),
TestInfo('def function1(self, a):\n pass\n', no),
TestInfo('# A comment:\n', no),
TestInfo('"""A docstring:\n', no),
TestInfo('"""A docstring:\n', no),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
test.assert_(opener())
def test_is_block_closer(self):
yes = self.assertTrue
no = self.assertFalse
p = self.parser
setcode = p.set_code
closer = p.is_block_closer
TestInfo = namedtuple('TestInfo', ['string', 'assert_'])
tests = (
TestInfo('return\n', yes),
TestInfo('\tbreak\n', yes),
TestInfo(' continue\n', yes),
TestInfo(' raise\n', yes),
TestInfo('pass \n', yes),
TestInfo('pass\t\n', yes),
TestInfo('return #\n', yes),
TestInfo('raised\n', no),
TestInfo('returning\n', no),
TestInfo('# return\n', no),
TestInfo('"""break\n', no),
TestInfo('"continue\n', no),
TestInfo('def function1(self, a):\n pass\n', yes),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
test.assert_(closer())
def test_get_last_stmt_bracketing(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
bracketing = p.get_last_stmt_bracketing
TestInfo = namedtuple('TestInfo', ['string', 'bracket'])
tests = (
TestInfo('', ((0, 0),)),
TestInfo('a\n', ((0, 0),)),
TestInfo('()()\n', ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
TestInfo('(\n)()\n', ((0, 0), (0, 1), (3, 0), (3, 1), (5, 0))),
TestInfo('()\n()\n', ((3, 0), (3, 1), (5, 0))),
TestInfo('()(\n)\n', ((0, 0), (0, 1), (2, 0), (2, 1), (5, 0))),
TestInfo('(())\n', ((0, 0), (0, 1), (1, 2), (3, 1), (4, 0))),
TestInfo('(\n())\n', ((0, 0), (0, 1), (2, 2), (4, 1), (5, 0))),
# Same as matched test.
TestInfo('{)(]\n', ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
TestInfo('(((())\n',
((0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (5, 3), (6, 2))),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(bracketing(), test.bracket)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 39.802998 | 84 | 0.48074 |
from idlelib import pyparse
import unittest
from collections import namedtuple
class ParseMapTest(unittest.TestCase):
def test_parsemap(self):
keepwhite = {ord(c): ord(c) for c in ' \t\n\r'}
mapping = pyparse.ParseMap(keepwhite)
self.assertEqual(mapping[ord('\t')], ord('\t'))
self.assertEqual(mapping[ord('a')], ord('x'))
self.assertEqual(mapping[1000], ord('x'))
def test_trans(self):
parser = pyparse.Parser(4, 4)
self.assertEqual('\t a([{b}])b"c\'d\n'.translate(pyparse.trans),
'xxx(((x)))x"x\'x\n')
class PyParseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = pyparse.Parser(indentwidth=4, tabwidth=4)
@classmethod
def tearDownClass(cls):
del cls.parser
def test_init(self):
self.assertEqual(self.parser.indentwidth, 4)
self.assertEqual(self.parser.tabwidth, 4)
def test_set_code(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
with self.assertRaises(AssertionError):
setcode('a')
tests = ('',
'a\n')
for string in tests:
with self.subTest(string=string):
setcode(string)
eq(p.code, string)
eq(p.study_level, 0)
def test_find_good_parse_start(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
start = p.find_good_parse_start
# Split def across lines.
setcode('"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a,\n'
' b=True):\n'
' pass\n'
)
# No value sent for is_char_in_string().
self.assertIsNone(start())
# Make text look like a string. This returns pos as the start
# position, but it's set to None.
self.assertIsNone(start(is_char_in_string=lambda index: True))
# found a good start position.
eq(start(is_char_in_string=lambda index: False), 44)
# If the beginning of the def line is not in a string, then it
# returns that as the index.
eq(start(is_char_in_string=lambda index: index > 44), 44)
# If the beginning of the def line is in a string, then it
# looks for a previous index.
eq(start(is_char_in_string=lambda index: index >= 44), 33)
# If everything before the 'def' is in a string, then returns None.
# The non-continuation def line returns 44 (see below).
eq(start(is_char_in_string=lambda index: index < 44), None)
# Code without extra line break in def line - mostly returns the same
# values.
setcode('"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a, b=True):\n'
' pass\n'
)
eq(start(is_char_in_string=lambda index: False), 44)
eq(start(is_char_in_string=lambda index: index > 44), 44)
eq(start(is_char_in_string=lambda index: index >= 44), 33)
# When the def line isn't split, this returns which doesn't match the
# split line test.
eq(start(is_char_in_string=lambda index: index < 44), 44)
def test_set_lo(self):
code = (
'"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a,\n'
' b=True):\n'
' pass\n'
)
p = self.parser
p.set_code(code)
# Previous character is not a newline.
with self.assertRaises(AssertionError):
p.set_lo(5)
# A value of 0 doesn't change self.code.
p.set_lo(0)
self.assertEqual(p.code, code)
p.set_lo(44)
self.assertEqual(p.code, code[44:])
def test_study1(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
study = p._study1
(NONE, BACKSLASH, FIRST, NEXT, BRACKET) = range(5)
TestInfo = namedtuple('TestInfo', ['string', 'goodlines',
'continuation'])
tests = (
TestInfo('', [0], NONE),
TestInfo('"""This is a complete docstring."""\n', [0, 1], NONE),
TestInfo("'''This is a complete docstring.'''\n", [0, 1], NONE),
TestInfo('"""This is a continued docstring.\n', [0, 1], FIRST),
TestInfo("'''This is a continued docstring.\n", [0, 1], FIRST),
TestInfo('"""Closing quote does not match."\n', [0, 1], FIRST),
TestInfo('"""Bracket in docstring [\n', [0, 1], FIRST),
TestInfo("'''Incomplete two line docstring.\n\n", [0, 2], NEXT),
# Single-quoted strings.
TestInfo('"This is a complete string."\n', [0, 1], NONE),
TestInfo('"This is an incomplete string.\n', [0, 1], NONE),
TestInfo("'This is more incomplete.\n\n", [0, 1, 2], NONE),
# Comment (backslash does not continue comments).
TestInfo('# Comment\\\n', [0, 1], NONE),
# Brackets.
TestInfo('("""Complete string in bracket"""\n', [0, 1], BRACKET),
TestInfo('("""Open string in bracket\n', [0, 1], FIRST),
TestInfo('a = (1 + 2) - 5 *\\\n', [0, 1], BACKSLASH), # No bracket.
TestInfo('\n def function1(self, a,\n b):\n',
[0, 1, 3], NONE),
TestInfo('\n def function1(self, a,\\\n', [0, 1, 2], BRACKET),
TestInfo('\n def function1(self, a,\n', [0, 1, 2], BRACKET),
TestInfo('())\n', [0, 1], NONE), # Extra closer.
TestInfo(')(\n', [0, 1], BRACKET), # Extra closer.
# For the mismatched example, it doesn't look like continuation.
TestInfo('{)(]\n', [0, 1], NONE),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
study()
eq(p.study_level, 1)
eq(p.goodlines, test.goodlines)
eq(p.continuation, test.continuation)
self.assertIsNone(study())
def test_get_continuation_type(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
gettype = p.get_continuation_type
(NONE, BACKSLASH, FIRST, NEXT, BRACKET) = range(5)
TestInfo = namedtuple('TestInfo', ['string', 'continuation'])
tests = (
TestInfo('', NONE),
TestInfo('"""This is a continuation docstring.\n', FIRST),
TestInfo("'''This is a multiline-continued docstring.\n\n", NEXT),
TestInfo('a = (1 + 2) - 5 *\\\n', BACKSLASH),
TestInfo('\n def function1(self, a,\\\n', BRACKET)
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(gettype(), test.continuation)
def test_study2(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
study = p._study2
TestInfo = namedtuple('TestInfo', ['string', 'start', 'end', 'lastch',
'openbracket', 'bracketing'])
tests = (
TestInfo('', 0, 0, '', None, ((0, 0),)),
TestInfo("'''This is a multiline continuation docstring.\n\n",
0, 48, "'", None, ((0, 0), (0, 1), (48, 0))),
TestInfo(' # Comment\\\n',
0, 12, '', None, ((0, 0), (1, 1), (12, 0))),
# A comment without a space is a special case
TestInfo(' #Comment\\\n',
0, 0, '', None, ((0, 0),)),
# Backslash continuation.
TestInfo('a = (1 + 2) - 5 *\\\n',
0, 19, '*', None, ((0, 0), (4, 1), (11, 0))),
# Bracket continuation with close.
TestInfo('\n def function1(self, a,\n b):\n',
1, 48, ':', None, ((1, 0), (17, 1), (46, 0))),
# Bracket continuation with unneeded backslash.
TestInfo('\n def function1(self, a,\\\n',
1, 28, ',', 17, ((1, 0), (17, 1))),
# Bracket continuation.
TestInfo('\n def function1(self, a,\n',
1, 27, ',', 17, ((1, 0), (17, 1))),
# Bracket continuation with comment at end of line with text.
TestInfo('\n def function1(self, a, # End of line comment.\n',
1, 51, ',', 17, ((1, 0), (17, 1), (28, 2), (51, 1))),
# Multi-line statement with comment line in between code lines.
TestInfo(' a = ["first item",\n # Comment line\n "next item",\n',
0, 55, ',', 6, ((0, 0), (6, 1), (7, 2), (19, 1),
(23, 2), (38, 1), (42, 2), (53, 1))),
TestInfo('())\n',
0, 4, ')', None, ((0, 0), (0, 1), (2, 0), (3, 0))),
TestInfo(')(\n', 0, 3, '(', 1, ((0, 0), (1, 0), (1, 1))),
# Wrong closers still decrement stack level.
TestInfo('{)(]\n',
0, 5, ']', None, ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
# Character after backslash.
TestInfo(':\\a\n', 0, 4, '\\a', None, ((0, 0),)),
TestInfo('\n', 0, 0, '', None, ((0, 0),)),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
study()
eq(p.study_level, 2)
eq(p.stmt_start, test.start)
eq(p.stmt_end, test.end)
eq(p.lastch, test.lastch)
eq(p.lastopenbracketpos, test.openbracket)
eq(p.stmt_bracketing, test.bracketing)
# Called again, just returns without reprocessing.
self.assertIsNone(study())
def test_get_num_lines_in_stmt(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
getlines = p.get_num_lines_in_stmt
TestInfo = namedtuple('TestInfo', ['string', 'lines'])
tests = (
TestInfo('[x for x in a]\n', 1), # Closed on one line.
TestInfo('[x\nfor x in a\n', 2), # Not closed.
TestInfo('[x\\\nfor x in a\\\n', 2), # "", uneeded backslashes.
TestInfo('[x\nfor x in a\n]\n', 3), # Closed on multi-line.
TestInfo('\n"""Docstring comment L1"""\nL2\nL3\nL4\n', 1),
TestInfo('\n"""Docstring comment L1\nL2"""\nL3\nL4\n', 1),
TestInfo('\n"""Docstring comment L1\\\nL2\\\nL3\\\nL4\\\n', 4),
TestInfo('\n\n"""Docstring comment L1\\\nL2\\\nL3\\\nL4\\\n"""\n', 5)
)
# Blank string doesn't have enough elements in goodlines.
setcode('')
with self.assertRaises(IndexError):
getlines()
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(getlines(), test.lines)
def test_compute_bracket_indent(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
indent = p.compute_bracket_indent
TestInfo = namedtuple('TestInfo', ['string', 'spaces'])
tests = (
TestInfo('def function1(self, a,\n', 14),
TestInfo('\n def function1(self, a,\n', 18),
TestInfo('\n\tdef function1(self, a,\n', 18),
TestInfo('\n def function1(\n', 8),
TestInfo('\n\tdef function1(\n', 8),
TestInfo('\n def function1( \n', 8),
TestInfo('[\n"first item",\n # Comment line\n "next item",\n', 0),
TestInfo('[\n "first item",\n # Comment line\n "next item",\n', 2),
TestInfo('["first item",\n # Comment line\n "next item",\n', 1),
TestInfo('(\n', 4),
TestInfo('(a\n', 1),
)
setcode('def function1(self, a, b):\n')
with self.assertRaises(AssertionError):
indent()
for test in tests:
setcode(test.string)
eq(indent(), test.spaces)
def test_compute_backslash_indent(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
indent = p.compute_backslash_indent
errors = (('def function1(self, a, b\\\n'),
(' """ (\\\n'), # Docstring.
('a = #\\\n'), # Inline comment.
)
for string in errors:
with self.subTest(string=string):
setcode(string)
with self.assertRaises(AssertionError):
indent()
TestInfo = namedtuple('TestInfo', ('string', 'spaces'))
tests = (TestInfo('a = (1 + 2) - 5 *\\\n', 4),
TestInfo('a = 1 + 2 - 5 *\\\n', 4),
TestInfo(' a = 1 + 2 - 5 *\\\n', 8),
TestInfo(' a = "spam"\\\n', 6),
TestInfo(' a = \\\n"a"\\\n', 4),
TestInfo(' a = #\\\n"a"\\\n', 5),
TestInfo('a == \\\n', 2),
TestInfo('a != \\\n', 2),
# Difference between containing = and those not.
TestInfo('\\\n', 2),
TestInfo(' \\\n', 6),
TestInfo('\t\\\n', 6),
TestInfo('a\\\n', 3),
TestInfo('{}\\\n', 4),
TestInfo('(1 + 2) - 5 *\\\n', 3),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(indent(), test.spaces)
def test_get_base_indent_string(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
baseindent = p.get_base_indent_string
TestInfo = namedtuple('TestInfo', ['string', 'indent'])
tests = (TestInfo('', ''),
TestInfo('def a():\n', ''),
TestInfo('\tdef a():\n', '\t'),
TestInfo(' def a():\n', ' '),
TestInfo(' def a(\n', ' '),
TestInfo('\t\n def a(\n', ' '),
TestInfo('\t\n # Comment.\n', ' '),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(baseindent(), test.indent)
def test_is_block_opener(self):
yes = self.assertTrue
no = self.assertFalse
p = self.parser
setcode = p.set_code
opener = p.is_block_opener
TestInfo = namedtuple('TestInfo', ['string', 'assert_'])
tests = (
TestInfo('def a():\n', yes),
TestInfo('\n def function1(self, a,\n b):\n', yes),
TestInfo(':\n', yes),
TestInfo('a:\n', yes),
TestInfo('):\n', yes),
TestInfo('(:\n', yes),
TestInfo('":\n', no),
TestInfo('\n def function1(self, a,\n', no),
TestInfo('def function1(self, a):\n pass\n', no),
TestInfo('# A comment:\n', no),
TestInfo('"""A docstring:\n', no),
TestInfo('"""A docstring:\n', no),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
test.assert_(opener())
def test_is_block_closer(self):
yes = self.assertTrue
no = self.assertFalse
p = self.parser
setcode = p.set_code
closer = p.is_block_closer
TestInfo = namedtuple('TestInfo', ['string', 'assert_'])
tests = (
TestInfo('return\n', yes),
TestInfo('\tbreak\n', yes),
TestInfo(' continue\n', yes),
TestInfo(' raise\n', yes),
TestInfo('pass \n', yes),
TestInfo('pass\t\n', yes),
TestInfo('return #\n', yes),
TestInfo('raised\n', no),
TestInfo('returning\n', no),
TestInfo('# return\n', no),
TestInfo('"""break\n', no),
TestInfo('"continue\n', no),
TestInfo('def function1(self, a):\n pass\n', yes),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
test.assert_(closer())
def test_get_last_stmt_bracketing(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
bracketing = p.get_last_stmt_bracketing
TestInfo = namedtuple('TestInfo', ['string', 'bracket'])
tests = (
TestInfo('', ((0, 0),)),
TestInfo('a\n', ((0, 0),)),
TestInfo('()()\n', ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
TestInfo('(\n)()\n', ((0, 0), (0, 1), (3, 0), (3, 1), (5, 0))),
TestInfo('()\n()\n', ((3, 0), (3, 1), (5, 0))),
TestInfo('()(\n)\n', ((0, 0), (0, 1), (2, 0), (2, 1), (5, 0))),
TestInfo('(())\n', ((0, 0), (0, 1), (1, 2), (3, 1), (4, 0))),
TestInfo('(\n())\n', ((0, 0), (0, 1), (2, 2), (4, 1), (5, 0))),
TestInfo('{)(]\n', ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
TestInfo('(((())\n',
((0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (5, 3), (6, 2))),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(bracketing(), test.bracket)
if __name__ == '__main__':
unittest.main(verbosity=2)
| true | true |
f715502ca425f6aa72c39448be881ca66c5df2be | 1,703 | py | Python | game.py | zty111/tonghua | 71b0ecc857f72ab9bb7882358c15587117cdcd6a | [
"MIT"
] | null | null | null | game.py | zty111/tonghua | 71b0ecc857f72ab9bb7882358c15587117cdcd6a | [
"MIT"
] | null | null | null | game.py | zty111/tonghua | 71b0ecc857f72ab9bb7882358c15587117cdcd6a | [
"MIT"
] | null | null | null | from keras.saving.save import load_model
from board import GameState, Player
from encoder import Encoder
from agent import Agent
import scoring
from board import Move, Point
from tiaocan import bot_name
class My():
def select_move(self, game_state):
print("请输入点坐标和方向(或弃权):")
x, y, d = input().split(' ')
x, y, d = int(x), int(y), int(d)
move = Move(Point(x, y), d)
if game_state.is_valid_move(move): return move
else: return Move.pass_turn()
def simulate_game(black_agent, white_agent):
    """Play one full game between two agents, printing moves and the result.

    Black moves via ``greedy_move(game)``; white moves via
    ``select_move(game, False)``.  A black win is reported as "You win!",
    otherwise "Bot Zero win!".
    """
    print('Starting the game!')
    game = GameState.new_game()
    agents = {
        Player.black: black_agent,
        Player.white: white_agent
    }
    while not game.is_over():
        game.print()
        if game.next_player == Player.black: next_move = agents[game.next_player].greedy_move(game)
        else: next_move = agents[game.next_player].select_move(game, False)
        if next_move.is_pass: print("Pass!")
        else: print(next_move.point, next_move.direction)
        game = game.apply_move(next_move)
    game_result = scoring.compute_game_result(game)
    if game_result == Player.black:
        print("You win!")
    else:
        print("Bot Zero win!")
# Build two agents sharing the same trained model and board encoder.
encoder = Encoder()
model = load_model(bot_name)
black_agent = Agent(model, encoder, rounds_per_move = 160, c = 2.0)
white_agent = Agent(model, encoder, rounds_per_move = 160, c = 2.0)
print()
# The Chinese prompts below explain the input format: 3 space-separated
# numbers -- a point (x, y) in 1..7 and a direction index 0..23 laid out
# on the 5x5 grid printed underneath ("棋子" marks the piece's own cell).
print("欢迎对局!")
print("输入为3个以空格隔开的数字")
print("前2个为点坐标(1~7)")
print("第3个为方向(0~23),具体如下")
print("0\t1\t2\t3\t4")
print("5\t6\t7\t8\t9")
print("10\t11\t棋子\t12\t13")
print("14\t15\t16\t17\t18")
print("19\t20\t21\t22\t23")
print("不要输错哦!")
simulate_game(black_agent, white_agent) | 28.864407 | 99 | 0.664709 | from keras.saving.save import load_model
from board import GameState, Player
from encoder import Encoder
from agent import Agent
import scoring
from board import Move, Point
from tiaocan import bot_name
class My():
def select_move(self, game_state):
print("请输入点坐标和方向(或弃权):")
x, y, d = input().split(' ')
x, y, d = int(x), int(y), int(d)
move = Move(Point(x, y), d)
if game_state.is_valid_move(move): return move
else: return Move.pass_turn()
def simulate_game(black_agent, white_agent):
print('Starting the game!')
game = GameState.new_game()
agents = {
Player.black: black_agent,
Player.white: white_agent
}
while not game.is_over():
game.print()
if game.next_player == Player.black: next_move = agents[game.next_player].greedy_move(game)
else: next_move = agents[game.next_player].select_move(game, False)
if next_move.is_pass: print("Pass!")
else: print(next_move.point, next_move.direction)
game = game.apply_move(next_move)
game_result = scoring.compute_game_result(game)
if game_result == Player.black:
print("You win!")
else:
print("Bot Zero win!")
encoder = Encoder()
model = load_model(bot_name)
black_agent = Agent(model, encoder, rounds_per_move = 160, c = 2.0)
white_agent = Agent(model, encoder, rounds_per_move = 160, c = 2.0)
print()
print("欢迎对局!")
print("输入为3个以空格隔开的数字")
print("前2个为点坐标(1~7)")
print("第3个为方向(0~23),具体如下")
print("0\t1\t2\t3\t4")
print("5\t6\t7\t8\t9")
print("10\t11\t棋子\t12\t13")
print("14\t15\t16\t17\t18")
print("19\t20\t21\t22\t23")
print("不要输错哦!")
simulate_game(black_agent, white_agent) | true | true |
f715517d413224bd0e232c087a3dc3de8fac5148 | 2,409 | py | Python | DATA/10_64_64_64_1E7/analy.py | Aieener/SUS_3D | 8fc5a768a2339238939522baf96bce98bf61902e | [
"MIT"
] | null | null | null | DATA/10_64_64_64_1E7/analy.py | Aieener/SUS_3D | 8fc5a768a2339238939522baf96bce98bf61902e | [
"MIT"
] | null | null | null | DATA/10_64_64_64_1E7/analy.py | Aieener/SUS_3D | 8fc5a768a2339238939522baf96bce98bf61902e | [
"MIT"
] | null | null | null | # analy.py
# A python program to analyze the SUS weighting function in order to reach the following goals:
# 1. plot the weight function
# 2. generate the normalized distribution for Z=1
# 3. extrapolate the N distribution for different Zs given by the user.
# Author: Yuding Ai
# Date: 2015 Oct 23
import math
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
def PN():
    """Read the SUS weighting function and build the normalized P(N).

    Reads one log-weight per line (first whitespace-separated token) from
    ``SUSWeight_function.txt`` in the current directory.

    Returns:
        tuple: ``(WF, PN)`` where ``WF`` is the list of (possibly shifted)
        log-weights and ``PN`` is the normalized probability distribution
        ``exp(WF) / sum(exp(WF))``.
    """
    WF = []
    with open("SUSWeight_function.txt", "r") as file:
        for line in file:
            words = line.split()
            WF.append(float(words[0]))
    maxi = max(WF)
    # Shift large log-weights down so math.exp() cannot overflow; matches the
    # historical behavior of only shifting when the maximum exceeds 500.
    if maxi > 500:
        for i in range(len(WF)):
            WF[i] = WF[i] - maxi + 500
    # Bug fix: the distribution is now built for *all* inputs.  Previously
    # the exp() loop lived inside the ``maxi > 500`` branch, so an empty PN
    # was returned whenever the maximum log-weight was <= 500.
    weights = [math.exp(w) for w in WF]
    total = sum(weights)  # hoisted: avoids O(n^2) re-summation per element
    PN = [w / total for w in weights]
    return WF, PN
def Pplot(PN, z):
    """Plot the number distribution P(N) at activity z and save it as a PNG."""
    label = 'P(N;Z=' + str(z) + ')'
    figure = plt.figure()
    plt.plot(PN, '+b', markersize=3)
    plt.xlabel('N')
    plt.ylabel(label)
    # Output file is named after the axis label, e.g. "P(N;Z=1).png".
    figure.savefig(label + '.png', dpi=300, bbox_inches='tight')
def enlargePplot(PN, z):
    """Plot a zoomed-in view (N in [8600, 9600]) of P(N) and save it as a PNG."""
    tag = 'P(N;Z=' + str(z) + ')'
    figure = plt.figure()
    plt.plot(PN, '+b-', markersize=3, linewidth=0.1)
    # Fixed window over the high-density region of interest.
    plt.xlim(8600, 9600)
    plt.ylim(0, 0.007)
    plt.ylabel(tag)
    plt.xlabel('N')
    figure.savefig('ENL' + tag + '.png', dpi=300, bbox_inches='tight')
def Wplot(WN):
    """Plot the weighting function versus N and save it to WeightingFunc.png."""
    figure = plt.figure()
    plt.plot(WN, '+r', markersize=1)
    plt.xlabel('N')
    plt.ylabel('Weighting Function')
    figure.savefig('WeightingFunc.png', dpi=300, bbox_inches='tight')
def exploPN(W, z):
    """Extrapolate the number distribution P(N) to activity z.

    Each log-weight ``W[i]`` is re-weighted by ``i * ln(z)`` (histogram
    reweighting), then exponentiated and normalized.

    Args:
        W: list of log-weights indexed by particle number N (not modified).
        z: target activity/fugacity; must be > 0.

    Returns:
        list: normalized probability distribution over N.
    """
    # Bug fixes vs. the original implementation:
    #  * W is no longer mutated in place (main() had to re-read W before
    #    every call to work around that side effect).
    #  * the distribution is computed for all inputs; previously an empty
    #    list was returned whenever the shifted maximum was <= 500.
    log_z = math.log(z)
    shifted = [w + i * log_z for i, w in enumerate(W)]
    maxi = max(shifted)
    if maxi > 500:
        # Shift down so math.exp() cannot overflow (same threshold as PN()).
        shifted = [w - maxi + 500 for w in shifted]
    weights = [math.exp(w) for w in shifted]
    total = sum(weights)
    return [w / total for w in weights]
def main():
	"""Entry point: load the weighting function and plot it.

	The commented-out lines show how to extrapolate the distribution to
	other activities via exploPN() and plot with Pplot()/enlargePplot().
	"""
	P = PN()[1] # take the P(N;z=1)
	W = PN()[0] # take the original weighting function
	Wplot(W)
	# Pplot(P,"1")
	# Pe = exploPN(W,4.44)
	# enlargePplot(Pe,4.44)
	# for i in range(10):
	# 	W = PN()[0] # take the original weighting function
	# 	t = 3.83 + 0.02*i
	# 	Pe = exploPN(W,t)
	# 	# Pplot(Pe,t)
	# 	enlargePplot(Pe,t)
main()
| 23.38835 | 95 | 0.632213 |
import math
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import rc
Palatino']})
rc('text', usetex=True)
def PN():
WF = []
PN = []
with open("SUSWeight_function.txt","r") as file:
for line in file:
words = line.split()
n = float(words[0])
WF.append(n);
maxi = max(WF)
if maxi > 500:
for i in range(len(WF)):
WF[i] = WF[i]-maxi +500
PN.append(math.exp(WF[i]));
PN = [float(i)/sum(PN) for i in PN]
return WF,PN
def Pplot(PN,z):
fig = plt.figure()
plt.plot(PN,'+b',markersize=3)
Z = str(z)
ylabel = 'P(N;Z='+ Z + ')'
plt.ylabel(ylabel)
plt.xlabel('N')
title = 'P(N;Z='+ Z + ').png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def enlargePplot(PN,z):
fig = plt.figure()
plt.plot(PN,'+b-',markersize=3,linewidth = 0.1)
plt.xlim(8600,9600)
plt.ylim(0,0.007)
Z = str(z)
ylabel = 'P(N;Z='+ Z + ')'
plt.ylabel(ylabel)
plt.xlabel('N')
title = 'ENLP(N;Z='+ Z + ').png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def Wplot(WN):
fig = plt.figure()
plt.plot(WN,'+r',markersize=1,)
plt.ylabel('Weighting Function')
plt.xlabel('N')
title = 'WeightingFunc.png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def exploPN(W,z):
P = []
for i in range(len(W)):
W[i] = W[i] + i*math.log(z)
maxi = max(W)
if maxi > 500:
for j in range(len(W)):
W[j] = W[j]-maxi +500
P.append(math.exp(W[j]));
P = [float(k)/sum(P) for k in P]
return P
def main():
P = PN()[1]
W = PN()[0]
Wplot(W)
| true | true |
f71552f33127dfdd46d5834de303cbaed5612835 | 1,326 | py | Python | asset/lambda/index.py | jialechan/cdk-elasticache-monitor | 584d1f583e934e32d80f1abea7fdc100c226b348 | [
"Apache-2.0"
] | 1 | 2020-07-27T09:15:41.000Z | 2020-07-27T09:15:41.000Z | asset/lambda/index.py | jialechan/cdk-elasticache-monitor | 584d1f583e934e32d80f1abea7fdc100c226b348 | [
"Apache-2.0"
] | 509 | 2020-08-04T07:02:41.000Z | 2022-03-28T15:05:51.000Z | asset/lambda/index.py | jialechan/cdk-elasticache-monitor | 584d1f583e934e32d80f1abea7fdc100c226b348 | [
"Apache-2.0"
] | 1 | 2020-08-28T01:13:15.000Z | 2020-08-28T01:13:15.000Z | import os
import json
import time
import urllib.parse
import urllib.request
def handler(event, context):
    """Relay SNS-delivered CloudWatch alarm notifications to a Slack webhook.

    For each SNS record in ``event``, the alarm's name, description and new
    state reason are posted to the Slack incoming-webhook URL taken from the
    SLACK_WEBHOOK_URL environment variable, together with a console link to
    the alarm.  Channel, username and icon also come from environment
    variables.
    """
    print(json.dumps(event))  # log the raw event for debugging
    slack_webhook_url = os.environ['SLACK_WEBHOOK_URL']
    channel = os.environ['CHANNEL']
    username = os.environ['USERNAME']
    icon_emoji = os.environ['ICON_EMOJI']
    for record in event.get("Records"):
        message = json.loads(record.get("Sns").get("Message"))
        title = message.get("AlarmName")
        info = message.get("AlarmDescription")
        newStateReason = message.get("NewStateReason")
        region = os.environ['AWS_REGION']
        # Deep link to this alarm in the CloudWatch console.
        log = "https://" + region + ".console.aws.amazon.com/cloudwatch/home?region=" + \
            region + "#alarmsV2:alarm/" + title + "?~(alarmStateFilter~'ALARM)"
        values = {
            "channel": channel,
            "username": username,
            "text": title + "\n" + info + "\n" + newStateReason + "\n" + "<" + log + "|AlarmState>",
            "icon_emoji": icon_emoji
        }
        params = json.dumps(values).encode('utf8')
        req = urllib.request.Request(slack_webhook_url, data=params, headers={
            'content-type': 'application/json'})
        response = urllib.request.urlopen(req)
        print(response.read())
| 28.826087 | 100 | 0.585973 | import os
import json
import time
import urllib.parse
import urllib.request
def handler(event, context):
print(json.dumps(event))
slack_webhook_url = os.environ['SLACK_WEBHOOK_URL']
channel = os.environ['CHANNEL']
username = os.environ['USERNAME']
icon_emoji = os.environ['ICON_EMOJI']
for record in event.get("Records"):
message = json.loads(record.get("Sns").get("Message"))
title = message.get("AlarmName")
info = message.get("AlarmDescription")
newStateReason = message.get("NewStateReason")
region = os.environ['AWS_REGION']
log = "https://" + region + ".console.aws.amazon.com/cloudwatch/home?region=" + \
region + "#alarmsV2:alarm/" + title + "?~(alarmStateFilter~'ALARM)"
values = {
"channel": channel,
"username": username,
"text": title + "\n" + info + "\n" + newStateReason + "\n" + "<" + log + "|AlarmState>",
"icon_emoji": icon_emoji
}
params = json.dumps(values).encode('utf8')
req = urllib.request.Request(slack_webhook_url, data=params, headers={
'content-type': 'application/json'})
response = urllib.request.urlopen(req)
print(response.read())
| true | true |
f715530ab61caacad8fa0ce7435869a1c5c114aa | 417 | py | Python | codestorm_e_learning/asgi.py | Sahiladiv/PSST_CSHTN-08 | 0cd4a5b27f16d17a410b1e7cd2596038925f7070 | [
"MIT"
] | null | null | null | codestorm_e_learning/asgi.py | Sahiladiv/PSST_CSHTN-08 | 0cd4a5b27f16d17a410b1e7cd2596038925f7070 | [
"MIT"
] | null | null | null | codestorm_e_learning/asgi.py | Sahiladiv/PSST_CSHTN-08 | 0cd4a5b27f16d17a410b1e7cd2596038925f7070 | [
"MIT"
] | null | null | null | """
ASGI config for codestorm_e_learning project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings unless the caller already set them.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'codestorm_e_learning.settings')
# The module-level ASGI callable that servers import and serve.
application = get_asgi_application()
| 24.529412 | 80 | 0.798561 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'codestorm_e_learning.settings')
application = get_asgi_application()
| true | true |
f71553a353729b9fcc5938672ff20758e7d08a89 | 1,886 | py | Python | ramjet/data_interface/tess_eclipsing_binary_metadata_manager.py | golmschenk/ramjet | 77fb4481a15088923308fda09804d80455d1a9cf | [
"Apache-2.0"
] | 3 | 2020-11-23T18:47:37.000Z | 2021-08-05T17:45:51.000Z | ramjet/data_interface/tess_eclipsing_binary_metadata_manager.py | golmschenk/ramjet | 77fb4481a15088923308fda09804d80455d1a9cf | [
"Apache-2.0"
] | 5 | 2021-08-19T00:54:57.000Z | 2022-02-10T00:15:40.000Z | ramjet/data_interface/tess_eclipsing_binary_metadata_manager.py | golmschenk/ramjet | 77fb4481a15088923308fda09804d80455d1a9cf | [
"Apache-2.0"
] | 3 | 2019-07-12T21:00:57.000Z | 2020-06-03T22:18:13.000Z | """
Code for managing the TESS eclipsing binary metadata.
"""
import pandas as pd
from pathlib import Path
from peewee import IntegerField, SchemaManager
from ramjet.data_interface.metadatabase import MetadatabaseModel, metadatabase
brian_powell_eclipsing_binary_csv_path = Path('data/tess_eclipsing_binaries/TESS_EB_catalog_23Jun.csv')
class TessEclipsingBinaryMetadata(MetadatabaseModel):
    """
    A model for the TESS eclipsing binary metadatabase table.
    """
    # TIC identifier for the target; one row per TIC id, so the column is
    # unique and indexed for fast lookups.
    tic_id = IntegerField(index=True, unique=True)
class TessEclipsingBinaryMetadataManager:
    """
    A class for managing the TESS eclipsing binary metadata.
    """
    @staticmethod
    def build_table():
        """
        Builds the TESS eclipsing binary metadata table.

        Reads TIC ids from the Brian Powell eclipsing binary CSV, drops and
        recreates the table, then bulk-inserts rows in batches of 1000.
        Indexes are dropped before the insert and recreated afterwards,
        presumably to speed up the bulk load.
        """
        print('Building TESS eclipsing binary metadata table...')
        eclipsing_binary_data_frame = pd.read_csv(brian_powell_eclipsing_binary_csv_path, usecols=['ID'])
        row_count = 0
        metadatabase.drop_tables([TessEclipsingBinaryMetadata])
        metadatabase.create_tables([TessEclipsingBinaryMetadata])
        SchemaManager(TessEclipsingBinaryMetadata).drop_indexes()
        rows = []
        for index, tic_id in enumerate(eclipsing_binary_data_frame['ID'].values):
            row = {'tic_id': tic_id}
            rows.append(row)
            row_count += 1
            if row_count % 1000 == 0:
                # Flush each 1000-row batch inside a single transaction.
                with metadatabase.atomic():
                    TessEclipsingBinaryMetadata.insert_many(rows).execute()
                rows = []
        # Insert whatever partial batch remains after the loop.
        with metadatabase.atomic():
            TessEclipsingBinaryMetadata.insert_many(rows).execute()
        SchemaManager(TessEclipsingBinaryMetadata).create_indexes()
        print(f'Table built. {row_count} rows added.')
if __name__ == '__main__':
metadata_manager = TessEclipsingBinaryMetadataManager()
metadata_manager.build_table()
| 34.925926 | 105 | 0.696713 | import pandas as pd
from pathlib import Path
from peewee import IntegerField, SchemaManager
from ramjet.data_interface.metadatabase import MetadatabaseModel, metadatabase
brian_powell_eclipsing_binary_csv_path = Path('data/tess_eclipsing_binaries/TESS_EB_catalog_23Jun.csv')
class TessEclipsingBinaryMetadata(MetadatabaseModel):
tic_id = IntegerField(index=True, unique=True)
class TessEclipsingBinaryMetadataManager:
@staticmethod
def build_table():
print('Building TESS eclipsing binary metadata table...')
eclipsing_binary_data_frame = pd.read_csv(brian_powell_eclipsing_binary_csv_path, usecols=['ID'])
row_count = 0
metadatabase.drop_tables([TessEclipsingBinaryMetadata])
metadatabase.create_tables([TessEclipsingBinaryMetadata])
SchemaManager(TessEclipsingBinaryMetadata).drop_indexes()
rows = []
for index, tic_id in enumerate(eclipsing_binary_data_frame['ID'].values):
row = {'tic_id': tic_id}
rows.append(row)
row_count += 1
if row_count % 1000 == 0:
with metadatabase.atomic():
TessEclipsingBinaryMetadata.insert_many(rows).execute()
rows = []
with metadatabase.atomic():
TessEclipsingBinaryMetadata.insert_many(rows).execute()
SchemaManager(TessEclipsingBinaryMetadata).create_indexes()
print(f'Table built. {row_count} rows added.')
if __name__ == '__main__':
metadata_manager = TessEclipsingBinaryMetadataManager()
metadata_manager.build_table()
| true | true |
f715544b39ad2bd0403cdd0c656584e7498e39cb | 508 | py | Python | instagram_profile/settings.py | barseghyanartur/django-instagram-profile | 1bb36551114d26e7c75f0ddf8f79db68fc02101e | [
"BSD-3-Clause"
] | 1 | 2020-12-03T22:01:27.000Z | 2020-12-03T22:01:27.000Z | instagram_profile/settings.py | barseghyanartur/django-instagram-profile | 1bb36551114d26e7c75f0ddf8f79db68fc02101e | [
"BSD-3-Clause"
] | null | null | null | instagram_profile/settings.py | barseghyanartur/django-instagram-profile | 1bb36551114d26e7c75f0ddf8f79db68fc02101e | [
"BSD-3-Clause"
] | null | null | null | from configparser import RawConfigParser
from django.conf import settings
# Parse env.ini from the Django project root; the [instagram] section holds
# the account name, OAuth endpoints and app credentials exported below.
env = RawConfigParser()
env.read(settings.BASE_DIR + '/env.ini')
INSTAGRAM_ACCOUNT = env['instagram']['account']
INSTAGRAM_AUTH_URL = env['instagram']['auth_url']
INSTAGRAM_ACCESS_TOKEN_URL = env['instagram']['access_token_url']
INSTAGRAM_APP_ID = env['instagram']['app_id']
INSTAGRAM_SECRET = env['instagram']['secret']
INSTAGRAM_REDIRECT_URL = env['instagram']['redirect_url']
INSTAGRAM_MEDIA_URL = env['instagram']['media_url']
| 33.866667 | 65 | 0.775591 | from configparser import RawConfigParser
from django.conf import settings
env = RawConfigParser()
env.read(settings.BASE_DIR + '/env.ini')
INSTAGRAM_ACCOUNT = env['instagram']['account']
INSTAGRAM_AUTH_URL = env['instagram']['auth_url']
INSTAGRAM_ACCESS_TOKEN_URL = env['instagram']['access_token_url']
INSTAGRAM_APP_ID = env['instagram']['app_id']
INSTAGRAM_SECRET = env['instagram']['secret']
INSTAGRAM_REDIRECT_URL = env['instagram']['redirect_url']
INSTAGRAM_MEDIA_URL = env['instagram']['media_url']
| true | true |
f715555b72413c60a579b8d7d74d37a54ae765af | 1,832 | py | Python | sorting/merge-sort-recursive.py | thehimalayanleo/Algorithm-Practice | aa63d90de7a3a72910ead1017574e2cca485009f | [
"MIT"
] | null | null | null | sorting/merge-sort-recursive.py | thehimalayanleo/Algorithm-Practice | aa63d90de7a3a72910ead1017574e2cca485009f | [
"MIT"
] | null | null | null | sorting/merge-sort-recursive.py | thehimalayanleo/Algorithm-Practice | aa63d90de7a3a72910ead1017574e2cca485009f | [
"MIT"
] | null | null | null | ## A recursive implementation of merge sort.
## Author: AJ
## test case 1 45 849 904 79 48942 7
class sorting:
    """Container for an integer list with a recursive merge sort."""

    def __init__(self):
        # The working array; filled by get_data() or assigned directly.
        self.arr = []

    def get_data(self):
        """Read whitespace-separated integers from stdin into self.arr."""
        self.arr = list(map(int, input().split()))
        return self.arr

    def merge_sort(self, array):
        """Return a sorted copy of ``array`` using recursive merge sort.

        Bug fix: the base case is now ``len(array) <= 1`` so an empty input
        returns immediately; the original ``len(array) == 1`` check recursed
        forever (RecursionError) on ``[]``.
        """
        if len(array) <= 1:
            return list(array)
        mid = len(array) // 2  # approximate middle point
        left = self.merge_sort(array[:mid])
        right = self.merge_sort(array[mid:])
        return self._merge(left, right)

    def _merge(self, left, right):
        """Merge two sorted lists into one sorted list (two-pointer merge)."""
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        # At most one of these tails is non-empty.
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    def runner(self):
        """Replace self.arr with a sorted copy of itself."""
        self.arr = self.merge_sort(self.arr)

    def print_arr(self):
        """Print the elements of self.arr on one line, space separated."""
        for ele in self.arr:
            print(str(ele) + ' ', end='')
        print('')
# Driver: read integers from stdin, show them, sort, then show them again.
array = sorting()
array.get_data()
array.print_arr()
# NOTE(review): merge_sort() returns a new list, so this bare call discards
# its result; the actual in-place sort happens in runner() below.
array.merge_sort(array.arr)
array.runner()
array.print_arr()
| 29.548387 | 74 | 0.554039 | self.arr = list(map(int, input().split()))
return self.arr
def merge_sort(self, array):
if len(array) == 1:
return array
mid = len(array)//2
left = self.merge_sort(array[:mid])
right = self.merge_sort(array[mid:])
left_indx = 0
right_indx = 0
complete_arr = []
for indx in range(len(left) + len(right)):
if (left_indx < len(left)) and (right_indx < len(right)):
if (left[left_indx] < right[right_indx]):
complete_arr.append(left[left_indx])
left_indx+=1
else:
complete_arr.append(right[right_indx])
right_indx += 1
elif left_indx == len(left):
for indx2 in range(right_indx, len(right)):
complete_arr.append(right[indx2])
right_indx = len(right)
else:
for indx2 in range(left_indx, len(left)):
complete_arr.append(left[indx2])
left_indx = len(left)
return complete_arr
def runner(self):
self.arr = self.merge_sort(self.arr)
def print_arr(self):
for ele in self.arr:
print(str(ele) + ' ', end='')
print('')
array = sorting()
array.get_data()
array.print_arr()
array.merge_sort(array.arr)
array.runner()
array.print_arr()
| true | true |
f71555608a70f602ee61e7b668ed75d79fe49531 | 2,393 | py | Python | pynmodl/tests/parsing/test_scoping.py | tjbanks/pynmodl | b7d6bb378711ce19cd651561c08146e3571d986a | [
"MIT"
] | 9 | 2017-06-03T19:33:46.000Z | 2019-10-27T22:19:37.000Z | pynmodl/tests/parsing/test_scoping.py | tjbanks/pynmodl | b7d6bb378711ce19cd651561c08146e3571d986a | [
"MIT"
] | 11 | 2017-10-13T16:09:10.000Z | 2019-05-08T16:37:11.000Z | pynmodl/tests/parsing/test_scoping.py | tjbanks/pynmodl | b7d6bb378711ce19cd651561c08146e3571d986a | [
"MIT"
] | 2 | 2017-08-29T14:29:45.000Z | 2019-12-31T19:45:02.000Z | import os
from textx.metamodel import metamodel_from_file
from textx.model import children_of_type
from pynmodl.nmodl import NModlCompiler
mm = metamodel_from_file(
os.path.join(os.path.dirname(__file__), '../../grammar/nmodl.tx'))
mm.register_obj_processors({'VarRef': NModlCompiler().handle_varref})
def refs_in(node):
    """Return the VarRef nodes contained in ``node`` (via textx children_of_type)."""
    return children_of_type('VarRef', node)
def test_scoping():
    """Variable references resolve to the innermost enclosing declaration."""
    p = """
    PARAMETER {
        v (mV)
    }
    STATE { x }
    INITIAL {
        LOCAL v
        v = 10
        x = -v : v is local
    }
    FUNCTION f(v) {
        if(2 > 1){
            LOCAL v
            v = 123
            f = v : v is local
        }
        else{
            f = -v : v is funcpar
        }
    }
    DERIVATIVE dx {
        x' = f(x) + v : v is par
    }
    """
    blocks = mm.model_from_str(p).blocks
    (parameter, state, initial, function_f, derivative) = blocks
    # In INITIAL, v refers to the block-local declaration.
    locals_in_init = children_of_type('Local', initial)
    assert refs_in(initial)[0].var == locals_in_init[0]
    # In f's if-branch, v is the inner LOCAL; in the else-branch it is the
    # function parameter.
    locals_in_function_f = children_of_type('Local', function_f)
    assert refs_in(function_f)[0].var == locals_in_function_f[0]
    assert refs_in(function_f)[2].var == locals_in_function_f[0]
    assert type(refs_in(function_f)[-1].var).__name__ == 'FuncPar'
    # In DERIVATIVE, v falls back to the global PARAMETER.
    assert refs_in(derivative)[-1].var == parameter.parameters[0]
def test_multiple_locals():
    """LOCALs with the same name in different blocks resolve independently."""
    p = """
    PARAMETER {
        v (mV)
    }
    STATE { n }
    FUNCTION alpha(x)(/ms){
        LOCAL a
        a = 0.1
        if(fabs(x) > a){
            alpha=a*x/(1-exp(-x))
        }else{
            alpha=a/(1-0.5*x)
        }
    }
    DERIVATIVE dn {
        LOCAL a
        a = 10
        n' = alpha((v + 55)/a)}
    """
    blocks = mm.model_from_str(p).blocks
    (parameter, state, alpha, dn) = blocks
    locals_in_alpha = children_of_type('Local', alpha)
    alpha_a = locals_in_alpha[0]
    alpha_x = alpha.pars[0]
    # Each reference inside alpha resolves to alpha's own LOCAL a, its
    # parameter x, or the function name itself (the return variable).
    assert refs_in(alpha)[0].var == alpha_a  # _a_ = 0.1
    assert refs_in(alpha)[1].var == alpha_x  # fabs(_x_) > a
    assert refs_in(alpha)[2].var == alpha_a  # fabs(x) > _a_
    assert refs_in(alpha)[3].var == alpha  # _alpha_=a*x/(1-exp(-x))
    assert refs_in(alpha)[4].var == alpha_a  # alpha=_a_*x/(1-exp(-x))
    assert refs_in(alpha)[5].var == alpha_x  # alpha=a*_x_/(1-exp(-x))
from textx.metamodel import metamodel_from_file
from textx.model import children_of_type
from pynmodl.nmodl import NModlCompiler
mm = metamodel_from_file(
os.path.join(os.path.dirname(__file__), '../../grammar/nmodl.tx'))
mm.register_obj_processors({'VarRef': NModlCompiler().handle_varref})
def refs_in(node):
return children_of_type('VarRef', node)
def test_scoping():
p = """
PARAMETER {
v (mV)
}
STATE { x }
INITIAL {
LOCAL v
v = 10
x = -v : v is local
}
FUNCTION f(v) {
if(2 > 1){
LOCAL v
v = 123
f = v : v is local
}
else{
f = -v : v is funcpar
}
}
DERIVATIVE dx {
x' = f(x) + v : v is par
}
"""
blocks = mm.model_from_str(p).blocks
(parameter, state, initial, function_f, derivative) = blocks
locals_in_init = children_of_type('Local', initial)
assert refs_in(initial)[0].var == locals_in_init[0]
locals_in_function_f = children_of_type('Local', function_f)
assert refs_in(function_f)[0].var == locals_in_function_f[0]
assert refs_in(function_f)[2].var == locals_in_function_f[0]
assert type(refs_in(function_f)[-1].var).__name__ == 'FuncPar'
assert refs_in(derivative)[-1].var == parameter.parameters[0]
def test_multiple_locals():
p = """
PARAMETER {
v (mV)
}
STATE { n }
FUNCTION alpha(x)(/ms){
LOCAL a
a = 0.1
if(fabs(x) > a){
alpha=a*x/(1-exp(-x))
}else{
alpha=a/(1-0.5*x)
}
}
DERIVATIVE dn {
LOCAL a
a = 10
n' = alpha((v + 55)/a)}
"""
blocks = mm.model_from_str(p).blocks
(parameter, state, alpha, dn) = blocks
locals_in_alpha = children_of_type('Local', alpha)
alpha_a = locals_in_alpha[0]
alpha_x = alpha.pars[0]
assert refs_in(alpha)[0].var == alpha_a
assert refs_in(alpha)[1].var == alpha_x
assert refs_in(alpha)[2].var == alpha_a
assert refs_in(alpha)[3].var == alpha
assert refs_in(alpha)[4].var == alpha_a
assert refs_in(alpha)[5].var == alpha_x
| true | true |
f715558281aaabbc79dcce8d745a1065f13cec44 | 27,919 | py | Python | chalice/cli/__init__.py | sw33tr0ll/chalice | 8c48771ef0fe2ae97a00e337ca5828e709b132d3 | [
"Apache-2.0"
] | 1 | 2020-12-19T07:34:28.000Z | 2020-12-19T07:34:28.000Z | chalice/cli/__init__.py | sw33tr0ll/chalice | 8c48771ef0fe2ae97a00e337ca5828e709b132d3 | [
"Apache-2.0"
] | 1 | 2020-12-20T21:08:52.000Z | 2020-12-20T21:08:52.000Z | chalice/cli/__init__.py | sw33tr0ll/chalice | 8c48771ef0fe2ae97a00e337ca5828e709b132d3 | [
"Apache-2.0"
] | null | null | null | """Command line interface for chalice.
Contains commands for deploying chalice.
"""
import logging
import os
import platform
import sys
import tempfile
import shutil
import traceback
import functools
import json
import botocore.exceptions
import click
from typing import Dict, Any, Optional, cast # noqa
from chalice import __version__ as chalice_version
from chalice.app import Chalice # noqa
from chalice.awsclient import TypedAWSClient
from chalice.awsclient import ReadTimeout
from chalice.cli.factory import CLIFactory
from chalice.cli.factory import NoSuchFunctionError
from chalice.config import Config # noqa
from chalice.logs import display_logs, LogRetrieveOptions
from chalice.utils import create_zip_file
from chalice.deploy.validate import validate_routes, validate_python_version
from chalice.deploy.validate import ExperimentalFeatureError
from chalice.utils import getting_started_prompt, UI, serialize_to_json
from chalice.constants import CONFIG_VERSION, TEMPLATE_APP, GITIGNORE
from chalice.constants import DEFAULT_STAGE_NAME
from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME
from chalice.local import LocalDevServer # noqa
from chalice.constants import DEFAULT_HANDLER_NAME
from chalice.invoke import UnhandledLambdaError
from chalice.deploy.swagger import TemplatedSwaggerGenerator
from chalice.deploy.planner import PlanEncoder
from chalice.deploy.appgraph import ApplicationGraphBuilder, GraphPrettyPrint
def _configure_logging(level, format_string=None):
# type: (int, Optional[str]) -> None
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger('')
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def create_new_project_skeleton(project_name, profile=None):
    # type: (str, Optional[str]) -> None
    """Write the initial file layout for a brand new chalice project.

    Creates ``<project_name>/.chalice/config.json``, an empty
    ``requirements.txt``, a templated ``app.py`` and a ``.gitignore``.
    ``profile``, when given, is recorded in the project config.
    """
    project_config = {
        'version': CONFIG_VERSION,
        'app_name': project_name,
        'stages': {
            DEFAULT_STAGE_NAME: {
                'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME,
            }
        }
    }
    if profile is not None:
        project_config['profile'] = profile
    os.makedirs(os.path.join(project_name, '.chalice'))
    config_path = os.path.join(project_name, '.chalice', 'config.json')
    with open(config_path, 'w') as out:
        out.write(serialize_to_json(project_config))
    # An empty requirements.txt so users have somewhere to add dependencies.
    with open(os.path.join(project_name, 'requirements.txt'), 'w'):
        pass
    with open(os.path.join(project_name, 'app.py'), 'w') as out:
        out.write(TEMPLATE_APP % project_name)
    with open(os.path.join(project_name, '.gitignore'), 'w') as out:
        out.write(GITIGNORE)
def get_system_info():
    # type: () -> str
    """Return a short ``python x.y.z, <os> <release>`` description string."""
    major, minor, micro = sys.version_info[:3]
    python_part = "python {}.{}.{}".format(major, minor, micro)
    os_part = "{} {}".format(platform.system().lower(), platform.release())
    return "{}, {}".format(python_part, os_part)
@click.group()
@click.version_option(version=chalice_version,
                      message='%(prog)s %(version)s, {}'
                      .format(get_system_info()))
@click.option('--project-dir',
              help='The project directory path (absolute or relative).'
              'Defaults to CWD')
@click.option('--debug/--no-debug',
              default=False,
              help='Print debug logs to stderr.')
@click.pass_context
def cli(ctx, project_dir, debug=False):
    # type: (click.Context, str, bool) -> None
    # Root command group: normalize --project-dir to an absolute path,
    # optionally enable debug logging, mark the process as CLI-mode, and
    # stash shared state (project_dir/debug/CLIFactory) on ctx.obj for the
    # subcommands.  Note the process chdirs into the project directory.
    if project_dir is None:
        project_dir = os.getcwd()
    elif not os.path.isabs(project_dir):
        project_dir = os.path.abspath(project_dir)
    if debug is True:
        _configure_logging(logging.DEBUG)
    _configure_cli_env_vars()
    ctx.obj['project_dir'] = project_dir
    ctx.obj['debug'] = debug
    ctx.obj['factory'] = CLIFactory(project_dir, debug, environ=os.environ)
    os.chdir(project_dir)
def _configure_cli_env_vars():
# type: () -> None
# This will set chalice specific env vars so users can detect if
# we're running a Chalice CLI command. This is useful if you want
# conditional behavior only when we're actually running in Lambda
# in your app.py file.
os.environ['AWS_CHALICE_CLI_MODE'] = 'true'
@cli.command()
@click.option('--host', default='127.0.0.1')
@click.option('--port', default=8000, type=click.INT)
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help='Name of the Chalice stage for the local server to use.')
@click.option('--autoreload/--no-autoreload',
              default=True,
              help='Automatically restart server when code changes.')
@click.pass_context
def local(ctx, host='127.0.0.1', port=8000, stage=DEFAULT_STAGE_NAME,
          autoreload=True):
    # type: (click.Context, str, int, str, bool) -> None
    # Run the app on a local dev HTTP server.  With autoreload (the default)
    # the server runs in a worker process that is restarted on code changes
    # and this function exits the process with the worker's return code;
    # without autoreload it serves in-process forever.
    factory = ctx.obj['factory']  # type: CLIFactory
    from chalice.cli import reloader
    # We don't create the server here because that will bind the
    # socket and we only want to do this in the worker process.
    server_factory = functools.partial(
        create_local_server, factory, host, port, stage)
    # When running `chalice local`, a stdout logger is configured
    # so you'll see the same stdout logging as you would when
    # running in lambda. This is configuring the root logger.
    # The app-specific logger (app.log) will still continue
    # to work.
    logging.basicConfig(
        stream=sys.stdout, level=logging.INFO, format='%(message)s')
    if autoreload:
        project_dir = factory.create_config_obj(
            chalice_stage_name=stage).project_dir
        rc = reloader.run_with_reloader(
            server_factory, os.environ, project_dir)
        # Click doesn't sys.exit() with the RC this function. The
        # recommended way to do this is to use sys.exit() directly,
        # see: https://github.com/pallets/click/issues/747
        sys.exit(rc)
    run_local_server(factory, host, port, stage)
def create_local_server(factory, host, port, stage):
    # type: (CLIFactory, str, int, str) -> LocalDevServer
    """Build a LocalDevServer for the given chalice stage.

    Routes are validated up front so the local server fails fast on apps
    that `chalice deploy` would reject anyway.
    """
    stage_config = factory.create_config_obj(
        chalice_stage_name=stage
    )
    # Check that `chalice deploy` would let us deploy these routes, otherwise
    # there is no point in testing locally.
    validate_routes(stage_config.chalice_app.routes)
    return factory.create_local_server(
        stage_config.chalice_app, stage_config, host, port)
def run_local_server(factory, host, port, stage):
    # type: (CLIFactory, str, int, str) -> None
    """Create a local dev server for ``stage`` and serve until interrupted."""
    create_local_server(factory, host, port, stage).serve_forever()
@cli.command()
@click.option('--autogen-policy/--no-autogen-policy',
              default=None,
              help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
              help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help=('Name of the Chalice stage to deploy to. '
                    'Specifying a new chalice stage will create '
                    'an entirely new set of AWS resources.'))
@click.option('--connection-timeout',
              type=int,
              help=('Overrides the default botocore connection '
                    'timeout.'))
@click.pass_context
def deploy(ctx, autogen_policy, profile, api_gateway_stage, stage,
           connection_timeout):
    # type: (click.Context, Optional[bool], str, str, str, int) -> None
    # Build the config and botocore session from the CLI options, run the
    # default deployer for the requested chalice stage, then print a report
    # of the deployed resources.
    factory = ctx.obj['factory']  # type: CLIFactory
    factory.profile = profile
    config = factory.create_config_obj(
        chalice_stage_name=stage, autogen_policy=autogen_policy,
        api_gateway_stage=api_gateway_stage,
    )
    session = factory.create_botocore_session(
        connection_timeout=connection_timeout)
    ui = UI()
    d = factory.create_default_deployer(session=session,
                                        config=config,
                                        ui=ui)
    deployed_values = d.deploy(config, chalice_stage_name=stage)
    reporter = factory.create_deployment_reporter(ui=ui)
    reporter.display_report(deployed_values)
@cli.group()
def dev():
    # type: () -> None
    """Development and debugging commands for chalice.

    All the commands under the "chalice dev" namespace are provided
    to help chalice developers introspect the internals of chalice.
    They are also useful for users to better understand the chalice
    deployment process.

    These commands are provided for informational purposes only.
    There is NO guarantee of backwards compatibility for any
    "chalice dev" commands. Do not rely on the output of these commands.

    These commands allow introspection of chalice internals, and the
    internals of chalice are subject to change as needed.
    """
    # NOTE: the docstring above doubles as the `chalice dev --help` text
    # rendered by click, so keep it user-facing.
@dev.command()
@click.option('--autogen-policy/--no-autogen-policy',
              default=None,
              help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
              help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help=('Name of the Chalice stage to deploy to. '
                    'Specifying a new chalice stage will create '
                    'an entirely new set of AWS resources.'))
@click.pass_context
def plan(ctx, autogen_policy, profile, api_gateway_stage, stage):
    # type: (click.Context, Optional[bool], str, str, str) -> None
    """Generate and display deployment plan.

    This command will calculate and pretty print the deployment plan
    without actually executing the plan. It's primarily used to better
    understand the chalice deployment process.
    """
    # Same setup as `deploy`, but uses the plan-only deployer, which prints
    # the computed plan without touching any AWS resources.
    factory = ctx.obj['factory']  # type: CLIFactory
    factory.profile = profile
    config = factory.create_config_obj(
        chalice_stage_name=stage, autogen_policy=autogen_policy,
        api_gateway_stage=api_gateway_stage,
    )
    session = factory.create_botocore_session()
    ui = UI()
    d = factory.create_plan_only_deployer(
        session=session, config=config, ui=ui)
    d.deploy(config, chalice_stage_name=stage)
@dev.command()
@click.option('--autogen-policy/--no-autogen-policy',
              default=None,
              help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
              help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help=('Name of the Chalice stage to deploy to. '
                    'Specifying a new chalice stage will create '
                    'an entirely new set of AWS resources.'))
@click.pass_context
def appgraph(ctx, autogen_policy, profile, api_gateway_stage, stage):
    # type: (click.Context, Optional[bool], str, str, str) -> None
    """Generate and display the application graph."""
    # Resolve the CLI factory and build the config for this stage.
    cli_factory = ctx.obj['factory']  # type: CLIFactory
    cli_factory.profile = profile
    stage_config = cli_factory.create_config_obj(
        chalice_stage_name=stage, autogen_policy=autogen_policy,
        api_gateway_stage=api_gateway_stage,
    )
    # Build the application resource graph and pretty-print it.
    builder = ApplicationGraphBuilder()
    app_graph = builder.build(stage_config, stage)
    GraphPrettyPrint(UI()).display_graph(app_graph)
@cli.command('invoke')
@click.option('-n', '--name', metavar='NAME', required=True,
              help=('The name of the function to invoke. '
                    'This is the logical name of the function. If the '
                    'function is decorated by app.route use the name '
                    'api_handler instead.'))
@click.option('--profile', metavar='PROFILE',
              help='Override profile at deploy time.')
@click.option('--stage', metavar='STAGE', default=DEFAULT_STAGE_NAME,
              help=('Name of the Chalice stage to deploy to. '
                    'Specifying a new chalice stage will create '
                    'an entirely new set of AWS resources.'))
@click.pass_context
def invoke(ctx, name, profile, stage):
    # type: (click.Context, str, str, str) -> None
    """Invoke the deployed lambda function NAME.
    Reads payload from STDIN.
    """
    factory = ctx.obj['factory']  # type: CLIFactory
    factory.profile = profile
    try:
        invoke_handler = factory.create_lambda_invoke_handler(name, stage)
        payload = factory.create_stdin_reader().read()
        invoke_handler.invoke(payload)
    except NoSuchFunctionError as e:
        # No deployed function matches the logical name; exit code 2
        # distinguishes "not found" from the invocation failures below.
        err = click.ClickException(
            "could not find a lambda function named %s." % e.name)
        err.exit_code = 2
        raise err
    except botocore.exceptions.ClientError as e:
        # Surface the AWS-side error code and message to the user.
        error = e.response['Error']
        err = click.ClickException(
            "got '%s' exception back from Lambda\n%s"
            % (error['Code'], error['Message']))
        err.exit_code = 1
        raise err
    except UnhandledLambdaError:
        # The function itself raised; details were already displayed.
        err = click.ClickException(
            "Unhandled exception in Lambda function, details above.")
        err.exit_code = 1
        raise err
    except ReadTimeout as e:
        # NOTE(review): relies on ReadTimeout exposing a `.message`
        # attribute (not standard on Python 3 exceptions) -- confirm
        # chalice.awsclient.ReadTimeout defines it.
        err = click.ClickException(e.message)
        err.exit_code = 1
        raise err
@cli.command('delete')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help='Name of the Chalice stage to delete.')
@click.pass_context
def delete(ctx, profile, stage):
    # type: (click.Context, str, str) -> None
    # Tear down all resources for the given chalice stage by running
    # the deletion deployer against the stage's config.
    cli_factory = ctx.obj['factory']  # type: CLIFactory
    cli_factory.profile = profile
    stage_config = cli_factory.create_config_obj(chalice_stage_name=stage)
    botocore_session = cli_factory.create_botocore_session()
    deleter = cli_factory.create_deletion_deployer(
        session=botocore_session, ui=UI())
    deleter.deploy(stage_config, chalice_stage_name=stage)
@cli.command()
@click.option('--num-entries', default=None, type=int,
              help='Max number of log entries to show.')
@click.option('--include-lambda-messages/--no-include-lambda-messages',
              default=False,
              help='Controls whether or not lambda log messages are included.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help='Name of the Chalice stage to get logs for.')
@click.option('-n', '--name',
              help='The name of the lambda function to retrieve logs from.',
              default=DEFAULT_HANDLER_NAME)
@click.option('-s', '--since',
              help=('Only display logs since the provided time. If the '
                    '-f/--follow option is specified, then this value will '
                    'default to 10 minutes from the current time. Otherwise '
                    'by default all log messages are displayed. This value '
                    'can either be an ISO8601 formatted timestamp or a '
                    'relative time. For relative times provide a number '
                    'and a single unit. Units can be "s" for seconds, '
                    '"m" for minutes, "h" for hours, "d" for days, and "w" '
                    'for weeks. For example "5m" would indicate to display '
                    'logs starting five minutes in the past.'),
              default=None)
@click.option('-f', '--follow/--no-follow',
              default=False,
              help=('Continuously poll for new log messages. Note that this '
                    'is a best effort attempt, and in certain cases can '
                    'miss log messages. This option is intended for '
                    'interactive usage only.'))
@click.option('--profile', help='The profile to use for fetching logs.')
@click.pass_context
def logs(ctx, num_entries, include_lambda_messages, stage,
         name, since, follow, profile):
    # type: (click.Context, int, bool, str, str, str, bool, str) -> None
    # Fetch and display CloudWatch logs for one deployed lambda function.
    factory = ctx.obj['factory']  # type: CLIFactory
    factory.profile = profile
    config = factory.create_config_obj(stage, False)
    deployed = config.deployed_resources(stage)
    # NOTE(review): if NAME is not among the deployed resources this
    # command silently prints nothing.  Also, unlike `url`, there is no
    # `deployed is not None` guard here -- confirm deployed_resources
    # cannot return None on this path.
    if name in deployed.resource_names():
        lambda_arn = deployed.resource_values(name)['lambda_arn']
        session = factory.create_botocore_session()
        retriever = factory.create_log_retriever(
            session, lambda_arn, follow)
        options = LogRetrieveOptions.create(
            max_entries=num_entries,
            since=since,
            include_lambda_messages=include_lambda_messages,
        )
        display_logs(retriever, sys.stdout, options)
@cli.command('gen-policy')
@click.option('--filename',
              help='The filename to analyze. Otherwise app.py is assumed.')
@click.pass_context
def gen_policy(ctx, filename):
    # type: (click.Context, str) -> None
    # Statically analyze the application source file and print a
    # generated IAM policy document as JSON.
    from chalice import policy
    target = filename
    if target is None:
        # Default to the project's app.py when no file was specified.
        target = os.path.join(ctx.obj['project_dir'], 'app.py')
    if not os.path.isfile(target):
        click.echo("App file does not exist: %s" % target, err=True)
        raise click.Abort()
    with open(target) as f:
        source_code = f.read()
    policy_doc = policy.policy_from_source_code(source_code)
    click.echo(serialize_to_json(policy_doc))
@cli.command('new-project')
@click.argument('project_name', required=False)
@click.option('--profile', required=False)
def new_project(project_name, profile):
    # type: (str, str) -> None
    # Create a new chalice project skeleton on disk.  Falls back to the
    # interactive getting-started prompt when no name was provided.
    if project_name is None:
        project_name = getting_started_prompt(click)
    if os.path.isdir(project_name):
        # Refuse to clobber an existing directory.
        click.echo("Directory already exists: %s" % project_name, err=True)
        raise click.Abort()
    create_new_project_skeleton(project_name, profile)
    # Check the local python version against a freshly created default
    # Config (see validate_python_version).
    validate_python_version(Config.create())
@cli.command('url')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help='Name of the Chalice stage to get the deployed URL for.')
@click.pass_context
def url(ctx, stage):
    # type: (click.Context, str) -> None
    # Print the deployed Rest API URL for the given chalice stage.
    cli_factory = ctx.obj['factory']  # type: CLIFactory
    stage_config = cli_factory.create_config_obj(stage)
    deployed = stage_config.deployed_resources(stage)
    # Guard clause: bail out (exit code 2) when no rest_api record
    # exists for this stage.
    if deployed is None or 'rest_api' not in deployed.resource_names():
        exc = click.ClickException(
            "Could not find a record of a Rest API in chalice stage: '%s'"
            % stage)
        exc.exit_code = 2
        raise exc
    click.echo(deployed.resource_values('rest_api')['rest_api_url'])
@cli.command('generate-sdk')
@click.option('--sdk-type', default='javascript',
              type=click.Choice(['javascript']))
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help='Name of the Chalice stage to generate an SDK for.')
@click.argument('outdir')
@click.pass_context
def generate_sdk(ctx, sdk_type, stage, outdir):
    # type: (click.Context, str, str, str) -> None
    # Download a client SDK for the deployed REST API into OUTDIR using
    # the API Gateway SDK export.
    factory = ctx.obj['factory']  # type: CLIFactory
    config = factory.create_config_obj(stage)
    session = factory.create_botocore_session()
    client = TypedAWSClient(session)
    deployed = config.deployed_resources(stage)
    if deployed is not None and 'rest_api' in deployed.resource_names():
        rest_api_id = deployed.resource_values('rest_api')['rest_api_id']
        api_gateway_stage = config.api_gateway_stage
        client.download_sdk(rest_api_id, outdir,
                            api_gateway_stage=api_gateway_stage,
                            sdk_type=sdk_type)
    else:
        # No record of a deployed rest_api for this stage.
        click.echo("Could not find API ID, has this application "
                   "been deployed?", err=True)
        raise click.Abort()
@cli.command('generate-models')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help="Chalice Stage for which to generate models.")
@click.pass_context
def generate_models(ctx, stage):
    # type: (click.Context, str) -> None
    """Generate a model from Chalice routes.
    Currently only supports generating Swagger 2.0 models.
    """
    cli_factory = ctx.obj['factory']  # type: CLIFactory
    stage_config = cli_factory.create_config_obj(stage)
    if not stage_config.chalice_app.routes:
        # Nothing to model without any @app.route definitions.
        click.echo('No REST API found to generate model from.')
        raise click.Abort()
    # Render the app's routes as a Swagger 2.0 document and print it.
    swagger_doc = TemplatedSwaggerGenerator().generate_swagger(
        stage_config.chalice_app,
    )
    writer = UI()
    writer.write(json.dumps(swagger_doc, indent=4, cls=PlanEncoder))
    writer.write('\n')
@cli.command('package')
@click.option('--pkg-format', default='cloudformation',
              help=('Specify the provisioning engine to use for '
                    'template output. Chalice supports both '
                    'CloudFormation and Terraform. Default '
                    'is CloudFormation.'),
              type=click.Choice(['cloudformation', 'terraform']))
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help="Chalice Stage to package.")
@click.option('--single-file', is_flag=True,
              default=False,
              help=("Create a single packaged file. "
                    "By default, the 'out' argument "
                    "specifies a directory in which the "
                    "package assets will be placed. If "
                    "this argument is specified, a single "
                    "zip file will be created instead. CloudFormation Only."))
@click.option('--merge-template',
              help=('Specify a JSON or YAML template to be merged '
                    'into the generated template. This is useful '
                    'for adding resources to a Chalice template or '
                    'modify values in the template. CloudFormation Only.'))
@click.option('--template-format', default='json',
              type=click.Choice(['json', 'yaml'], case_sensitive=False),
              help=('Specify if the generated template should be serialized '
                    'as either JSON or YAML. CloudFormation only.'))
@click.option('--profile', help='Override profile at packaging time.')
@click.argument('out')
@click.pass_context
def package(ctx, single_file, stage, merge_template,
            out, pkg_format, template_format, profile):
    # type: (click.Context, bool, str, str, str, str, str, str) -> None
    # Package the app into deployment artifacts (CloudFormation or
    # Terraform) written to OUT.
    factory = ctx.obj['factory']  # type: CLIFactory
    factory.profile = profile
    config = factory.create_config_obj(stage)
    options = factory.create_package_options()
    packager = factory.create_app_packager(config, options,
                                           pkg_format,
                                           template_format,
                                           merge_template)
    # These three options are CloudFormation-only; reject them for
    # terraform output.
    if pkg_format == 'terraform' and (merge_template or
                                      single_file or
                                      template_format != 'json'):
        # I don't see any reason we couldn't support --single-file for
        # terraform if we wanted to.
        click.echo((
            "Terraform format does not support "
            "--merge-template, --single-file, or --template-format"))
        raise click.Abort()
    if single_file:
        # Package into a temporary directory, zip it to OUT, then make
        # sure the temp dir is removed even if packaging fails.
        dirname = tempfile.mkdtemp()
        try:
            packager.package_app(config, dirname, stage)
            create_zip_file(source_dir=dirname, outfile=out)
        finally:
            shutil.rmtree(dirname)
    else:
        packager.package_app(config, out, stage)
@cli.command('generate-pipeline')
@click.option('--pipeline-version',
              default='v1',
              type=click.Choice(['v1', 'v2']),
              help='Which version of the pipeline template to generate.')
@click.option('-i', '--codebuild-image',
              help=("Specify default codebuild image to use. "
                    "This option must be provided when using a python "
                    "version besides 2.7."))
@click.option('-s', '--source', default='codecommit',
              type=click.Choice(['codecommit', 'github']),
              help=("Specify the input source. The default value of "
                    "'codecommit' will create a CodeCommit repository "
                    "for you. The 'github' value allows you to "
                    "reference an existing GitHub repository."))
@click.option('-b', '--buildspec-file',
              help=("Specify path for buildspec.yml file. "
                    "By default, the build steps are included in the "
                    "generated cloudformation template. If this option "
                    "is provided, a buildspec.yml will be generated "
                    "as a separate file and not included in the cfn "
                    "template. This allows you to make changes to how "
                    "the project is built without having to redeploy "
                    "a CloudFormation template. This file should be "
                    "named 'buildspec.yml' and placed in the root "
                    "directory of your app."))
@click.argument('filename')
@click.pass_context
def generate_pipeline(ctx, pipeline_version, codebuild_image, source,
                      buildspec_file, filename):
    # type: (click.Context, str, str, str, str, str) -> None
    """Generate a cloudformation template for a starter CD pipeline.
    This command will write a starter cloudformation template to
    the filename you provide.  It contains a CodeCommit repo,
    a CodeBuild stage for packaging your chalice app, and a
    CodePipeline stage to deploy your application using cloudformation.
    You can use any AWS SDK or the AWS CLI to deploy this stack.
    Here's an example using the AWS CLI:
    \b
        $ chalice generate-pipeline pipeline.json
        $ aws cloudformation deploy --stack-name mystack \b
            --template-file pipeline.json --capabilities CAPABILITY_IAM
    """
    from chalice import pipeline
    factory = ctx.obj['factory']  # type: CLIFactory
    config = factory.create_config_obj()
    # Typed-None placeholder so both branches below assign to a name
    # with a single declared type (BasePipelineTemplate).
    p = cast(pipeline.BasePipelineTemplate, None)
    if pipeline_version == 'v1':
        p = pipeline.CreatePipelineTemplateLegacy()
    else:
        p = pipeline.CreatePipelineTemplateV2()
    params = pipeline.PipelineParameters(
        app_name=config.app_name,
        lambda_python_version=config.lambda_python_version,
        codebuild_image=codebuild_image,
        code_source=source,
        pipeline_version=pipeline_version,
    )
    output = p.create_template(params)
    if buildspec_file:
        # Pull the inline build steps out of the template into a
        # standalone buildspec.yml so the build steps can change
        # without redeploying the CloudFormation template.
        extractor = pipeline.BuildSpecExtractor()
        buildspec_contents = extractor.extract_buildspec(output)
        with open(buildspec_file, 'w') as f:
            f.write(buildspec_contents)
    with open(filename, 'w') as f:
        f.write(serialize_to_json(output))
def main():
    # type: () -> int
    """CLI entry point: run the click app and map errors to exit codes."""
    # click's dynamic attrs will allow us to pass through
    # 'obj' via the context object, so we're ignoring
    # these error messages from pylint because we know it's ok.
    # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
    try:
        return cli(obj={})
    except botocore.exceptions.NoRegionError:
        # Fixed typo in the user-facing hint: "our" -> "your".
        click.echo("No region configured. "
                   "Either export the AWS_DEFAULT_REGION "
                   "environment variable or set the "
                   "region value in your ~/.aws/config file.", err=True)
        return 2
    except ExperimentalFeatureError as e:
        click.echo(str(e))
        return 2
    except Exception:
        # Top-level boundary: print the full traceback to stderr and
        # exit non-zero instead of crashing with a raw trace.
        click.echo(traceback.format_exc(), err=True)
        return 2
| 41.732436 | 79 | 0.647767 | import logging
import os
import platform
import sys
import tempfile
import shutil
import traceback
import functools
import json
import botocore.exceptions
import click
from typing import Dict, Any, Optional, cast
from chalice import __version__ as chalice_version
from chalice.app import Chalice
from chalice.awsclient import TypedAWSClient
from chalice.awsclient import ReadTimeout
from chalice.cli.factory import CLIFactory
from chalice.cli.factory import NoSuchFunctionError
from chalice.config import Config
from chalice.logs import display_logs, LogRetrieveOptions
from chalice.utils import create_zip_file
from chalice.deploy.validate import validate_routes, validate_python_version
from chalice.deploy.validate import ExperimentalFeatureError
from chalice.utils import getting_started_prompt, UI, serialize_to_json
from chalice.constants import CONFIG_VERSION, TEMPLATE_APP, GITIGNORE
from chalice.constants import DEFAULT_STAGE_NAME
from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME
from chalice.local import LocalDevServer
from chalice.constants import DEFAULT_HANDLER_NAME
from chalice.invoke import UnhandledLambdaError
from chalice.deploy.swagger import TemplatedSwaggerGenerator
from chalice.deploy.planner import PlanEncoder
from chalice.deploy.appgraph import ApplicationGraphBuilder, GraphPrettyPrint
def _configure_logging(level, format_string=None):
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger('')
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def create_new_project_skeleton(project_name, profile=None):
chalice_dir = os.path.join(project_name, '.chalice')
os.makedirs(chalice_dir)
config = os.path.join(project_name, '.chalice', 'config.json')
cfg = {
'version': CONFIG_VERSION,
'app_name': project_name,
'stages': {
DEFAULT_STAGE_NAME: {
'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME,
}
}
}
if profile is not None:
cfg['profile'] = profile
with open(config, 'w') as f:
f.write(serialize_to_json(cfg))
with open(os.path.join(project_name, 'requirements.txt'), 'w'):
pass
with open(os.path.join(project_name, 'app.py'), 'w') as f:
f.write(TEMPLATE_APP % project_name)
with open(os.path.join(project_name, '.gitignore'), 'w') as f:
f.write(GITIGNORE)
def get_system_info():
python_info = "python {}.{}.{}".format(sys.version_info[0],
sys.version_info[1],
sys.version_info[2])
platform_system = platform.system().lower()
platform_release = platform.release()
platform_info = "{} {}".format(platform_system, platform_release)
return "{}, {}".format(python_info, platform_info)
@click.group()
@click.version_option(version=chalice_version,
message='%(prog)s %(version)s, {}'
.format(get_system_info()))
@click.option('--project-dir',
help='The project directory path (absolute or relative).'
'Defaults to CWD')
@click.option('--debug/--no-debug',
default=False,
help='Print debug logs to stderr.')
@click.pass_context
def cli(ctx, project_dir, debug=False):
if project_dir is None:
project_dir = os.getcwd()
elif not os.path.isabs(project_dir):
project_dir = os.path.abspath(project_dir)
if debug is True:
_configure_logging(logging.DEBUG)
_configure_cli_env_vars()
ctx.obj['project_dir'] = project_dir
ctx.obj['debug'] = debug
ctx.obj['factory'] = CLIFactory(project_dir, debug, environ=os.environ)
os.chdir(project_dir)
def _configure_cli_env_vars():
# conditional behavior only when we're actually running in Lambda
os.environ['AWS_CHALICE_CLI_MODE'] = 'true'
@cli.command()
@click.option('--host', default='127.0.0.1')
@click.option('--port', default=8000, type=click.INT)
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage for the local server to use.')
@click.option('--autoreload/--no-autoreload',
default=True,
help='Automatically restart server when code changes.')
@click.pass_context
def local(ctx, host='127.0.0.1', port=8000, stage=DEFAULT_STAGE_NAME,
autoreload=True):
factory = ctx.obj['factory']
from chalice.cli import reloader
# socket and we only want to do this in the worker process.
server_factory = functools.partial(
create_local_server, factory, host, port, stage)
# When running `chalice local`, a stdout logger is configured
# so you'll see the same stdout logging as you would when
logging.basicConfig(
stream=sys.stdout, level=logging.INFO, format='%(message)s')
if autoreload:
project_dir = factory.create_config_obj(
chalice_stage_name=stage).project_dir
rc = reloader.run_with_reloader(
server_factory, os.environ, project_dir)
# recommended way to do this is to use sys.exit() directly,
# see: https://github.com/pallets/click/issues/747
sys.exit(rc)
run_local_server(factory, host, port, stage)
def create_local_server(factory, host, port, stage):
# type: (CLIFactory, str, int, str) -> LocalDevServer
config = factory.create_config_obj(
chalice_stage_name=stage
)
app_obj = config.chalice_app
# Check that `chalice deploy` would let us deploy these routes, otherwise
# there is no point in testing locally.
routes = config.chalice_app.routes
validate_routes(routes)
server = factory.create_local_server(app_obj, config, host, port)
return server
def run_local_server(factory, host, port, stage):
# type: (CLIFactory, str, int, str) -> None
server = create_local_server(factory, host, port, stage)
server.serve_forever()
@cli.command()
@click.option('--autogen-policy/--no-autogen-policy',
default=None,
help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.option('--connection-timeout',
type=int,
help=('Overrides the default botocore connection '
'timeout.'))
@click.pass_context
def deploy(ctx, autogen_policy, profile, api_gateway_stage, stage,
connection_timeout):
# type: (click.Context, Optional[bool], str, str, str, int) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
session = factory.create_botocore_session(
connection_timeout=connection_timeout)
ui = UI()
d = factory.create_default_deployer(session=session,
config=config,
ui=ui)
deployed_values = d.deploy(config, chalice_stage_name=stage)
reporter = factory.create_deployment_reporter(ui=ui)
reporter.display_report(deployed_values)
@cli.group()
def dev():
# type: () -> None
@dev.command()
@click.option('--autogen-policy/--no-autogen-policy',
default=None,
help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.pass_context
def plan(ctx, autogen_policy, profile, api_gateway_stage, stage):
# type: (click.Context, Optional[bool], str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
session = factory.create_botocore_session()
ui = UI()
d = factory.create_plan_only_deployer(
session=session, config=config, ui=ui)
d.deploy(config, chalice_stage_name=stage)
@dev.command()
@click.option('--autogen-policy/--no-autogen-policy',
default=None,
help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.pass_context
def appgraph(ctx, autogen_policy, profile, api_gateway_stage, stage):
# type: (click.Context, Optional[bool], str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
graph_build = ApplicationGraphBuilder()
graph = graph_build.build(config, stage)
ui = UI()
GraphPrettyPrint(ui).display_graph(graph)
@cli.command('invoke')
@click.option('-n', '--name', metavar='NAME', required=True,
help=('The name of the function to invoke. '
'This is the logical name of the function. If the '
'function is decorated by app.route use the name '
'api_handler instead.'))
@click.option('--profile', metavar='PROFILE',
help='Override profile at deploy time.')
@click.option('--stage', metavar='STAGE', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.pass_context
def invoke(ctx, name, profile, stage):
# type: (click.Context, str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
try:
invoke_handler = factory.create_lambda_invoke_handler(name, stage)
payload = factory.create_stdin_reader().read()
invoke_handler.invoke(payload)
except NoSuchFunctionError as e:
err = click.ClickException(
"could not find a lambda function named %s." % e.name)
err.exit_code = 2
raise err
except botocore.exceptions.ClientError as e:
error = e.response['Error']
err = click.ClickException(
"got '%s' exception back from Lambda\n%s"
% (error['Code'], error['Message']))
err.exit_code = 1
raise err
except UnhandledLambdaError:
err = click.ClickException(
"Unhandled exception in Lambda function, details above.")
err.exit_code = 1
raise err
except ReadTimeout as e:
err = click.ClickException(e.message)
err.exit_code = 1
raise err
@cli.command('delete')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to delete.')
@click.pass_context
def delete(ctx, profile, stage):
# type: (click.Context, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(chalice_stage_name=stage)
session = factory.create_botocore_session()
d = factory.create_deletion_deployer(session=session, ui=UI())
d.deploy(config, chalice_stage_name=stage)
@cli.command()
@click.option('--num-entries', default=None, type=int,
help='Max number of log entries to show.')
@click.option('--include-lambda-messages/--no-include-lambda-messages',
default=False,
help='Controls whether or not lambda log messages are included.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to get logs for.')
@click.option('-n', '--name',
help='The name of the lambda function to retrieve logs from.',
default=DEFAULT_HANDLER_NAME)
@click.option('-s', '--since',
help=('Only display logs since the provided time. If the '
'-f/--follow option is specified, then this value will '
'default to 10 minutes from the current time. Otherwise '
'by default all log messages are displayed. This value '
'can either be an ISO8601 formatted timestamp or a '
'relative time. For relative times provide a number '
'and a single unit. Units can be "s" for seconds, '
'"m" for minutes, "h" for hours, "d" for days, and "w" '
'for weeks. For example "5m" would indicate to display '
'logs starting five minutes in the past.'),
default=None)
@click.option('-f', '--follow/--no-follow',
default=False,
help=('Continuously poll for new log messages. Note that this '
'is a best effort attempt, and in certain cases can '
'miss log messages. This option is intended for '
'interactive usage only.'))
@click.option('--profile', help='The profile to use for fetching logs.')
@click.pass_context
def logs(ctx, num_entries, include_lambda_messages, stage,
name, since, follow, profile):
# type: (click.Context, int, bool, str, str, str, bool, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(stage, False)
deployed = config.deployed_resources(stage)
if name in deployed.resource_names():
lambda_arn = deployed.resource_values(name)['lambda_arn']
session = factory.create_botocore_session()
retriever = factory.create_log_retriever(
session, lambda_arn, follow)
options = LogRetrieveOptions.create(
max_entries=num_entries,
since=since,
include_lambda_messages=include_lambda_messages,
)
display_logs(retriever, sys.stdout, options)
@cli.command('gen-policy')
@click.option('--filename',
help='The filename to analyze. Otherwise app.py is assumed.')
@click.pass_context
def gen_policy(ctx, filename):
# type: (click.Context, str) -> None
from chalice import policy
if filename is None:
filename = os.path.join(ctx.obj['project_dir'], 'app.py')
if not os.path.isfile(filename):
click.echo("App file does not exist: %s" % filename, err=True)
raise click.Abort()
with open(filename) as f:
contents = f.read()
generated = policy.policy_from_source_code(contents)
click.echo(serialize_to_json(generated))
@cli.command('new-project')
@click.argument('project_name', required=False)
@click.option('--profile', required=False)
def new_project(project_name, profile):
# type: (str, str) -> None
if project_name is None:
project_name = getting_started_prompt(click)
if os.path.isdir(project_name):
click.echo("Directory already exists: %s" % project_name, err=True)
raise click.Abort()
create_new_project_skeleton(project_name, profile)
validate_python_version(Config.create())
@cli.command('url')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to get the deployed URL for.')
@click.pass_context
def url(ctx, stage):
# type: (click.Context, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
deployed = config.deployed_resources(stage)
if deployed is not None and 'rest_api' in deployed.resource_names():
click.echo(deployed.resource_values('rest_api')['rest_api_url'])
else:
e = click.ClickException(
"Could not find a record of a Rest API in chalice stage: '%s'"
% stage)
e.exit_code = 2
raise e
@cli.command('generate-sdk')
@click.option('--sdk-type', default='javascript',
type=click.Choice(['javascript']))
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to generate an SDK for.')
@click.argument('outdir')
@click.pass_context
def generate_sdk(ctx, sdk_type, stage, outdir):
# type: (click.Context, str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
session = factory.create_botocore_session()
client = TypedAWSClient(session)
deployed = config.deployed_resources(stage)
if deployed is not None and 'rest_api' in deployed.resource_names():
rest_api_id = deployed.resource_values('rest_api')['rest_api_id']
api_gateway_stage = config.api_gateway_stage
client.download_sdk(rest_api_id, outdir,
api_gateway_stage=api_gateway_stage,
sdk_type=sdk_type)
else:
click.echo("Could not find API ID, has this application "
"been deployed?", err=True)
raise click.Abort()
@cli.command('generate-models')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help="Chalice Stage for which to generate models.")
@click.pass_context
def generate_models(ctx, stage):
# type: (click.Context, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
if not config.chalice_app.routes:
click.echo('No REST API found to generate model from.')
raise click.Abort()
swagger_generator = TemplatedSwaggerGenerator()
model = swagger_generator.generate_swagger(
config.chalice_app,
)
ui = UI()
ui.write(json.dumps(model, indent=4, cls=PlanEncoder))
ui.write('\n')
@cli.command('package')
@click.option('--pkg-format', default='cloudformation',
              help=('Specify the provisioning engine to use for '
                    'template output. Chalice supports both '
                    'CloudFormation and Terraform. Default '
                    'is CloudFormation.'),
              type=click.Choice(['cloudformation', 'terraform']))
@click.option('--stage', default=DEFAULT_STAGE_NAME,
              help="Chalice Stage to package.")
@click.option('--single-file', is_flag=True,
              default=False,
              help=("Create a single packaged file. "
                    "By default, the 'out' argument "
                    "specifies a directory in which the "
                    "package assets will be placed. If "
                    "this argument is specified, a single "
                    "zip file will be created instead. CloudFormation Only."))
@click.option('--merge-template',
              help=('Specify a JSON or YAML template to be merged '
                    'into the generated template. This is useful '
                    'for adding resources to a Chalice template or '
                    'modify values in the template. CloudFormation Only.'))
@click.option('--template-format', default='json',
              type=click.Choice(['json', 'yaml'], case_sensitive=False),
              help=('Specify if the generated template should be serialized '
                    'as either JSON or YAML. CloudFormation only.'))
@click.option('--profile', help='Override profile at packaging time.')
@click.argument('out')
@click.pass_context
def package(ctx, single_file, stage, merge_template,
            out, pkg_format, template_format, profile):
    # type: (click.Context, bool, str, str, str, str, str, str) -> None
    """Package the app into deployment assets (CloudFormation or Terraform)."""
    factory = ctx.obj['factory']  # type: CLIFactory
    factory.profile = profile
    config = factory.create_config_obj(stage)
    options = factory.create_package_options()
    packager = factory.create_app_packager(config, options, pkg_format,
                                           template_format, merge_template)
    cfn_only_flags_used = (merge_template or single_file or
                           template_format != 'json')
    if pkg_format == 'terraform' and cfn_only_flags_used:
        # I don't see any reason we couldn't support --single-file for
        # terraform if we wanted to.
        click.echo(("Terraform format does not support "
                    "--merge-template, --single-file, or --template-format"))
        raise click.Abort()
    if not single_file:
        packager.package_app(config, out, stage)
        return
    # Stage the assets in a scratch directory, zip them into 'out',
    # and always clean the scratch directory up afterwards.
    staging_dir = tempfile.mkdtemp()
    try:
        packager.package_app(config, staging_dir, stage)
        create_zip_file(source_dir=staging_dir, outfile=out)
    finally:
        shutil.rmtree(staging_dir)
@cli.command('generate-pipeline')
@click.option('--pipeline-version',
              default='v1',
              type=click.Choice(['v1', 'v2']),
              help='Which version of the pipeline template to generate.')
@click.option('-i', '--codebuild-image',
              help=("Specify default codebuild image to use. "
                    "This option must be provided when using a python "
                    "version besides 2.7."))
@click.option('-s', '--source', default='codecommit',
              type=click.Choice(['codecommit', 'github']),
              help=("Specify the input source. The default value of "
                    "'codecommit' will create a CodeCommit repository "
                    "for you. The 'github' value allows you to "
                    "reference an existing GitHub repository."))
@click.option('-b', '--buildspec-file',
              help=("Specify path for buildspec.yml file. "
                    "By default, the build steps are included in the "
                    "generated cloudformation template. If this option "
                    "is provided, a buildspec.yml will be generated "
                    "as a separate file and not included in the cfn "
                    "template. This allows you to make changes to how "
                    "the project is built without having to redeploy "
                    "a CloudFormation template. This file should be "
                    "named 'buildspec.yml' and placed in the root "
                    "directory of your app."))
@click.argument('filename')
@click.pass_context
def generate_pipeline(ctx, pipeline_version, codebuild_image, source,
                      buildspec_file, filename):
    # type: (click.Context, str, str, str, str, str) -> None
    """Write a CI/CD pipeline template for this app to *filename*."""
    from chalice import pipeline
    factory = ctx.obj['factory']  # type: CLIFactory
    config = factory.create_config_obj()
    if pipeline_version == 'v1':
        template = pipeline.CreatePipelineTemplateLegacy()  # type: pipeline.BasePipelineTemplate
    else:
        template = pipeline.CreatePipelineTemplateV2()
    params = pipeline.PipelineParameters(
        app_name=config.app_name,
        lambda_python_version=config.lambda_python_version,
        codebuild_image=codebuild_image,
        code_source=source,
        pipeline_version=pipeline_version,
    )
    output = template.create_template(params)
    if buildspec_file:
        # Split the build steps out into a standalone buildspec.yml so
        # builds can change without redeploying the CFN template.
        extractor = pipeline.BuildSpecExtractor()
        with open(buildspec_file, 'w') as f:
            f.write(extractor.extract_buildspec(output))
    with open(filename, 'w') as f:
        f.write(serialize_to_json(output))
def main():
    # type: () -> int
    """CLI entry point; returns a process exit code (0 on success, 2 on error)."""
    # click's dynamic attrs will allow us to pass through
    # these error messages from pylint because we know it's ok.
    try:
        return cli(obj={})
    except botocore.exceptions.NoRegionError:
        # Fixed typo in the user-facing hint: "our" -> "your".
        click.echo("No region configured. "
                   "Either export the AWS_DEFAULT_REGION "
                   "environment variable or set the "
                   "region value in your ~/.aws/config file.", err=True)
        return 2
    except ExperimentalFeatureError as e:
        click.echo(str(e))
        return 2
    except Exception:
        # Top-level boundary: show the full traceback on stderr instead of
        # letting the process die with an unformatted exception.
        click.echo(traceback.format_exc(), err=True)
        return 2
| true | true |
f715566e418c809bc95f0d6c57bc79a0f14a15b4 | 1,920 | py | Python | nanobox_libcloud/tasks/azure_arm.py | mu-box/nanobox-adapter-libcloud | a8606799a4899c3e771f24467b61cc09a49f0d55 | [
"MIT"
] | 4 | 2017-08-26T16:26:02.000Z | 2017-11-10T02:20:32.000Z | nanobox_libcloud/tasks/azure_arm.py | mu-box/nanobox-adapter-libcloud | a8606799a4899c3e771f24467b61cc09a49f0d55 | [
"MIT"
] | 9 | 2017-09-12T20:26:07.000Z | 2019-04-01T18:08:28.000Z | nanobox_libcloud/tasks/azure_arm.py | mu-box/nanobox-adapter-libcloud | a8606799a4899c3e771f24467b61cc09a49f0d55 | [
"MIT"
] | 7 | 2017-09-16T09:00:48.000Z | 2021-03-01T04:28:39.000Z | from nanobox_libcloud import celery
from nanobox_libcloud import adapters
from time import sleep
import logging
@celery.task
def azure_destroy_arm(creds, name):
    """Destroy an Azure ARM server and, when it was the app's last node,
    the app's virtual network and resource group as well.

    :param creds: keyword credentials forwarded to the Azure ARM driver.
    :param name: server name of the form ``<app>-<suffix>``.
    """
    logger = logging.getLogger(__name__)
    self = adapters.azure_arm.AzureARM()
    driver = self._get_user_driver(**creds)
    logger.info('Destroying server, NIC, public IP, and VHD...')
    if driver.destroy_node(self._find_server(driver, name), ex_destroy_ip=True):
        logger.info('Ensuring server was destroyed...')
        while self._find_server(driver, name) is not None:
            sleep(0.5)
        # The app name is everything before the last '-' in the server name.
        app = name.rsplit('-', 1)[0]
        if len(driver.list_nodes(app)) < 1:
            logger.info('Destroying virtual network...')
            net = self._find_network(driver, app)
            while True:
                try:
                    driver.ex_delete_network(net.id)
                # NOTE(review): BaseHTTPError is never imported in this module
                # (it lives in libcloud.common.exceptions), so a failed delete
                # currently raises NameError here -- needs an import.
                except BaseHTTPError as h:
                    if h.code == 202:  # HTTP Accepted: deletion in progress.
                        break
                    logger.info('%d: %s', h.code, h.message)
                    inuse = "is in use" in h.message
                    if h.code == 400 and inuse:
                        # Was time.sleep(10): a NameError, since only
                        # 'from time import sleep' is imported.
                        sleep(10)
                        # NOTE(review): control still falls through to the
                        # break below, so the delete is never actually
                        # retried -- confirm whether 'continue' was intended.
                break
            while self._find_network(driver, app):
                sleep(0.5)
            logger.info('Destroying resource group...')
            group = self._find_resource_group(driver, app)
            while True:
                try:
                    driver.ex_delete_resource_group(group.id)
                # NOTE(review): same missing BaseHTTPError import as above.
                except BaseHTTPError as h:
                    if h.code == 202:
                        break
                    logger.info('%d: %s', h.code, h.message)
                    inuse = "InUse" in h.message
                    if h.code == 400 and inuse:
                        sleep(10)
                break
            while self._find_resource_group(driver, app):
                sleep(0.5)
| 36.226415 | 80 | 0.519792 | from nanobox_libcloud import celery
from nanobox_libcloud import adapters
from time import sleep
import logging
@celery.task
def azure_destroy_arm(creds, name):
logger = logging.getLogger(__name__)
self = adapters.azure_arm.AzureARM()
driver = self._get_user_driver(**creds)
logger.info('Destroying server, NIC, public IP, and VHD...')
if driver.destroy_node(self._find_server(driver, name), ex_destroy_ip=True):
logger.info('Ensuring server was destroyed...')
while self._find_server(driver, name) is not None:
sleep(0.5)
app = name.rsplit('-', 1)[0]
if len(driver.list_nodes(app)) < 1:
logger.info('Destroying virtual network...')
net = self._find_network(driver, app)
while True:
try:
driver.ex_delete_network(net.id)
except BaseHTTPError as h:
if h.code == 202:
break
logging.info('%d: %s' % (h.code, h.message))
inuse = "is in use" in h.message
if h.code == 400 and inuse:
time.sleep(10)
break
while self._find_network(driver, app):
sleep(0.5)
logger.info('Destroying resource group...')
group = self._find_resource_group(driver, app)
while True:
try:
driver.ex_delete_resource_group(group.id)
except BaseHTTPError as h:
if h.code == 202:
break
logging.info('%d: %s' % (h.code, h.message))
inuse = "InUse" in h.message
if h.code == 400 and inuse:
time.sleep(10)
break
while self._find_resource_group(driver, app):
sleep(0.5)
| true | true |
f715567b1c04fc53d84059088a5b453d461d55e7 | 1,293 | py | Python | airbyte-integrations/connectors/source-hubspot/main_dev.py | luizgribeiro/airbyte | 71a96f5417b678c39b34e2e92234d8a51529e086 | [
"MIT"
] | 2 | 2021-03-02T09:17:41.000Z | 2021-03-02T11:02:23.000Z | airbyte-integrations/connectors/source-hubspot/main_dev.py | luizgribeiro/airbyte | 71a96f5417b678c39b34e2e92234d8a51529e086 | [
"MIT"
] | 52 | 2021-06-11T12:39:05.000Z | 2022-03-30T04:59:35.000Z | airbyte-integrations/connectors/source-hubspot/main_dev.py | luizgribeiro/airbyte | 71a96f5417b678c39b34e2e92234d8a51529e086 | [
"MIT"
] | 2 | 2021-12-14T17:15:40.000Z | 2021-12-14T17:18:03.000Z | #
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
from base_python.entrypoint import launch
from source_hubspot import SourceHubspot
if __name__ == "__main__":
source = SourceHubspot()
launch(source, sys.argv[1:])
| 38.029412 | 80 | 0.771075 |
import sys
from base_python.entrypoint import launch
from source_hubspot import SourceHubspot
if __name__ == "__main__":
source = SourceHubspot()
launch(source, sys.argv[1:])
| true | true |
f7155888da8319908c50672d71d366b286e97a8a | 8,907 | py | Python | apis/nb/clients/inventory_manager_client/models/LicenseInfoDTO.py | CiscoDevNet/APIC-EM-Generic-Scripts- | 74211d9488f1e77cf56ef86dba20ec8e8eb49cc1 | [
"ECL-2.0",
"Apache-2.0"
] | 45 | 2016-06-09T15:41:25.000Z | 2019-08-06T17:13:11.000Z | apis/nb/clients/inventory_manager_client/models/LicenseInfoDTO.py | CiscoDevNet/APIC-EM-Generic-Scripts | 74211d9488f1e77cf56ef86dba20ec8e8eb49cc1 | [
"ECL-2.0",
"Apache-2.0"
] | 36 | 2016-06-12T03:03:56.000Z | 2017-03-13T18:20:11.000Z | apis/nb/clients/inventory_manager_client/models/LicenseInfoDTO.py | CiscoDevNet/APIC-EM-Generic-Scripts | 74211d9488f1e77cf56ef86dba20ec8e8eb49cc1 | [
"ECL-2.0",
"Apache-2.0"
] | 15 | 2016-06-22T03:51:37.000Z | 2019-07-10T10:06:02.000Z | #!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
class LicenseInfoDTO(object):
    """Swagger-generated data-transfer object describing a device license.

    ``swaggerTypes`` maps every attribute name to its Swagger type string,
    and ``attributeMap`` maps every attribute name to its JSON key in the
    API definition.  Each data attribute (e.g. ``name``, ``priority``,
    ``validityPeriod``, ``storeName``) starts out as ``None`` and is filled
    in by the API client when a response is deserialized.
    """

    def __init__(self):
        self.swaggerTypes = {
            'name': 'str',
            'priority': 'str',
            'type': 'str',
            'description': 'str',
            'validityPeriodRemaining': 'int',
            'maxUsageCount': 'int',
            'eulaStatus': 'bool',
            'validityPeriod': 'int',
            'usageCount': 'int',
            'physicalIndex': 'str',
            'licenseIndex': 'int',
            'featureVersion': 'str',
            'counted': 'bool',
            'totalCount': 'int',
            'provisionState': 'int',
            'parentId': 'int',
            'expiredPeriod': 'int',
            'usageCountRemaining': 'int',
            'status': 'str',
            'id': 'str',
            'deployPending': 'int',
            'hostId': 'str',
            'evalPeriodLeft': 'str',
            'evalPeriodUsed': 'str',
            'expiredDate': 'str',
            'isCounted': 'bool',
            'isEulaAccepted': 'bool',
            'isEulaApplicable': 'bool',
            'isTechnologyLicense': 'bool',
            'licenseFileCount': 'int',
            'licenseFileName': 'str',
            'storeName': 'str',
            'storedUsed': 'int',
            'attributeInfo': 'dict'
        }

        self.attributeMap = {
            'name': 'name',
            'priority': 'priority',
            'type': 'type',
            'description': 'description',
            'validityPeriodRemaining': 'validityPeriodRemaining',
            'maxUsageCount': 'maxUsageCount',
            'eulaStatus': 'eulaStatus',
            'validityPeriod': 'validityPeriod',
            'usageCount': 'usageCount',
            'physicalIndex': 'physicalIndex',
            'licenseIndex': 'licenseIndex',
            'featureVersion': 'featureVersion',
            'counted': 'counted',
            'totalCount': 'totalCount',
            'provisionState': 'provisionState',
            'parentId': 'parentId',
            'expiredPeriod': 'expiredPeriod',
            'usageCountRemaining': 'usageCountRemaining',
            'status': 'status',
            'id': 'id',
            'deployPending': 'deployPending',
            'hostId': 'hostId',
            'evalPeriodLeft': 'evalPeriodLeft',
            'evalPeriodUsed': 'evalPeriodUsed',
            'expiredDate': 'expiredDate',
            'isCounted': 'isCounted',
            'isEulaAccepted': 'isEulaAccepted',
            'isEulaApplicable': 'isEulaApplicable',
            'isTechnologyLicense': 'isTechnologyLicense',
            'licenseFileCount': 'licenseFileCount',
            'licenseFileName': 'licenseFileName',
            'storeName': 'storeName',
            'storedUsed': 'storedUsed',
            'attributeInfo': 'attributeInfo'
        }

        # The keys of swaggerTypes enumerate the full attribute set in
        # declaration order; every attribute is initialized to None.
        for attribute_name in self.swaggerTypes:
            setattr(self, attribute_name, None)
| 27.072948 | 145 | 0.461659 |
class LicenseInfoDTO(object):
def __init__(self):
self.swaggerTypes = {
'name': 'str',
'priority': 'str',
'type': 'str',
'description': 'str',
'validityPeriodRemaining': 'int',
'maxUsageCount': 'int',
'eulaStatus': 'bool',
'validityPeriod': 'int',
'usageCount': 'int',
'physicalIndex': 'str',
'licenseIndex': 'int',
'featureVersion': 'str',
'counted': 'bool',
'totalCount': 'int',
'provisionState': 'int',
'parentId': 'int',
'expiredPeriod': 'int',
'usageCountRemaining': 'int',
'status': 'str',
'id': 'str',
'deployPending': 'int',
'hostId': 'str',
'evalPeriodLeft': 'str',
'evalPeriodUsed': 'str',
'expiredDate': 'str',
'isCounted': 'bool',
'isEulaAccepted': 'bool',
'isEulaApplicable': 'bool',
'isTechnologyLicense': 'bool',
'licenseFileCount': 'int',
'licenseFileName': 'str',
'storeName': 'str',
'storedUsed': 'int',
'attributeInfo': 'dict'
}
self.attributeMap = {
'name': 'name',
'priority': 'priority',
'type': 'type',
'description': 'description',
'validityPeriodRemaining': 'validityPeriodRemaining',
'maxUsageCount': 'maxUsageCount',
'eulaStatus': 'eulaStatus',
'validityPeriod': 'validityPeriod',
'usageCount': 'usageCount',
'physicalIndex': 'physicalIndex',
'licenseIndex': 'licenseIndex',
'featureVersion': 'featureVersion',
'counted': 'counted',
'totalCount': 'totalCount',
'provisionState': 'provisionState',
'parentId': 'parentId',
'expiredPeriod': 'expiredPeriod',
'usageCountRemaining': 'usageCountRemaining',
'status': 'status',
'id': 'id',
'deployPending': 'deployPending',
'hostId': 'hostId',
'evalPeriodLeft': 'evalPeriodLeft',
'evalPeriodUsed': 'evalPeriodUsed',
'expiredDate': 'expiredDate',
'isCounted': 'isCounted',
'isEulaAccepted': 'isEulaAccepted',
'isEulaApplicable': 'isEulaApplicable',
'isTechnologyLicense': 'isTechnologyLicense',
'licenseFileCount': 'licenseFileCount',
'licenseFileName': 'licenseFileName',
'storeName': 'storeName',
'storedUsed': 'storedUsed',
'attributeInfo': 'attributeInfo'
}
self.type = None
self.description = None
self.validityPeriodRemaining = None
self.maxUsageCount = None
self.eulaStatus = None
self.validityPeriod = None
self.usageCount = None
self.physicalIndex = None
self.licenseIndex = None
self.counted = None
self.totalCount = None
self.provisionState = None
self.parentId = None
self.expiredPeriod = None
self.usageCountRemaining = None
self.status = None
self.id = None
self.deployPending = None
self.hostId = None
self.evalPeriodLeft = None
self.evalPeriodUsed = None
self.expiredDate = None
self.isCounted = None
self.isEulaAccepted = None
self.isEulaApplicable = None
self.isTechnologyLicense = None
self.licenseFileCount = None
self.licenseFileName = None
self.attributeInfo = None
| true | true |
f715593dca93c7ea7889b286ec0ff7c88525f4e6 | 6,383 | py | Python | flowvision/models/style_transfer/stylenet.py | Oneflow-Inc/vision | 352e9240f63118112ea174bb2d0b502fa54be16f | [
"BSD-3-Clause"
] | 40 | 2021-10-19T02:34:56.000Z | 2022-03-25T07:49:44.000Z | flowvision/models/style_transfer/stylenet.py | Oneflow-Inc/vision | 352e9240f63118112ea174bb2d0b502fa54be16f | [
"BSD-3-Clause"
] | 53 | 2021-10-22T02:24:44.000Z | 2022-03-31T04:20:47.000Z | flowvision/models/style_transfer/stylenet.py | Oneflow-Inc/vision | 352e9240f63118112ea174bb2d0b502fa54be16f | [
"BSD-3-Clause"
] | 11 | 2022-01-06T02:57:07.000Z | 2022-03-23T15:19:51.000Z | """
Modified from https://github.com/Oneflow-Inc/models/blob/main/Vision/style_transform/fast_neural_style/neural_style/transformer_net.py
"""
from typing import Any
import oneflow as flow
from ..registry import ModelCreator
from ..utils import load_state_dict_from_url
__all__ = ["FastNeuralStyle", "fast_neural_style"]
style_model_urls = {
"sketch": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/sketch_oneflow.tar.gz",
"candy": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/candy_oneflow.tar.gz",
"mosaic": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/mosaic_oneflow.tar.gz",
"rain_princess": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/rain_princess_oneflow.tar.gz",
"udnie": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/udnie_oneflow.tar.gz",
}
class ConvLayer(flow.nn.Module):
    """2-D convolution preceded by reflection padding of kernel_size // 2."""

    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvLayer, self).__init__()
        pad = kernel_size // 2
        self.reflection_pad = flow.nn.ReflectionPad2d(pad)
        self.conv2d = flow.nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        # Pad with reflected borders first, then convolve.
        return self.conv2d(self.reflection_pad(x))
class ResidualBlock(flow.nn.Module):
    """Residual block (https://arxiv.org/abs/1512.03385): two 3x3
    reflection-padded convolutions with instance norm, a ReLU between
    them, and an identity skip connection added to the output.
    """

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in1 = flow.nn.InstanceNorm2d(channels, affine=True)
        self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in2 = flow.nn.InstanceNorm2d(channels, affine=True)
        self.relu = flow.nn.ReLU()

    def forward(self, x):
        branch = self.relu(self.in1(self.conv1(x)))
        branch = self.in2(self.conv2(branch))
        # Identity skip connection.
        return branch + x
class UpsampleConvLayer(flow.nn.Module):
    """Optional nearest-neighbor upsample followed by a reflection-padded
    convolution.

    Upsample-then-convolve gives better results than ConvTranspose2d
    (ref: http://distill.pub/2016/deconv-checkerboard/).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
        super(UpsampleConvLayer, self).__init__()
        self.upsample = upsample
        if self.upsample:
            self.interpolate = flow.nn.UpsamplingNearest2d(scale_factor=upsample)
        pad = kernel_size // 2
        self.reflection_pad = flow.nn.ReflectionPad2d(pad)
        self.conv2d = flow.nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        # Upsample only when a scale factor was configured.
        out = self.interpolate(x) if self.upsample else x
        return self.conv2d(self.reflection_pad(out))
class FastNeuralStyle(flow.nn.Module):
    """Image-transformation network for fast neural style transfer
    (https://arxiv.org/abs/1603.08155): a strided-conv encoder, five
    residual blocks, and an upsampling decoder.  Output values are
    clamped to the [0, 255] pixel range.

    NOTE: attribute names (conv1, in1, ..., deconv3) are part of the
    checkpoint state-dict layout; renaming them would break pretrained
    weight loading.
    """
    def __init__(self):
        super(FastNeuralStyle, self).__init__()
        # Initial convolution layers: 3 -> 32 -> 64 -> 128 channels,
        # with stride-2 convs downsampling spatially twice.
        self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
        self.in1 = flow.nn.InstanceNorm2d(32, affine=True)
        self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
        self.in2 = flow.nn.InstanceNorm2d(64, affine=True)
        self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
        self.in3 = flow.nn.InstanceNorm2d(128, affine=True)
        # Residual layers: five 128-channel blocks at the bottleneck.
        self.res1 = ResidualBlock(128)
        self.res2 = ResidualBlock(128)
        self.res3 = ResidualBlock(128)
        self.res4 = ResidualBlock(128)
        self.res5 = ResidualBlock(128)
        # Upsampling layers: two 2x nearest-neighbor upsamples back to the
        # input resolution, then a final conv down to 3 channels.
        self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
        self.in4 = flow.nn.InstanceNorm2d(64, affine=True)
        self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
        self.in5 = flow.nn.InstanceNorm2d(32, affine=True)
        self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
        # Non-linearities
        self.relu = flow.nn.ReLU()

    def forward(self, X):
        """Stylize image batch ``X``; returns a tensor clamped to [0, 255]."""
        y = self.relu(self.in1(self.conv1(X)))
        y = self.relu(self.in2(self.conv2(y)))
        y = self.relu(self.in3(self.conv3(y)))
        y = self.res1(y)
        y = self.res2(y)
        y = self.res3(y)
        y = self.res4(y)
        y = self.res5(y)
        y = self.relu(self.in4(self.deconv1(y)))
        y = self.relu(self.in5(self.deconv2(y)))
        y = self.deconv3(y)
        # Keep the output in valid pixel range; no final activation.
        y = flow.clamp(y, 0, 255)
        return y
@ModelCreator.register_model
def fast_neural_style(
    pretrained: bool = False,
    progress: bool = True,
    style_model: str = "sketch",
    **kwargs: Any
) -> FastNeuralStyle:
    """
    Constructs the Fast Neural Style Transfer model.

    .. note::
        `Perceptual Losses for Real-Time Style Transfer and Super-Resolution <https://arxiv.org/abs/1603.08155>`_.
        The required minimum input size of the model is 256x256.
        For more details for how to use this model, users can refer to: `neural_style_transfer project <https://github.com/Oneflow-Inc/vision/tree/main/projects/neural_style_transfer>`_.

    Args:
        pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
        progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
        style_model (str): Which pretrained style model to download, user can choose from [sketch, candy, mosaic, rain_princess, udnie]. Default: ``sketch``

    For example:

    .. code-block:: python

        >>> import flowvision
        >>> stylenet = flowvision.models.style_transfer.fast_neural_style(pretrained=True, progress=True, style_model = "sketch")

    """
    assert (
        style_model in style_model_urls.keys()
    ), "`style_model` must choose from [sketch, candy, mosaic, rain_princess, udnie]"
    model = FastNeuralStyle(**kwargs)
    if not pretrained:
        return model
    # Fetch and load the requested style's pretrained weights.
    checkpoint = load_state_dict_from_url(
        style_model_urls[style_model], progress=progress
    )
    model.load_state_dict(checkpoint)
    return model
| 39.159509 | 186 | 0.675858 | from typing import Any
import oneflow as flow
from ..registry import ModelCreator
from ..utils import load_state_dict_from_url
__all__ = ["FastNeuralStyle", "fast_neural_style"]
style_model_urls = {
"sketch": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/sketch_oneflow.tar.gz",
"candy": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/candy_oneflow.tar.gz",
"mosaic": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/mosaic_oneflow.tar.gz",
"rain_princess": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/rain_princess_oneflow.tar.gz",
"udnie": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/udnie_oneflow.tar.gz",
}
class ConvLayer(flow.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = flow.nn.ReflectionPad2d(reflection_padding)
self.conv2d = flow.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(flow.nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = flow.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = flow.nn.InstanceNorm2d(channels, affine=True)
self.relu = flow.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(flow.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
reflection_padding = kernel_size // 2
if self.upsample:
self.interpolate = flow.nn.UpsamplingNearest2d(scale_factor=upsample)
self.reflection_pad = flow.nn.ReflectionPad2d(reflection_padding)
self.conv2d = flow.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.interpolate(x_in)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
class FastNeuralStyle(flow.nn.Module):
def __init__(self):
super(FastNeuralStyle, self).__init__()
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = flow.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = flow.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = flow.nn.InstanceNorm2d(128, affine=True)
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = flow.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = flow.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
self.relu = flow.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
y = flow.clamp(y, 0, 255)
return y
@ModelCreator.register_model
def fast_neural_style(
pretrained: bool = False,
progress: bool = True,
style_model: str = "sketch",
**kwargs: Any
) -> FastNeuralStyle:
assert (
style_model in style_model_urls.keys()
), "`style_model` must choose from [sketch, candy, mosaic, rain_princess, udnie]"
model = FastNeuralStyle(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(
style_model_urls[style_model], progress=progress
)
model.load_state_dict(state_dict)
return model
| true | true |
f71559ccbf89d77b0fb702498ba29b015dc7c215 | 3,597 | py | Python | examples/pytorch/nlp/huggingface_models/common/tests/test_tokenization_small_blenderbot.py | huggingface/neural-compressor | aaad4c357a86914ffa583753c9a26d949838a2a5 | [
"Apache-2.0"
] | 172 | 2021-09-14T18:34:17.000Z | 2022-03-30T06:49:53.000Z | examples/pytorch/nlp/huggingface_models/common/tests/test_tokenization_small_blenderbot.py | intel/lp-opt-tool | 130eefa3586b38df6c0ff78cc8807ae273f6a63f | [
"Apache-2.0"
] | 40 | 2021-09-14T02:26:12.000Z | 2022-03-29T08:34:04.000Z | examples/pytorch/nlp/huggingface_models/common/tests/test_tokenization_small_blenderbot.py | intel/neural-compressor | 16a4a12045fcb468da4d33769aff2c1a5e2ba6ba | [
"Apache-2.0"
] | 33 | 2021-09-15T07:27:25.000Z | 2022-03-25T08:30:57.000Z | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Blenderbot small tokenizer."""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from .test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the BPE-based BlenderbotSmallTokenizer."""

    tokenizer_class = BlenderbotSmallTokenizer

    def setUp(self):
        """Write a tiny vocab/merges fixture into tmpdirname for the shared tests."""
        super().setUp()

        # Minimal BPE fixture: token ids are the list positions, and the
        # merge rules below produce exactly the tokens in `vocab`.
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        # Hook for TokenizerTesterMixin: build a tokenizer from the fixture files.
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # Round-trip sample for the common tests; in/out are identical here.
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        """Tokenization against the fixture vocab: 'apte' splits into 'ap@@' + 'te'."""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        """Checks the released 90M tokenizer (downloads the pretrained files)."""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        # Round-trip is lossy: the tokenizer lowercases and splits off
        # punctuation, as the next assertion pins down.
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        """Compares the trailing token of src_text with the first token of src_text_dot."""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        # NOTE(review): with an empty string here, encoded_dot is presumably
        # [] and encoded_dot[0] would raise IndexError; the upstream
        # transformers test uses "." -- confirm whether this "" is an
        # extraction artifact.
        src_text_dot = ""
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 41.344828 | 108 | 0.687517 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from .test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BlenderbotSmallTokenizer
def setUp(self):
super().setUp()
vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "adapt act apte"
output_text = "adapt act apte"
return input_text, output_text
def test_full_blenderbot_small_tokenizer(self):
tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
text = "adapt act apte"
bpe_tokens = ["adapt", "act", "ap@@", "te"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
input_bpe_tokens = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def test_special_tokens_small_tok(self):
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
assert tok("sam").input_ids == [1384]
src_text = "I am a small frog."
encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
assert src_text != decoded
assert decoded == "i am a small frog ."
def test_empty_word_small_tok(self):
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
src_text = "I am a small frog ."
src_text_dot = ""
encoded = tok(src_text)["input_ids"]
encoded_dot = tok(src_text_dot)["input_ids"]
assert encoded[-1] == encoded_dot[0]
| true | true |
f71559da0abd1737aa33927ca6ae4d82a909ed60 | 1,426 | py | Python | enable-s3-encryption.py | thilinajayanath/s3-server-side-encryption | b1de6cc2785825df0c6f6769ff0693edd5d2e5f6 | [
"MIT"
] | null | null | null | enable-s3-encryption.py | thilinajayanath/s3-server-side-encryption | b1de6cc2785825df0c6f6769ff0693edd5d2e5f6 | [
"MIT"
] | null | null | null | enable-s3-encryption.py | thilinajayanath/s3-server-side-encryption | b1de6cc2785825df0c6f6769ff0693edd5d2e5f6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import boto3
from botocore.exceptions import ClientError
parser = argparse.ArgumentParser(description='Check all S3 buckets in the AWS account and enables default encryption with AES256')
parser.add_argument('aws_account_name', type=str, help='Named AWS user account')
args = parser.parse_args()
session = boto3.session.Session(profile_name=args.aws_account_name)
s3 = session.client(service_name='s3')
enc_config = {
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
},
]
}
for bucket in s3.list_buckets()['Buckets']:
try:
enc_algorithm = s3.get_bucket_encryption(Bucket=bucket['Name'])['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm']
print('Bucket %s has default server-side encryption enabled with %s' % (bucket['Name'],enc_algorithm))
except ClientError as e:
if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
print('Bucket: %s does not have default server-side encryption enabled' % bucket['Name'])
try:
s3.put_bucket_encryption(Bucket=bucket['Name'],ServerSideEncryptionConfiguration=enc_config)
print('Enabled encryption on bucket: %s' % bucket['Name'])
except ClientError as e:
print(e.response['Error']['Code'])
else:
print(e.response['Error']['Code'])
| 36.564103 | 170 | 0.718093 |
import argparse
import boto3
from botocore.exceptions import ClientError
parser = argparse.ArgumentParser(description='Check all S3 buckets in the AWS account and enables default encryption with AES256')
parser.add_argument('aws_account_name', type=str, help='Named AWS user account')
args = parser.parse_args()
session = boto3.session.Session(profile_name=args.aws_account_name)
s3 = session.client(service_name='s3')
enc_config = {
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
},
]
}
for bucket in s3.list_buckets()['Buckets']:
try:
enc_algorithm = s3.get_bucket_encryption(Bucket=bucket['Name'])['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm']
print('Bucket %s has default server-side encryption enabled with %s' % (bucket['Name'],enc_algorithm))
except ClientError as e:
if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
print('Bucket: %s does not have default server-side encryption enabled' % bucket['Name'])
try:
s3.put_bucket_encryption(Bucket=bucket['Name'],ServerSideEncryptionConfiguration=enc_config)
print('Enabled encryption on bucket: %s' % bucket['Name'])
except ClientError as e:
print(e.response['Error']['Code'])
else:
print(e.response['Error']['Code'])
| true | true |
f7155a0bc67e529b57bf0704543609089406c58a | 1,557 | py | Python | a1/__init__.py | o-ran-sc/ric-plt-a1 | 902771612ffcf0541ea27dce35eb6f20bf885cf3 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | a1/__init__.py | o-ran-sc/ric-plt-a1 | 902771612ffcf0541ea27dce35eb6f20bf885cf3 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | a1/__init__.py | o-ran-sc/ric-plt-a1 | 902771612ffcf0541ea27dce35eb6f20bf885cf3 | [
"Apache-2.0",
"CC-BY-4.0"
] | 3 | 2020-05-24T05:51:03.000Z | 2021-08-23T07:21:49.000Z | # ==================================================================================
# Copyright (c) 2019 Nokia
# Copyright (c) 2018-2019 AT&T Intellectual Property.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================
"""
contains the app; broken out here for ease of unit testing
"""
import connexion
from prometheus_client import CollectorRegistry, generate_latest, multiprocess
app = connexion.App(__name__, specification_dir=".")
app.add_api("openapi.yaml", arguments={"title": "My Title"})
# python decorators feel like black magic to me
@app.app.route('/a1-p/metrics', methods=['GET'])
def metrics(): # pylint: disable=unused-variable
# /metrics API shouldn't be visible in the API documentation,
# hence it's added here in the create_app step
# requires environment variable prometheus_multiproc_dir
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)
return generate_latest(registry)
| 42.081081 | 84 | 0.659602 |
import connexion
from prometheus_client import CollectorRegistry, generate_latest, multiprocess
app = connexion.App(__name__, specification_dir=".")
app.add_api("openapi.yaml", arguments={"title": "My Title"})
@app.app.route('/a1-p/metrics', methods=['GET'])
def metrics():
# hence it's added here in the create_app step
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)
return generate_latest(registry)
| true | true |
f7155a5748b43f2a16ce87a6c062feb7b20551ab | 234 | py | Python | cieloApi3/__init__.py | thiagosm/API-3.0-Python | 2fab59d5e0ce7191d3c458e8fab9b1d3c6298748 | [
"MIT"
] | null | null | null | cieloApi3/__init__.py | thiagosm/API-3.0-Python | 2fab59d5e0ce7191d3c458e8fab9b1d3c6298748 | [
"MIT"
] | null | null | null | cieloApi3/__init__.py | thiagosm/API-3.0-Python | 2fab59d5e0ce7191d3c458e8fab9b1d3c6298748 | [
"MIT"
] | null | null | null | from .environment import *
from .merchant import *
from .sale import *
from .customer import *
from .creditCard import *
from .debitCard import *
from .payment import *
from .recurrentPayment import *
from .cieloEcommerce import *
| 18 | 31 | 0.75641 | from .environment import *
from .merchant import *
from .sale import *
from .customer import *
from .creditCard import *
from .debitCard import *
from .payment import *
from .recurrentPayment import *
from .cieloEcommerce import *
| true | true |
f7155a7ee909d8a5c4d0f88e8722b5abeabb44e3 | 13,929 | py | Python | google/ads/google_ads/v3/proto/services/asset_service_pb2.py | jphanwebstaurant/google-ads-python | 600812b2afcc4d57f00b47dfe436620ce50bfe9b | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v3/proto/services/asset_service_pb2.py | jphanwebstaurant/google-ads-python | 600812b2afcc4d57f00b47dfe436620ce50bfe9b | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v3/proto/services/asset_service_pb2.py | jphanwebstaurant/google-ads-python | 600812b2afcc4d57f00b47dfe436620ce50bfe9b | [
"Apache-2.0"
] | 1 | 2020-09-30T17:04:06.000Z | 2020-09-30T17:04:06.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/services/asset_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.resources import asset_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/services/asset_service.proto',
package='google.ads.googleads.v3.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v3.servicesB\021AssetServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V3.Services\312\002 Google\\Ads\\GoogleAds\\V3\\Services\352\002$Google::Ads::GoogleAds::V3::Services'),
serialized_pb=_b('\n:google/ads/googleads_v3/proto/services/asset_service.proto\x12 google.ads.googleads.v3.services\x1a\x33google/ads/googleads_v3/proto/resources/asset.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"P\n\x0fGetAssetRequest\x12=\n\rresource_name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1egoogleads.googleapis.com/Asset\"z\n\x13MutateAssetsRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12I\n\noperations\x18\x02 \x03(\x0b\x32\x30.google.ads.googleads.v3.services.AssetOperationB\x03\xe0\x41\x02\"Y\n\x0e\x41ssetOperation\x12:\n\x06\x63reate\x18\x01 \x01(\x0b\x32(.google.ads.googleads.v3.resources.AssetH\x00\x42\x0b\n\toperation\"\\\n\x14MutateAssetsResponse\x12\x44\n\x07results\x18\x02 \x03(\x0b\x32\x33.google.ads.googleads.v3.services.MutateAssetResult\"*\n\x11MutateAssetResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xa8\x03\n\x0c\x41ssetService\x12\xa9\x01\n\x08GetAsset\x12\x31.google.ads.googleads.v3.services.GetAssetRequest\x1a(.google.ads.googleads.v3.resources.Asset\"@\x82\xd3\xe4\x93\x02*\x12(/v3/{resource_name=customers/*/assets/*}\xda\x41\rresource_name\x12\xce\x01\n\x0cMutateAssets\x12\x35.google.ads.googleads.v3.services.MutateAssetsRequest\x1a\x36.google.ads.googleads.v3.services.MutateAssetsResponse\"O\x82\xd3\xe4\x93\x02\x30\"+/v3/customers/{customer_id=*}/assets:mutate:\x01*\xda\x41\x16\x63ustomer_id,operations\x1a\x1b\xca\x41\x18googleads.googleapis.comB\xf8\x01\n$com.google.ads.googleads.v3.servicesB\x11\x41ssetServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V3.Services\xca\x02 Google\\Ads\\GoogleAds\\V3\\Services\xea\x02$Google::Ads::GoogleAds::V3::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETASSETREQUEST = _descriptor.Descriptor(
name='GetAssetRequest',
full_name='google.ads.googleads.v3.services.GetAssetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.GetAssetRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002\372A \n\036googleads.googleapis.com/Asset'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=264,
serialized_end=344,
)
_MUTATEASSETSREQUEST = _descriptor.Descriptor(
name='MutateAssetsRequest',
full_name='google.ads.googleads.v3.services.MutateAssetsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operations', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=346,
serialized_end=468,
)
_ASSETOPERATION = _descriptor.Descriptor(
name='AssetOperation',
full_name='google.ads.googleads.v3.services.AssetOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v3.services.AssetOperation.create', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v3.services.AssetOperation.operation',
index=0, containing_type=None, fields=[]),
],
serialized_start=470,
serialized_end=559,
)
_MUTATEASSETSRESPONSE = _descriptor.Descriptor(
name='MutateAssetsResponse',
full_name='google.ads.googleads.v3.services.MutateAssetsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='google.ads.googleads.v3.services.MutateAssetsResponse.results', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=561,
serialized_end=653,
)
_MUTATEASSETRESULT = _descriptor.Descriptor(
name='MutateAssetResult',
full_name='google.ads.googleads.v3.services.MutateAssetResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.MutateAssetResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=655,
serialized_end=697,
)
_MUTATEASSETSREQUEST.fields_by_name['operations'].message_type = _ASSETOPERATION
_ASSETOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET
_ASSETOPERATION.oneofs_by_name['operation'].fields.append(
_ASSETOPERATION.fields_by_name['create'])
_ASSETOPERATION.fields_by_name['create'].containing_oneof = _ASSETOPERATION.oneofs_by_name['operation']
_MUTATEASSETSRESPONSE.fields_by_name['results'].message_type = _MUTATEASSETRESULT
DESCRIPTOR.message_types_by_name['GetAssetRequest'] = _GETASSETREQUEST
DESCRIPTOR.message_types_by_name['MutateAssetsRequest'] = _MUTATEASSETSREQUEST
DESCRIPTOR.message_types_by_name['AssetOperation'] = _ASSETOPERATION
DESCRIPTOR.message_types_by_name['MutateAssetsResponse'] = _MUTATEASSETSRESPONSE
DESCRIPTOR.message_types_by_name['MutateAssetResult'] = _MUTATEASSETRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAssetRequest = _reflection.GeneratedProtocolMessageType('GetAssetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETASSETREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.GetAsset][google.ads.googleads.v3.services.AssetService.GetAsset]
Attributes:
resource_name:
Required. The resource name of the asset to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.GetAssetRequest)
))
_sym_db.RegisterMessage(GetAssetRequest)
MutateAssetsRequest = _reflection.GeneratedProtocolMessageType('MutateAssetsRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.MutateAssets][google.ads.googleads.v3.services.AssetService.MutateAssets]
Attributes:
customer_id:
Required. The ID of the customer whose assets are being
modified.
operations:
Required. The list of operations to perform on individual
assets.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.MutateAssetsRequest)
))
_sym_db.RegisterMessage(MutateAssetsRequest)
AssetOperation = _reflection.GeneratedProtocolMessageType('AssetOperation', (_message.Message,), dict(
DESCRIPTOR = _ASSETOPERATION,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """A single operation to create an asset. Supported asset types are
YoutubeVideoAsset, MediaBundleAsset, ImageAsset, and LeadFormAsset.
TextAsset should be created with Ad inline.
Attributes:
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
asset.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.AssetOperation)
))
_sym_db.RegisterMessage(AssetOperation)
MutateAssetsResponse = _reflection.GeneratedProtocolMessageType('MutateAssetsResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSRESPONSE,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Response message for an asset mutate.
Attributes:
results:
All results for the mutate.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.MutateAssetsResponse)
))
_sym_db.RegisterMessage(MutateAssetsResponse)
MutateAssetResult = _reflection.GeneratedProtocolMessageType('MutateAssetResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETRESULT,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """The result for the asset mutate.
Attributes:
resource_name:
The resource name returned for successful operations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.MutateAssetResult)
))
_sym_db.RegisterMessage(MutateAssetResult)
DESCRIPTOR._options = None
_GETASSETREQUEST.fields_by_name['resource_name']._options = None
_MUTATEASSETSREQUEST.fields_by_name['customer_id']._options = None
_MUTATEASSETSREQUEST.fields_by_name['operations']._options = None
_ASSETSERVICE = _descriptor.ServiceDescriptor(
name='AssetService',
full_name='google.ads.googleads.v3.services.AssetService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=700,
serialized_end=1124,
methods=[
_descriptor.MethodDescriptor(
name='GetAsset',
full_name='google.ads.googleads.v3.services.AssetService.GetAsset',
index=0,
containing_service=None,
input_type=_GETASSETREQUEST,
output_type=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET,
serialized_options=_b('\202\323\344\223\002*\022(/v3/{resource_name=customers/*/assets/*}\332A\rresource_name'),
),
_descriptor.MethodDescriptor(
name='MutateAssets',
full_name='google.ads.googleads.v3.services.AssetService.MutateAssets',
index=1,
containing_service=None,
input_type=_MUTATEASSETSREQUEST,
output_type=_MUTATEASSETSRESPONSE,
serialized_options=_b('\202\323\344\223\0020\"+/v3/customers/{customer_id=*}/assets:mutate:\001*\332A\026customer_id,operations'),
),
])
_sym_db.RegisterServiceDescriptor(_ASSETSERVICE)
DESCRIPTOR.services_by_name['AssetService'] = _ASSETSERVICE
# @@protoc_insertion_point(module_scope)
| 41.332344 | 1,840 | 0.77414 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.resources import asset_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/services/asset_service.proto',
package='google.ads.googleads.v3.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v3.servicesB\021AssetServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V3.Services\312\002 Google\\Ads\\GoogleAds\\V3\\Services\352\002$Google::Ads::GoogleAds::V3::Services'),
serialized_pb=_b('\n:google/ads/googleads_v3/proto/services/asset_service.proto\x12 google.ads.googleads.v3.services\x1a\x33google/ads/googleads_v3/proto/resources/asset.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"P\n\x0fGetAssetRequest\x12=\n\rresource_name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1egoogleads.googleapis.com/Asset\"z\n\x13MutateAssetsRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12I\n\noperations\x18\x02 \x03(\x0b\x32\x30.google.ads.googleads.v3.services.AssetOperationB\x03\xe0\x41\x02\"Y\n\x0e\x41ssetOperation\x12:\n\x06\x63reate\x18\x01 \x01(\x0b\x32(.google.ads.googleads.v3.resources.AssetH\x00\x42\x0b\n\toperation\"\\\n\x14MutateAssetsResponse\x12\x44\n\x07results\x18\x02 \x03(\x0b\x32\x33.google.ads.googleads.v3.services.MutateAssetResult\"*\n\x11MutateAssetResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xa8\x03\n\x0c\x41ssetService\x12\xa9\x01\n\x08GetAsset\x12\x31.google.ads.googleads.v3.services.GetAssetRequest\x1a(.google.ads.googleads.v3.resources.Asset\"@\x82\xd3\xe4\x93\x02*\x12(/v3/{resource_name=customers/*/assets/*}\xda\x41\rresource_name\x12\xce\x01\n\x0cMutateAssets\x12\x35.google.ads.googleads.v3.services.MutateAssetsRequest\x1a\x36.google.ads.googleads.v3.services.MutateAssetsResponse\"O\x82\xd3\xe4\x93\x02\x30\"+/v3/customers/{customer_id=*}/assets:mutate:\x01*\xda\x41\x16\x63ustomer_id,operations\x1a\x1b\xca\x41\x18googleads.googleapis.comB\xf8\x01\n$com.google.ads.googleads.v3.servicesB\x11\x41ssetServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V3.Services\xca\x02 Google\\Ads\\GoogleAds\\V3\\Services\xea\x02$Google::Ads::GoogleAds::V3::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETASSETREQUEST = _descriptor.Descriptor(
name='GetAssetRequest',
full_name='google.ads.googleads.v3.services.GetAssetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.GetAssetRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002\372A \n\036googleads.googleapis.com/Asset'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=264,
serialized_end=344,
)
_MUTATEASSETSREQUEST = _descriptor.Descriptor(
name='MutateAssetsRequest',
full_name='google.ads.googleads.v3.services.MutateAssetsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operations', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=346,
serialized_end=468,
)
_ASSETOPERATION = _descriptor.Descriptor(
name='AssetOperation',
full_name='google.ads.googleads.v3.services.AssetOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v3.services.AssetOperation.create', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v3.services.AssetOperation.operation',
index=0, containing_type=None, fields=[]),
],
serialized_start=470,
serialized_end=559,
)
_MUTATEASSETSRESPONSE = _descriptor.Descriptor(
name='MutateAssetsResponse',
full_name='google.ads.googleads.v3.services.MutateAssetsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='google.ads.googleads.v3.services.MutateAssetsResponse.results', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=561,
serialized_end=653,
)
_MUTATEASSETRESULT = _descriptor.Descriptor(
name='MutateAssetResult',
full_name='google.ads.googleads.v3.services.MutateAssetResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.MutateAssetResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=655,
serialized_end=697,
)
_MUTATEASSETSREQUEST.fields_by_name['operations'].message_type = _ASSETOPERATION
_ASSETOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET
_ASSETOPERATION.oneofs_by_name['operation'].fields.append(
_ASSETOPERATION.fields_by_name['create'])
_ASSETOPERATION.fields_by_name['create'].containing_oneof = _ASSETOPERATION.oneofs_by_name['operation']
_MUTATEASSETSRESPONSE.fields_by_name['results'].message_type = _MUTATEASSETRESULT
DESCRIPTOR.message_types_by_name['GetAssetRequest'] = _GETASSETREQUEST
DESCRIPTOR.message_types_by_name['MutateAssetsRequest'] = _MUTATEASSETSREQUEST
DESCRIPTOR.message_types_by_name['AssetOperation'] = _ASSETOPERATION
DESCRIPTOR.message_types_by_name['MutateAssetsResponse'] = _MUTATEASSETSRESPONSE
DESCRIPTOR.message_types_by_name['MutateAssetResult'] = _MUTATEASSETRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAssetRequest = _reflection.GeneratedProtocolMessageType('GetAssetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETASSETREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.GetAsset][google.ads.googleads.v3.services.AssetService.GetAsset]
Attributes:
resource_name:
Required. The resource name of the asset to fetch.
""",
))
_sym_db.RegisterMessage(GetAssetRequest)
MutateAssetsRequest = _reflection.GeneratedProtocolMessageType('MutateAssetsRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.MutateAssets][google.ads.googleads.v3.services.AssetService.MutateAssets]
Attributes:
customer_id:
Required. The ID of the customer whose assets are being
modified.
operations:
Required. The list of operations to perform on individual
assets.
""",
))
_sym_db.RegisterMessage(MutateAssetsRequest)
AssetOperation = _reflection.GeneratedProtocolMessageType('AssetOperation', (_message.Message,), dict(
DESCRIPTOR = _ASSETOPERATION,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """A single operation to create an asset. Supported asset types are
YoutubeVideoAsset, MediaBundleAsset, ImageAsset, and LeadFormAsset.
TextAsset should be created with Ad inline.
Attributes:
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
asset.
""",
))
_sym_db.RegisterMessage(AssetOperation)
MutateAssetsResponse = _reflection.GeneratedProtocolMessageType('MutateAssetsResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSRESPONSE,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Response message for an asset mutate.
Attributes:
results:
All results for the mutate.
""",
))
_sym_db.RegisterMessage(MutateAssetsResponse)
MutateAssetResult = _reflection.GeneratedProtocolMessageType('MutateAssetResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETRESULT,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """The result for the asset mutate.
Attributes:
resource_name:
The resource name returned for successful operations.
""",
))
_sym_db.RegisterMessage(MutateAssetResult)
DESCRIPTOR._options = None
_GETASSETREQUEST.fields_by_name['resource_name']._options = None
_MUTATEASSETSREQUEST.fields_by_name['customer_id']._options = None
_MUTATEASSETSREQUEST.fields_by_name['operations']._options = None
_ASSETSERVICE = _descriptor.ServiceDescriptor(
name='AssetService',
full_name='google.ads.googleads.v3.services.AssetService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=700,
serialized_end=1124,
methods=[
_descriptor.MethodDescriptor(
name='GetAsset',
full_name='google.ads.googleads.v3.services.AssetService.GetAsset',
index=0,
containing_service=None,
input_type=_GETASSETREQUEST,
output_type=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET,
serialized_options=_b('\202\323\344\223\002*\022(/v3/{resource_name=customers/*/assets/*}\332A\rresource_name'),
),
_descriptor.MethodDescriptor(
name='MutateAssets',
full_name='google.ads.googleads.v3.services.AssetService.MutateAssets',
index=1,
containing_service=None,
input_type=_MUTATEASSETSREQUEST,
output_type=_MUTATEASSETSRESPONSE,
serialized_options=_b('\202\323\344\223\0020\"+/v3/customers/{customer_id=*}/assets:mutate:\001*\332A\026customer_id,operations'),
),
])
_sym_db.RegisterServiceDescriptor(_ASSETSERVICE)
DESCRIPTOR.services_by_name['AssetService'] = _ASSETSERVICE
# @@protoc_insertion_point(module_scope)
| true | true |
f7155b122a1e35ce2adb4b96a2aadaade4b5777f | 567 | py | Python | center/app/playback/parsers/ip_parser.py | netSensTeam/netSens | 7ab5f41a7103e6c86aa6cb2eff3df68c301e48c1 | [
"MIT"
] | null | null | null | center/app/playback/parsers/ip_parser.py | netSensTeam/netSens | 7ab5f41a7103e6c86aa6cb2eff3df68c301e48c1 | [
"MIT"
] | 3 | 2021-05-10T13:50:55.000Z | 2022-03-02T08:12:46.000Z | center/app/playback/parsers/ip_parser.py | netSensTeam/netSens | 7ab5f41a7103e6c86aa6cb2eff3df68c301e48c1 | [
"MIT"
] | null | null | null | import dpkt
from parsers.utils import *
name = 'ip_parser'
def parseFunc(ts, eth):
if getMACString(eth.dst) == 'FF:FF:FF:FF:FF:FF':
return None
if isinstance(eth.data, dpkt.ip.IP):
return parseIPPacket(ts, eth)
def parseIPPacket(ts, eth):
ip = eth.data
tpa = getIPString(ip.dst)
tha = getMACString(eth.dst)
return {
'protocol': 'ip',
'layer': 3,
'time': ts,
'description': 'ip packet to (%s,%s)' % (tha, tpa),
'target': {
'ip': tpa,
'mac': tha
}
} | 21.807692 | 59 | 0.527337 | import dpkt
from parsers.utils import *
name = 'ip_parser'
def parseFunc(ts, eth):
if getMACString(eth.dst) == 'FF:FF:FF:FF:FF:FF':
return None
if isinstance(eth.data, dpkt.ip.IP):
return parseIPPacket(ts, eth)
def parseIPPacket(ts, eth):
ip = eth.data
tpa = getIPString(ip.dst)
tha = getMACString(eth.dst)
return {
'protocol': 'ip',
'layer': 3,
'time': ts,
'description': 'ip packet to (%s,%s)' % (tha, tpa),
'target': {
'ip': tpa,
'mac': tha
}
} | true | true |
f7155c6b7863bc6529f4f6f16e61fb8e883beed8 | 6,035 | py | Python | pandas_study/PandasTest.py | BreezeDawn/numpy-pandas-matplotlib- | e55dccb2442e57c2fccb2081966a7c19e731083a | [
"MIT"
] | null | null | null | pandas_study/PandasTest.py | BreezeDawn/numpy-pandas-matplotlib- | e55dccb2442e57c2fccb2081966a7c19e731083a | [
"MIT"
] | null | null | null | pandas_study/PandasTest.py | BreezeDawn/numpy-pandas-matplotlib- | e55dccb2442e57c2fccb2081966a7c19e731083a | [
"MIT"
] | 1 | 2018-10-24T07:33:51.000Z | 2018-10-24T07:33:51.000Z | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def base():
index = pd.date_range('20181023', periods=9) # 生成9个行索引
column = ['a', 'b', 'c', 'd'] # 生成4个列索引
a = np.random.randn(9, 4) # 随便生成的9行4列的数据
df = pd.DataFrame(a, index=index, columns=column)
print(df)
print(pd.DataFrame(np.arange(9).reshape((3, 3)))) # 行和列的默认索引为从0开始的数字
print(df.dtypes) # 查看每列的数据类型
print(df.index) # 查看每行的行索引
print(df.columns) # 查看每列的列索引
print(df.values) # 查看所有值
print(df.describe()) # 查看每列的详细统计 数目/平均值/....
print(df.T) # pandas的转置
print(df.sort_index(axis=1, ascending=False)) # 按索引排序 axis: 1列排序 0行排序 ascending: False反排序(从小向大) True正排序(从大向小)
print(df.sort_values(by='a')) # 把a列的值进行排序 默认从小向大
def select():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
print(df)
print(df.A) # 取出A列数据(带索引)
print(df[2:3]) # 切片取数据
print(df[2:3]) # 切片取数据
print(df['2018-10-25':'2018-10-26']) # 切片取数据
print(df.loc['2018-10-25', ['A', 'B']]) # 按照标签取数据
print(df.iloc[[1, 3, 5], 1:5]) # 按照数字取数据
print(df.ix['2018-10-25':'2018-10-26', 1:5]) # 数字标签结合取数据
print(df[df.A > 8]) # A列中的元素大于8的都显示
def update():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
df.iloc[2, 3] = -555 # 修改值 选中就能修改
df.B[df.A > 8] = 0 # A列中的元素大于8的都把B修改为0
print(df)
df['E'] = pd.Series(np.arange(6), pd.date_range('20181023', periods=6)) # 增加一列
print(df)
def handle_NaN():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
df.iloc[1, 2] = np.nan
df.iloc[0, 1] = np.nan
print(df)
print(df.dropna(axis=1, how='any')) # 丢掉缺失值(返回新的结果不影响原始数据) axis: 1丢掉列 0丢掉行 how: any任何一个是NaN就丢掉 all全是NaN就丢掉
print(df.fillna(value=0)) # 填充缺失值 填充为0
print(df.isnull()) # 检查每个元素是否缺失值,结果返回一个bool填充
print(np.any(df.isnull())) # np.any 检查至少有一个False,是的话返回True
def read_save_data():
data = pd.read_csv('./pand.csv') # 读取csv文件数据(csv内部逗号分隔)
print(data)
data.to_pickle('./pand.pickle') # 保存数据到pickle文件
def merge_DataFrame():
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(2 * np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
print(df1)
print(df2)
print(df3)
res = pd.concat([df1, df2, df3], axis=0) # axis: 0上下合并 1左右合并
print(res)
res = pd.concat([df1, df2, df3], axis=1, ignore_index=True) # ignore_index 忽略前面所有的index并重新排序
print(res)
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'], index=[1, 2, 3])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['b', 'c', 'd', 'e'], index=[2, 3, 4])
res = pd.concat([df1, df2], axis=0, join='outer', sort=True) # 上下合并,outer如果有不一样的列字段,就用NaN填充
print(res)
res = pd.concat([df1, df2], axis=0, join='inner', sort=True, ignore_index=True) # 上下合并, inner有不一样的列字段就丢掉那一列,保留相同字段
print(res)
res = pd.concat([df1, df2], axis=1, ) # 左右合并,有不一样的行字段就用NaN填充
print(res)
res = pd.concat([df1, df2], axis=1, join_axes=[df1.index]) # 左右合并,行字段按照df1的行字段来,缺失值用NaN填充,其余df1没有的字段丢掉
print(res)
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
res = df1.append(df2, ignore_index=True) # df1后面加上df2
print(res)
res = df1.append([df2, df3], ignore_index=True) # df1后面加上df2,df3
print(res)
sl = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
res = df1.append(sl, ignore_index=True)
print(res)
def merge():
left = pd.DataFrame({
'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']
})
right = pd.DataFrame({
'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']
})
print(left)
print(right)
res = pd.merge(left, right, on='key') # 左右合并,key字段保留一个
print(res)
left = pd.DataFrame({
'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']
})
right = pd.DataFrame({
'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']
})
res = pd.merge(left, right, on=['key1', 'key2'], how='inner') # 解释不清,看结果
print(res)
res = pd.merge(left, right, on=['key1', 'key2'], how='outer',indicator='indicator_column') # 不管一不一样都保留 indicator写出哪些一样哪些不一样,写字符串可改名
print(res)
res = pd.merge(left, right, on=['key1', 'key2'], how='left') # 左的on字段完全不动的保留
print(res)
res = pd.merge(left, right, on=['key1', 'key2'], how='right') # 右的on字段完全不动的保留
print(res)
res = pd.merge(left, right, left_index=True,right_index=True, how='right') # 根据索引保留
print(res)
def plot_test():
# 1000个一维数据累加
data = pd.Series(np.random.randn(1000),index=np.arange(1000))
data = data.cumsum()
# data.plot()
# plt.show()
# 矩阵
data = pd.DataFrame(np.random.randn(1000,4),index=np.arange(1000),columns=list('ABCD'))
data = data.cumsum()
print(data.head()) # head显示前五个数据,默认5个
data.plot() # 线性
ax = data.plot.scatter(x='A',y='B',color='DarkBlue', label='Class 1') # scatter 数据点 只有x,y
data.plot.scatter(x='A',y='C',color='DarkGreen', label='Class 2',ax=ax) # ax和前面的在一张图上
plt.show()
# plot method : bar条形图 hist box kde area scatter hexbin pie
if __name__ == '__main__':
# base()
# select()
# update()
# handle_NaN()
# read_save_data()
# merge_DataFrame()
# merge()
plot_test() | 35.922619 | 136 | 0.566031 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def base():
index = pd.date_range('20181023', periods=9)
column = ['a', 'b', 'c', 'd']
a = np.random.randn(9, 4)
df = pd.DataFrame(a, index=index, columns=column)
print(df)
print(pd.DataFrame(np.arange(9).reshape((3, 3))))
print(df.dtypes)
print(df.index)
print(df.columns)
print(df.values)
print(df.describe())
print(df.T)
print(df.sort_index(axis=1, ascending=False))
print(df.sort_values(by='a'))
def select():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
print(df)
print(df.A)
print(df[2:3])
print(df[2:3])
print(df['2018-10-25':'2018-10-26'])
print(df.loc['2018-10-25', ['A', 'B']])
print(df.iloc[[1, 3, 5], 1:5])
print(df.ix['2018-10-25':'2018-10-26', 1:5])
print(df[df.A > 8])
def update():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
df.iloc[2, 3] = -555
df.B[df.A > 8] = 0
print(df)
df['E'] = pd.Series(np.arange(6), pd.date_range('20181023', periods=6))
print(df)
def handle_NaN():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
df.iloc[1, 2] = np.nan
df.iloc[0, 1] = np.nan
print(df)
print(df.dropna(axis=1, how='any'))
print(df.fillna(value=0))
print(df.isnull())
print(np.any(df.isnull()))
def read_save_data():
data = pd.read_csv('./pand.csv')
print(data)
data.to_pickle('./pand.pickle')
def merge_DataFrame():
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(2 * np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
print(df1)
print(df2)
print(df3)
res = pd.concat([df1, df2, df3], axis=0)
print(res)
res = pd.concat([df1, df2, df3], axis=1, ignore_index=True)
print(res)
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'], index=[1, 2, 3])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['b', 'c', 'd', 'e'], index=[2, 3, 4])
res = pd.concat([df1, df2], axis=0, join='outer', sort=True)
print(res)
res = pd.concat([df1, df2], axis=0, join='inner', sort=True, ignore_index=True)
print(res)
res = pd.concat([df1, df2], axis=1, )
print(res)
res = pd.concat([df1, df2], axis=1, join_axes=[df1.index])
print(res)
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
res = df1.append(df2, ignore_index=True)
print(res)
res = df1.append([df2, df3], ignore_index=True)
print(res)
sl = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
res = df1.append(sl, ignore_index=True)
print(res)
def merge():
left = pd.DataFrame({
'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']
})
right = pd.DataFrame({
'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']
})
print(left)
print(right)
res = pd.merge(left, right, on='key')
print(res)
left = pd.DataFrame({
'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']
})
right = pd.DataFrame({
'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']
})
res = pd.merge(left, right, on=['key1', 'key2'], how='inner')
print(res)
res = pd.merge(left, right, on=['key1', 'key2'], how='outer',indicator='indicator_column')
print(res)
res = pd.merge(left, right, on=['key1', 'key2'], how='left')
print(res)
res = pd.merge(left, right, on=['key1', 'key2'], how='right')
print(res)
res = pd.merge(left, right, left_index=True,right_index=True, how='right')
print(res)
def plot_test():
data = pd.Series(np.random.randn(1000),index=np.arange(1000))
data = data.cumsum()
data = pd.DataFrame(np.random.randn(1000,4),index=np.arange(1000),columns=list('ABCD'))
data = data.cumsum()
print(data.head())
data.plot()
ax = data.plot.scatter(x='A',y='B',color='DarkBlue', label='Class 1')
data.plot.scatter(x='A',y='C',color='DarkGreen', label='Class 2',ax=ax)
plt.show()
if __name__ == '__main__':
plot_test() | true | true |
f7155cefed90acb6d45f43fe242cbb9b2848c3cd | 5,649 | py | Python | recognition/ArcFace/sample_config.py | santapo/insightface | d61b09938bce244c4f775cee1d9d76ff641b7b0c | [
"MIT"
] | null | null | null | recognition/ArcFace/sample_config.py | santapo/insightface | d61b09938bce244c4f775cee1d9d76ff641b7b0c | [
"MIT"
] | null | null | null | recognition/ArcFace/sample_config.py | santapo/insightface | d61b09938bce244c4f775cee1d9d76ff641b7b0c | [
"MIT"
] | null | null | null | import numpy as np
import os
from easydict import EasyDict as edict
config = edict()
config.bn_mom = 0.9
config.workspace = 256
config.emb_size = 512
config.ckpt_embedding = True
config.net_se = 0
config.net_act = 'prelu'
config.net_unit = 3
config.net_input = 1
config.net_blocks = [1, 4, 6, 2]
config.net_output = 'E'
config.net_multiplier = 1.0
config.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
config.ce_loss = True
config.fc7_lr_mult = 1.0
config.fc7_wd_mult = 1.0
config.fc7_no_bias = False
config.max_steps = 0
config.data_rand_mirror = True
config.data_cutoff = False
config.data_color = 0
config.data_images_filter = 0
config.count_flops = False
config.memonger = False #not work now
# network settings
network = edict()
network.r100 = edict()
network.r100.net_name = 'fresnet'
network.r100.num_layers = 100
network.r100fc = edict()
network.r100fc.net_name = 'fresnet'
network.r100fc.num_layers = 100
network.r100fc.net_output = 'FC'
network.r50 = edict()
network.r50.net_name = 'fresnet'
network.r50.num_layers = 50
network.r50v1 = edict()
network.r50v1.net_name = 'fresnet'
network.r50v1.num_layers = 50
network.r50v1.net_unit = 1
network.d169 = edict()
network.d169.net_name = 'fdensenet'
network.d169.num_layers = 169
network.d169.per_batch_size = 64
network.d169.densenet_dropout = 0.0
network.d201 = edict()
network.d201.net_name = 'fdensenet'
network.d201.num_layers = 201
network.d201.per_batch_size = 64
network.d201.densenet_dropout = 0.0
network.y1 = edict()
network.y1.net_name = 'fmobilefacenet'
network.y1.emb_size = 128
network.y1.net_output = 'GDC'
network.y2 = edict()
network.y2.net_name = 'fmobilefacenet'
network.y2.emb_size = 256
network.y2.net_output = 'GDC'
network.y2.net_blocks = [2, 8, 16, 4]
network.m1 = edict()
network.m1.net_name = 'fmobilenet'
network.m1.emb_size = 256
network.m1.net_output = 'GDC'
network.m1.net_multiplier = 1.0
network.m05 = edict()
network.m05.net_name = 'fmobilenet'
network.m05.emb_size = 256
network.m05.net_output = 'GDC'
network.m05.net_multiplier = 0.5
network.mnas = edict()
network.mnas.net_name = 'fmnasnet'
network.mnas.emb_size = 256
network.mnas.net_output = 'GDC'
network.mnas.net_multiplier = 1.0
network.mnas05 = edict()
network.mnas05.net_name = 'fmnasnet'
network.mnas05.emb_size = 256
network.mnas05.net_output = 'GDC'
network.mnas05.net_multiplier = 0.5
network.mnas025 = edict()
network.mnas025.net_name = 'fmnasnet'
network.mnas025.emb_size = 256
network.mnas025.net_output = 'GDC'
network.mnas025.net_multiplier = 0.25
network.vargfacenet = edict()
network.vargfacenet.net_name = 'vargfacenet'
network.vargfacenet.net_multiplier = 1.25
network.vargfacenet.emb_size = 512
network.vargfacenet.net_output = 'J'
# dataset settings
dataset = edict()
dataset.emore = edict()
dataset.emore.dataset = 'emore'
dataset.emore.dataset_path = '../datasets/faces_emore'
dataset.emore.num_classes = 85742
dataset.emore.image_shape = (112, 112, 3)
dataset.emore.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
dataset.retina = edict()
dataset.retina.dataset = 'retina'
dataset.retina.dataset_path = '../datasets/ms1m-retinaface-t1'
dataset.retina.num_classes = 93431
dataset.retina.image_shape = (112, 112, 3)
dataset.retina.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
loss = edict()
loss.softmax = edict()
loss.softmax.loss_name = 'softmax'
loss.nsoftmax = edict()
loss.nsoftmax.loss_name = 'margin_softmax'
loss.nsoftmax.loss_s = 64.0
loss.nsoftmax.loss_m1 = 1.0
loss.nsoftmax.loss_m2 = 0.0
loss.nsoftmax.loss_m3 = 0.0
loss.arcface = edict()
loss.arcface.loss_name = 'margin_softmax'
loss.arcface.loss_s = 64.0
loss.arcface.loss_m1 = 1.0
loss.arcface.loss_m2 = 0.5
loss.arcface.loss_m3 = 0.0
loss.cosface = edict()
loss.cosface.loss_name = 'margin_softmax'
loss.cosface.loss_s = 64.0
loss.cosface.loss_m1 = 1.0
loss.cosface.loss_m2 = 0.0
loss.cosface.loss_m3 = 0.35
loss.combined = edict()
loss.combined.loss_name = 'margin_softmax'
loss.combined.loss_s = 64.0
loss.combined.loss_m1 = 1.0
loss.combined.loss_m2 = 0.3
loss.combined.loss_m3 = 0.2
loss.triplet = edict()
loss.triplet.loss_name = 'triplet'
loss.triplet.images_per_identity = 5
loss.triplet.triplet_alpha = 0.3
loss.triplet.triplet_bag_size = 7200
loss.triplet.triplet_max_ap = 0.0
loss.triplet.per_batch_size = 60
loss.triplet.lr = 0.05
loss.atriplet = edict()
loss.atriplet.loss_name = 'atriplet'
loss.atriplet.images_per_identity = 5
loss.atriplet.triplet_alpha = 0.35
loss.atriplet.triplet_bag_size = 7200
loss.atriplet.triplet_max_ap = 0.0
loss.atriplet.per_batch_size = 60
loss.atriplet.lr = 0.05
# default settings
default = edict()
# default network
default.network = 'r100'
default.pretrained = ''
default.pretrained_epoch = 1
# default dataset
default.dataset = 'emore'
default.loss = 'arcface'
default.frequent = 20
default.verbose = 2000
default.kvstore = 'device'
default.end_epoch = 10000
default.lr = 0.1
default.wd = 0.0005
default.mom = 0.9
default.per_batch_size = 128
default.ckpt = 3
default.lr_steps = '100000,160000,220000'
default.models_root = './models'
def generate_config(_network, _dataset, _loss):
for k, v in loss[_loss].items():
config[k] = v
if k in default:
default[k] = v
for k, v in network[_network].items():
config[k] = v
if k in default:
default[k] = v
for k, v in dataset[_dataset].items():
config[k] = v
if k in default:
default[k] = v
config.loss = _loss
config.network = _network
config.dataset = _dataset
config.num_workers = 1
if 'DMLC_NUM_WORKER' in os.environ:
config.num_workers = int(os.environ['DMLC_NUM_WORKER'])
| 25.561086 | 63 | 0.738007 | import numpy as np
import os
from easydict import EasyDict as edict
config = edict()
config.bn_mom = 0.9
config.workspace = 256
config.emb_size = 512
config.ckpt_embedding = True
config.net_se = 0
config.net_act = 'prelu'
config.net_unit = 3
config.net_input = 1
config.net_blocks = [1, 4, 6, 2]
config.net_output = 'E'
config.net_multiplier = 1.0
config.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
config.ce_loss = True
config.fc7_lr_mult = 1.0
config.fc7_wd_mult = 1.0
config.fc7_no_bias = False
config.max_steps = 0
config.data_rand_mirror = True
config.data_cutoff = False
config.data_color = 0
config.data_images_filter = 0
config.count_flops = False
config.memonger = False
network = edict()
network.r100 = edict()
network.r100.net_name = 'fresnet'
network.r100.num_layers = 100
network.r100fc = edict()
network.r100fc.net_name = 'fresnet'
network.r100fc.num_layers = 100
network.r100fc.net_output = 'FC'
network.r50 = edict()
network.r50.net_name = 'fresnet'
network.r50.num_layers = 50
network.r50v1 = edict()
network.r50v1.net_name = 'fresnet'
network.r50v1.num_layers = 50
network.r50v1.net_unit = 1
network.d169 = edict()
network.d169.net_name = 'fdensenet'
network.d169.num_layers = 169
network.d169.per_batch_size = 64
network.d169.densenet_dropout = 0.0
network.d201 = edict()
network.d201.net_name = 'fdensenet'
network.d201.num_layers = 201
network.d201.per_batch_size = 64
network.d201.densenet_dropout = 0.0
network.y1 = edict()
network.y1.net_name = 'fmobilefacenet'
network.y1.emb_size = 128
network.y1.net_output = 'GDC'
network.y2 = edict()
network.y2.net_name = 'fmobilefacenet'
network.y2.emb_size = 256
network.y2.net_output = 'GDC'
network.y2.net_blocks = [2, 8, 16, 4]
network.m1 = edict()
network.m1.net_name = 'fmobilenet'
network.m1.emb_size = 256
network.m1.net_output = 'GDC'
network.m1.net_multiplier = 1.0
network.m05 = edict()
network.m05.net_name = 'fmobilenet'
network.m05.emb_size = 256
network.m05.net_output = 'GDC'
network.m05.net_multiplier = 0.5
network.mnas = edict()
network.mnas.net_name = 'fmnasnet'
network.mnas.emb_size = 256
network.mnas.net_output = 'GDC'
network.mnas.net_multiplier = 1.0
network.mnas05 = edict()
network.mnas05.net_name = 'fmnasnet'
network.mnas05.emb_size = 256
network.mnas05.net_output = 'GDC'
network.mnas05.net_multiplier = 0.5
network.mnas025 = edict()
network.mnas025.net_name = 'fmnasnet'
network.mnas025.emb_size = 256
network.mnas025.net_output = 'GDC'
network.mnas025.net_multiplier = 0.25
network.vargfacenet = edict()
network.vargfacenet.net_name = 'vargfacenet'
network.vargfacenet.net_multiplier = 1.25
network.vargfacenet.emb_size = 512
network.vargfacenet.net_output = 'J'
dataset = edict()
dataset.emore = edict()
dataset.emore.dataset = 'emore'
dataset.emore.dataset_path = '../datasets/faces_emore'
dataset.emore.num_classes = 85742
dataset.emore.image_shape = (112, 112, 3)
dataset.emore.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
dataset.retina = edict()
dataset.retina.dataset = 'retina'
dataset.retina.dataset_path = '../datasets/ms1m-retinaface-t1'
dataset.retina.num_classes = 93431
dataset.retina.image_shape = (112, 112, 3)
dataset.retina.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
loss = edict()
loss.softmax = edict()
loss.softmax.loss_name = 'softmax'
loss.nsoftmax = edict()
loss.nsoftmax.loss_name = 'margin_softmax'
loss.nsoftmax.loss_s = 64.0
loss.nsoftmax.loss_m1 = 1.0
loss.nsoftmax.loss_m2 = 0.0
loss.nsoftmax.loss_m3 = 0.0
loss.arcface = edict()
loss.arcface.loss_name = 'margin_softmax'
loss.arcface.loss_s = 64.0
loss.arcface.loss_m1 = 1.0
loss.arcface.loss_m2 = 0.5
loss.arcface.loss_m3 = 0.0
loss.cosface = edict()
loss.cosface.loss_name = 'margin_softmax'
loss.cosface.loss_s = 64.0
loss.cosface.loss_m1 = 1.0
loss.cosface.loss_m2 = 0.0
loss.cosface.loss_m3 = 0.35
loss.combined = edict()
loss.combined.loss_name = 'margin_softmax'
loss.combined.loss_s = 64.0
loss.combined.loss_m1 = 1.0
loss.combined.loss_m2 = 0.3
loss.combined.loss_m3 = 0.2
loss.triplet = edict()
loss.triplet.loss_name = 'triplet'
loss.triplet.images_per_identity = 5
loss.triplet.triplet_alpha = 0.3
loss.triplet.triplet_bag_size = 7200
loss.triplet.triplet_max_ap = 0.0
loss.triplet.per_batch_size = 60
loss.triplet.lr = 0.05
loss.atriplet = edict()
loss.atriplet.loss_name = 'atriplet'
loss.atriplet.images_per_identity = 5
loss.atriplet.triplet_alpha = 0.35
loss.atriplet.triplet_bag_size = 7200
loss.atriplet.triplet_max_ap = 0.0
loss.atriplet.per_batch_size = 60
loss.atriplet.lr = 0.05
default = edict()
default.network = 'r100'
default.pretrained = ''
default.pretrained_epoch = 1
default.dataset = 'emore'
default.loss = 'arcface'
default.frequent = 20
default.verbose = 2000
default.kvstore = 'device'
default.end_epoch = 10000
default.lr = 0.1
default.wd = 0.0005
default.mom = 0.9
default.per_batch_size = 128
default.ckpt = 3
default.lr_steps = '100000,160000,220000'
default.models_root = './models'
def generate_config(_network, _dataset, _loss):
for k, v in loss[_loss].items():
config[k] = v
if k in default:
default[k] = v
for k, v in network[_network].items():
config[k] = v
if k in default:
default[k] = v
for k, v in dataset[_dataset].items():
config[k] = v
if k in default:
default[k] = v
config.loss = _loss
config.network = _network
config.dataset = _dataset
config.num_workers = 1
if 'DMLC_NUM_WORKER' in os.environ:
config.num_workers = int(os.environ['DMLC_NUM_WORKER'])
| true | true |
f7155df326b76283c65218da1b03afe376a21473 | 540 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/wonderworks-33344 | 42cb504b280e4ad33598ae0d5ac64f8654e28205 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/wonderworks-33344 | 42cb504b280e4ad33598ae0d5ac64f8654e28205 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/wonderworks-33344 | 42cb504b280e4ad33598ae0d5ac64f8654e28205 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "wonderworks-33344.botics.co"
site_params = {
"name": "Wonderworks",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 20.769231 | 61 | 0.659259 | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "wonderworks-33344.botics.co"
site_params = {
"name": "Wonderworks",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| true | true |
f7155e52ae7e7e0208d80c721374978ecf74f3d8 | 6,483 | py | Python | src/HCTool-sg-1.py | iShog/huaweicloudTool | 56b231d4707570e7690b68d31b3bfd8920e995bc | [
"MIT"
] | null | null | null | src/HCTool-sg-1.py | iShog/huaweicloudTool | 56b231d4707570e7690b68d31b3bfd8920e995bc | [
"MIT"
] | null | null | null | src/HCTool-sg-1.py | iShog/huaweicloudTool | 56b231d4707570e7690b68d31b3bfd8920e995bc | [
"MIT"
] | null | null | null | # coding: utf-8
from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.http.http_config import HttpConfig
"""
# 导入指定云服务的库 huaweicloudsdk{service}
"""
from huaweicloudsdkvpc.v2 import *
from huaweicloudsdkvpc.v2.region.vpc_region import VpcRegion
"""
# 导入其它依赖库
"""
from urllib.request import urlopen
from json import load, loads
from Crypto.Cipher import AES
import time, os, base64, sys, getopt
"""
# 导入IPy
# --(Class and tools for handling of IPv4 and IPv6 addresses and networks)
#用于判断当前公网IP地址是IPv4 or IPv6
"""
import IPy
aes_key_from_cli = ''
ip_from_cli = ''
"""
# 从命令行获取解密秘钥、指定的IP地址等信息
"""
def start(argv):
if not argv:
print('Get usage info by # HCTool-XXX.py -h')
sys.exit(2)
try:
opts, args = getopt.getopt(argv, "hk:i:", ["help", "key=", "ip="])
except getopt.GetoptError:
print('Get usage info by # HCTool-XXX.py -h')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print('# HCTool-XXX.py -k <aes_key> -i <ip_addr> OR \n# HCTool-XXX.py --key=<aes_key> --ip=<ip_addr>')
sys.exit()
elif opt in ("-k", "--key"):
global aes_key_from_cli
aes_key_from_cli = arg
if aes_key_from_cli == '':
print({'create_security_group_rule_tool: error@start()': 'ERROR: key must not be NULL!'})
sys.exit(2)
else:
print({'create_security_group_rule_tool: message@start()': 'key is: ' + aes_key_from_cli})
elif opt in ("-i", "--ip"):
global ip_from_cli
ip_from_cli = arg
if ip_from_cli != '':
print({'create_security_group_rule_tool: message@start()': 'ip addr is: ' + ip_from_cli})
else:
print({'create_security_group_rule_tool: error@start()': 'ERROR: ip is NULL!'})
sys.exit(2)
"""
# en_val为经过base64编码后的密文string
"""
def decrypt_env(en_val):
(aes_key, aes_iv, aes_mode) = (aes_key_from_cli, 'knx5FQtE4XOQ', AES.MODE_GCM)
if aes_key_from_cli == '':
print({'create_security_group_rule_tool: error@decrypt_env()': 'ERROR: key must not be NULL!'})
sys.exit(2)
aes_de_instance = AES.new(aes_key.encode('utf-8'), aes_mode, aes_iv.encode('utf-8'))
plain_val = aes_de_instance.decrypt(base64.b64decode(en_val.encode('utf-8'))).decode('utf-8')
return plain_val
"""
# 获取个人云环境配置
# en_cred_dict = {'EN_AK':' ','EN_SK':' ','EN_ProjectID':' ','Region':' '}
"""
def get_cred_config():
en_env_data = os.getenv('EN_CRED_JSON_STR')
en_cred_dict = loads(en_env_data)
en_ak = en_cred_dict['EN_AK']
en_sk = en_cred_dict['EN_SK']
en_project_id = en_cred_dict['EN_ProjectID']
ak = decrypt_env(en_ak)
sk = decrypt_env(en_sk)
project_id = decrypt_env(en_project_id)
region = en_cred_dict['Region']
security_group_id = en_cred_dict['SecurityGroupID']
endpoint = "https://" + "vpc." + region + ".myhwclouds.com"
print({'create_security_group_rule_tool: message@get_cred_config()': 'current endpoint is: ' + endpoint})
return ak, sk, project_id, region, endpoint, security_group_id
"""
# demo 列出所有VPC
"""
def list_vpc(client):
try:
request = ListVpcsRequest()
response = client.list_vpcs(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
"""
# demo 列出所有SecurityGroupRules
"""
def list_sg(client):
try:
request = ListSecurityGroupRulesRequest()
response = client.list_security_group_rules(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
"""
# 创建放通通当前工具所在主机公网IP的安全组
"""
def get_pub_ip_from_inet():
ip_from_inet = ''
for num in range(1, 3):
if num == 1:
ip_from_inet = load(urlopen('https://httpbin.org/ip'))['origin']
elif num == 2:
ip_from_inet = load(urlopen('https://api.ipify.org/?format=json'))['ip']
else:
ip_from_inet = load(urlopen('https://jsonip.com'))['ip']
if IPy.IP(ip_from_inet).version() == 4:
break
return ip_from_inet
"""
# 创建放通通当前工具所在主机公网IP的安全组
"""
def create_sg(client, security_group_id):
global ip_from_cli
cur_ip = ip_from_cli
if cur_ip == '':
cur_ip = get_pub_ip_from_inet()
print({'create_security_group_rule_tool: message@create_sg()': 'current public network IP is: ' + cur_ip})
try:
if IPy.IP(cur_ip).version() == 6:
ethertype = 'IPv6'
remote_ip_prefix = cur_ip
elif IPy.IP(cur_ip).version() == 4:
ethertype = 'IPv4'
remote_ip_prefix = cur_ip
else:
print({'create_security_group_rule_tool: error@create_sg()': 'not IPv4 nor IPv6: ' + cur_ip})
sys.exit(2)
except ValueError:
print({'create_security_group_rule_tool: error@create_sg()': 'invaild IP addr: ' + cur_ip})
sys.exit(2)
loca_ltime = time.asctime(time.localtime(time.time()))
try:
rule = CreateSecurityGroupRuleOption(security_group_id, description=loca_ltime, direction="ingress",
ethertype=ethertype, remote_ip_prefix=remote_ip_prefix)
body = CreateSecurityGroupRuleRequestBody(rule)
request = CreateSecurityGroupRuleRequest(body)
response = client.create_security_group_rule(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
if __name__ == "__main__":
start(sys.argv[1:])
(ak, sk, project_id, region, endpoint, security_group_id) = get_cred_config()
config = HttpConfig.get_default_config()
config.ignore_ssl_verification = False
credentials = BasicCredentials(ak, sk, project_id)
vpc_client = VpcClient.new_builder(VpcClient) \
.with_http_config(config) \
.with_credentials(credentials) \
.with_region(VpcRegion.value_of(region)) \
.build()
# list_vpc(vpc_client)
# list_sg(vpc_client)
create_sg(vpc_client, security_group_id)
| 29.334842 | 114 | 0.637051 |
from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.http.http_config import HttpConfig
from huaweicloudsdkvpc.v2 import *
from huaweicloudsdkvpc.v2.region.vpc_region import VpcRegion
from urllib.request import urlopen
from json import load, loads
from Crypto.Cipher import AES
import time, os, base64, sys, getopt
import IPy
aes_key_from_cli = ''
ip_from_cli = ''
def start(argv):
if not argv:
print('Get usage info by # HCTool-XXX.py -h')
sys.exit(2)
try:
opts, args = getopt.getopt(argv, "hk:i:", ["help", "key=", "ip="])
except getopt.GetoptError:
print('Get usage info by # HCTool-XXX.py -h')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print('# HCTool-XXX.py -k <aes_key> -i <ip_addr> OR \n# HCTool-XXX.py --key=<aes_key> --ip=<ip_addr>')
sys.exit()
elif opt in ("-k", "--key"):
global aes_key_from_cli
aes_key_from_cli = arg
if aes_key_from_cli == '':
print({'create_security_group_rule_tool: error@start()': 'ERROR: key must not be NULL!'})
sys.exit(2)
else:
print({'create_security_group_rule_tool: message@start()': 'key is: ' + aes_key_from_cli})
elif opt in ("-i", "--ip"):
global ip_from_cli
ip_from_cli = arg
if ip_from_cli != '':
print({'create_security_group_rule_tool: message@start()': 'ip addr is: ' + ip_from_cli})
else:
print({'create_security_group_rule_tool: error@start()': 'ERROR: ip is NULL!'})
sys.exit(2)
def decrypt_env(en_val):
(aes_key, aes_iv, aes_mode) = (aes_key_from_cli, 'knx5FQtE4XOQ', AES.MODE_GCM)
if aes_key_from_cli == '':
print({'create_security_group_rule_tool: error@decrypt_env()': 'ERROR: key must not be NULL!'})
sys.exit(2)
aes_de_instance = AES.new(aes_key.encode('utf-8'), aes_mode, aes_iv.encode('utf-8'))
plain_val = aes_de_instance.decrypt(base64.b64decode(en_val.encode('utf-8'))).decode('utf-8')
return plain_val
def get_cred_config():
en_env_data = os.getenv('EN_CRED_JSON_STR')
en_cred_dict = loads(en_env_data)
en_ak = en_cred_dict['EN_AK']
en_sk = en_cred_dict['EN_SK']
en_project_id = en_cred_dict['EN_ProjectID']
ak = decrypt_env(en_ak)
sk = decrypt_env(en_sk)
project_id = decrypt_env(en_project_id)
region = en_cred_dict['Region']
security_group_id = en_cred_dict['SecurityGroupID']
endpoint = "https://" + "vpc." + region + ".myhwclouds.com"
print({'create_security_group_rule_tool: message@get_cred_config()': 'current endpoint is: ' + endpoint})
return ak, sk, project_id, region, endpoint, security_group_id
def list_vpc(client):
try:
request = ListVpcsRequest()
response = client.list_vpcs(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
def list_sg(client):
try:
request = ListSecurityGroupRulesRequest()
response = client.list_security_group_rules(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
def get_pub_ip_from_inet():
ip_from_inet = ''
for num in range(1, 3):
if num == 1:
ip_from_inet = load(urlopen('https://httpbin.org/ip'))['origin']
elif num == 2:
ip_from_inet = load(urlopen('https://api.ipify.org/?format=json'))['ip']
else:
ip_from_inet = load(urlopen('https://jsonip.com'))['ip']
if IPy.IP(ip_from_inet).version() == 4:
break
return ip_from_inet
def create_sg(client, security_group_id):
global ip_from_cli
cur_ip = ip_from_cli
if cur_ip == '':
cur_ip = get_pub_ip_from_inet()
print({'create_security_group_rule_tool: message@create_sg()': 'current public network IP is: ' + cur_ip})
try:
if IPy.IP(cur_ip).version() == 6:
ethertype = 'IPv6'
remote_ip_prefix = cur_ip
elif IPy.IP(cur_ip).version() == 4:
ethertype = 'IPv4'
remote_ip_prefix = cur_ip
else:
print({'create_security_group_rule_tool: error@create_sg()': 'not IPv4 nor IPv6: ' + cur_ip})
sys.exit(2)
except ValueError:
print({'create_security_group_rule_tool: error@create_sg()': 'invaild IP addr: ' + cur_ip})
sys.exit(2)
loca_ltime = time.asctime(time.localtime(time.time()))
try:
rule = CreateSecurityGroupRuleOption(security_group_id, description=loca_ltime, direction="ingress",
ethertype=ethertype, remote_ip_prefix=remote_ip_prefix)
body = CreateSecurityGroupRuleRequestBody(rule)
request = CreateSecurityGroupRuleRequest(body)
response = client.create_security_group_rule(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
if __name__ == "__main__":
start(sys.argv[1:])
(ak, sk, project_id, region, endpoint, security_group_id) = get_cred_config()
config = HttpConfig.get_default_config()
config.ignore_ssl_verification = False
credentials = BasicCredentials(ak, sk, project_id)
vpc_client = VpcClient.new_builder(VpcClient) \
.with_http_config(config) \
.with_credentials(credentials) \
.with_region(VpcRegion.value_of(region)) \
.build()
create_sg(vpc_client, security_group_id)
| true | true |
f7155f2dc872416bc9d84bdcf46fa337e5c2a7ff | 2,567 | py | Python | aiida/orm/nodes/data/base.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | 1 | 2019-07-31T04:08:13.000Z | 2019-07-31T04:08:13.000Z | aiida/orm/nodes/data/base.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | aiida/orm/nodes/data/base.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`Data` sub class to be used as a base for data containers that represent base python data types."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import abc
import six
try:
from functools import singledispatch # Python 3.4+
except ImportError:
from singledispatch import singledispatch
from .data import Data
__all__ = ('BaseType', 'to_aiida_type')
@singledispatch
def to_aiida_type(value):
"""
Turns basic Python types (str, int, float, bool) into the corresponding AiiDA types.
"""
raise TypeError("Cannot convert value of type {} to AiiDA type.".format(type(value)))
@six.add_metaclass(abc.ABCMeta)
class BaseType(Data):
"""`Data` sub class to be used as a base for data containers that represent base python data types."""
def __init__(self, *args, **kwargs):
try:
getattr(self, '_type')
except AttributeError:
raise RuntimeError('Derived class must define the `_type` class member')
super(BaseType, self).__init__(**kwargs)
try:
value = args[0]
except IndexError:
value = self._type() # pylint: disable=no-member
self.value = value
@property
def value(self):
return self.get_attribute('value', None)
@value.setter
def value(self, value):
self.set_attribute('value', self._type(value)) # pylint: disable=no-member
def __str__(self):
return super(BaseType, self).__str__() + ' value: {}'.format(self.value)
def __eq__(self, other):
if isinstance(other, BaseType):
return self.value == other.value
return self.value == other
def __ne__(self, other):
if isinstance(other, BaseType):
return self.value != other.value
return self.value != other
def new(self, value=None):
return self.__class__(value)
| 32.910256 | 106 | 0.586287 | true | true | |
f7155fab8edcad4a0881545f1487a35076b0b70c | 1,983 | py | Python | matchms/exporting/save_as_json.py | maximskorik/matchms | 922f5afaef123a793194bdd74391027477cbb844 | [
"Apache-2.0"
] | null | null | null | matchms/exporting/save_as_json.py | maximskorik/matchms | 922f5afaef123a793194bdd74391027477cbb844 | [
"Apache-2.0"
] | null | null | null | matchms/exporting/save_as_json.py | maximskorik/matchms | 922f5afaef123a793194bdd74391027477cbb844 | [
"Apache-2.0"
] | null | null | null | import json
from typing import List
import numpy
from ..Spectrum import Spectrum
def save_as_json(spectrums: List[Spectrum], filename: str):
"""Save spectrum(s) as json file.
:py:attr:`~matchms.Spectrum.losses` of spectrum will not be saved.
Example:
.. code-block:: python
import numpy
from matchms import Spectrum
from matchms.exporting import save_as_json
# Create dummy spectrum
spectrum = Spectrum(mz=numpy.array([100, 200, 300], dtype="float"),
intensities=numpy.array([10, 10, 500], dtype="float"),
metadata={"charge": -1,
"inchi": '"InChI=1S/C6H12"',
"precursor_mz": 222.2})
# Write spectrum to test file
save_as_json(spectrum, "test.json")
Parameters
----------
spectrums:
Expected input is a list of :py:class:`~matchms.Spectrum.Spectrum` objects.
filename:
Provide filename to save spectrum(s).
"""
if not isinstance(spectrums, list):
# Assume that input was single Spectrum
spectrums = [spectrums]
# Write to json file
with open(filename, 'w', encoding="utf-8") as fout:
json.dump(spectrums, fout, cls=SpectrumJSONEncoder)
class SpectrumJSONEncoder(json.JSONEncoder):
# See https://github.com/PyCQA/pylint/issues/414 for reference
def default(self, o):
"""JSON Encoder which can encode a :py:class:`~matchms.Spectrum.Spectrum` object"""
if isinstance(o, Spectrum):
spec = o.clone()
peaks_list = numpy.vstack((spec.peaks.mz, spec.peaks.intensities)).T.tolist()
# Convert matchms.Spectrum() into dictionaries
spectrum_dict = {key: spec.metadata[key] for key in spec.metadata}
spectrum_dict["peaks_json"] = peaks_list
return spectrum_dict
return json.JSONEncoder.default(self, o)
| 33.610169 | 91 | 0.606657 | import json
from typing import List
import numpy
from ..Spectrum import Spectrum
def save_as_json(spectrums: List[Spectrum], filename: str):
if not isinstance(spectrums, list):
spectrums = [spectrums]
with open(filename, 'w', encoding="utf-8") as fout:
json.dump(spectrums, fout, cls=SpectrumJSONEncoder)
class SpectrumJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Spectrum):
spec = o.clone()
peaks_list = numpy.vstack((spec.peaks.mz, spec.peaks.intensities)).T.tolist()
spectrum_dict = {key: spec.metadata[key] for key in spec.metadata}
spectrum_dict["peaks_json"] = peaks_list
return spectrum_dict
return json.JSONEncoder.default(self, o)
| true | true |
f7156064ea7c64a030e87c3aff9f1fc1fc6f9c9f | 455 | py | Python | cvat/apps/git/migrations/0002_auto_20190123_1305.py | raunilillemets/cvat | c083b5d3a60270121abc3f3fe596ff94ae0eb60f | [
"MIT"
] | 2 | 2020-03-16T03:41:27.000Z | 2020-03-16T03:53:01.000Z | cvat/apps/git/migrations/0002_auto_20190123_1305.py | raunilillemets/cvat | c083b5d3a60270121abc3f3fe596ff94ae0eb60f | [
"MIT"
] | 29 | 2020-01-28T23:08:18.000Z | 2022-03-12T00:05:33.000Z | cvat/apps/git/migrations/0002_auto_20190123_1305.py | raunilillemets/cvat | c083b5d3a60270121abc3f3fe596ff94ae0eb60f | [
"MIT"
] | 7 | 2021-07-27T09:15:22.000Z | 2022-03-29T21:20:00.000Z | # Generated by Django 2.1.3 on 2019-01-23 10:05
import cvat.apps.git.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('git', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='gitdata',
name='status',
field=models.CharField(default=cvat.apps.git.models.GitStatusChoice('!sync'), max_length=20),
),
]
| 22.75 | 105 | 0.621978 |
import cvat.apps.git.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('git', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='gitdata',
name='status',
field=models.CharField(default=cvat.apps.git.models.GitStatusChoice('!sync'), max_length=20),
),
]
| true | true |
f71560690c6142fd8314899effc45720b4df6fdb | 1,016 | py | Python | Instagram/urls.py | samsoluoch/Instagram | ea6305c0592c8efe173cf3e6b5f1c477650678db | [
"MIT"
] | null | null | null | Instagram/urls.py | samsoluoch/Instagram | ea6305c0592c8efe173cf3e6b5f1c477650678db | [
"MIT"
] | null | null | null | Instagram/urls.py | samsoluoch/Instagram | ea6305c0592c8efe173cf3e6b5f1c477650678db | [
"MIT"
] | null | null | null | """instagram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('clone.urls')),
url(r'^', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
url(r'^tinymce/', include('tinymce.urls')),
]
| 37.62963 | 79 | 0.687992 | from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('clone.urls')),
url(r'^', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
url(r'^tinymce/', include('tinymce.urls')),
]
| true | true |
f7156125912203ecb4aa0ec75b8a4e46334dc991 | 7,299 | py | Python | setuper desktop app/gui/mainwindow/menubar.py | dragondjf/CloudSetuper | 31aefe629f7f2d59d287981eda3e4e618ace9e9f | [
"MIT"
] | 22 | 2015-01-08T12:54:20.000Z | 2021-05-16T04:15:45.000Z | setuper desktop app/gui/mainwindow/menubar.py | dragondjf/CloudSetuper | 31aefe629f7f2d59d287981eda3e4e618ace9e9f | [
"MIT"
] | null | null | null | setuper desktop app/gui/mainwindow/menubar.py | dragondjf/CloudSetuper | 31aefe629f7f2d59d287981eda3e4e618ace9e9f | [
"MIT"
] | 11 | 2015-01-25T01:26:45.000Z | 2021-08-18T01:40:40.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from .guiconfig import collectView
class MenuBar(QtWidgets.QMenuBar):
viewID = "MenuBar"
@collectView
def __init__(self, parent):
super(MenuBar, self).__init__()
self.parent = parent
self.actionlists = {}
self.menusettings = {
'visual': False,
'menus': [
{
'name': self.tr('File'),
'trigger': 'File',
'actions': [
{
'name': self.tr('Settings'),
'icon': u'',
'shortcut': u'',
'trigger': 'Settings',
},
{
'name': self.tr('Language'),
'trigger': 'Language',
'type': 'submenu',
'actions': [
{
'name': 'English',
'icon': u'',
'shortcut': u'',
'trigger': 'English',
"checkable": True
},
{
'name': 'Chinese',
'icon': u'',
'shortcut': u'',
'trigger': 'Chinese',
"checkable": True
},
]
},
{
'name': self.tr('Exit'),
'icon': u'',
'shortcut': u'',
'trigger': 'Exit',
},
]
},
{
'name': self.tr('Screen'),
'trigger': 'Screen',
'actions': [
{
'name': self.tr('MFD3'),
'icon': u'',
'shortcut': u'',
'trigger': 'MFD3',
"checkable": True
},
{
'name': self.tr('MFD4'),
'icon': u'',
'shortcut': u'',
'trigger': 'MFD4',
"checkable": True
},
]
},
{
'name': self.tr('Device'),
'trigger': 'Device',
'actions': [
# {
# 'name': self.tr('Enable Bluetooth'),
# 'icon': u'',
# 'shortcut': u'',
# 'trigger': 'EnableBluetooth',
# },
{
'name': self.tr('Search Devices'),
'icon': u'',
'shortcut': u'',
'trigger': 'SearchDevices',
},
]
},
{
'name': self.tr('View'),
'trigger': 'View',
'actions': [
]
},
{
'name': self.tr('Report'),
'trigger': 'Test Rig',
'actions': [
{
'name': self.tr('Report'),
'icon': u'',
'shortcut': u'',
'trigger': 'TestRigAll',
},
{
'name': self.tr('Start'),
'icon': u'',
'shortcut': u'',
'trigger': 'TestRig',
}
]
},
{
'name': self.tr(' Help '),
'trigger': 'Help',
'actions': [
{
'name': self.tr('About ALE'),
'icon': u'',
'shortcut': u'',
'trigger': 'About',
},
{
'name': self.tr('Feedback to us'),
'icon': u'',
'shortcut': u'',
'trigger': 'Feedbackus',
},
]
}
]
}
self.creatMenus(self.menusettings)
def creatMenus(self, menusettings):
self.setVisible(menusettings['visual'])
for menu in menusettings['menus']:
setattr(
self,
'%smenu' % menu['trigger'],
self.addMenu(u'%s' % menu['name'])
)
submenu = getattr(self, '%smenu' % menu['trigger'])
for menuaction in menu['actions']:
if 'type' in menuaction and menuaction['type'] == "submenu":
self.createSubAction(menu['trigger'], menuaction)
else:
self.creatAction(submenu, menuaction)
def createSubAction(self, pmenu_name, menu):
childmenu = getattr(self, '%smenu' % pmenu_name)
submenu = childmenu.addMenu(u'%s' % menu['name'])
setattr(
self,
'%smenu' % menu['trigger'],
submenu)
for menuaction in menu['actions']:
self.creatAction(submenu, menuaction)
def creatAction(self, submenu, menuaction):
if 'checkable' in menuaction:
setattr(
self,
'%sAction' % menuaction['trigger'],
QtWidgets.QAction(
QtGui.QIcon(QtGui.QPixmap(menuaction['icon'])),
u'%s' % menuaction['name'],
self,
checkable=menuaction['checkable']
)
)
else:
setattr(
self,
'%sAction' % menuaction['trigger'],
QtWidgets.QAction(
QtGui.QIcon(QtGui.QPixmap(menuaction['icon'])),
u'%s' % menuaction['name'],
self,
)
)
action = getattr(self, '%sAction' % menuaction['trigger'])
action.setShortcut(QtGui.QKeySequence(menuaction['shortcut']))
submenu.addAction(action)
self.actionlists.update({menuaction['trigger']: action})
if hasattr(self.parent, 'action%s' % menuaction['trigger']):
action.triggered.connect(
getattr(self.parent, 'action%s' % menuaction['trigger'])
)
| 35.779412 | 76 | 0.307028 |
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from .guiconfig import collectView
class MenuBar(QtWidgets.QMenuBar):
viewID = "MenuBar"
@collectView
def __init__(self, parent):
super(MenuBar, self).__init__()
self.parent = parent
self.actionlists = {}
self.menusettings = {
'visual': False,
'menus': [
{
'name': self.tr('File'),
'trigger': 'File',
'actions': [
{
'name': self.tr('Settings'),
'icon': u'',
'shortcut': u'',
'trigger': 'Settings',
},
{
'name': self.tr('Language'),
'trigger': 'Language',
'type': 'submenu',
'actions': [
{
'name': 'English',
'icon': u'',
'shortcut': u'',
'trigger': 'English',
"checkable": True
},
{
'name': 'Chinese',
'icon': u'',
'shortcut': u'',
'trigger': 'Chinese',
"checkable": True
},
]
},
{
'name': self.tr('Exit'),
'icon': u'',
'shortcut': u'',
'trigger': 'Exit',
},
]
},
{
'name': self.tr('Screen'),
'trigger': 'Screen',
'actions': [
{
'name': self.tr('MFD3'),
'icon': u'',
'shortcut': u'',
'trigger': 'MFD3',
"checkable": True
},
{
'name': self.tr('MFD4'),
'icon': u'',
'shortcut': u'',
'trigger': 'MFD4',
"checkable": True
},
]
},
{
'name': self.tr('Device'),
'trigger': 'Device',
'actions': [
{
'name': self.tr('Search Devices'),
'icon': u'',
'shortcut': u'',
'trigger': 'SearchDevices',
},
]
},
{
'name': self.tr('View'),
'trigger': 'View',
'actions': [
]
},
{
'name': self.tr('Report'),
'trigger': 'Test Rig',
'actions': [
{
'name': self.tr('Report'),
'icon': u'',
'shortcut': u'',
'trigger': 'TestRigAll',
},
{
'name': self.tr('Start'),
'icon': u'',
'shortcut': u'',
'trigger': 'TestRig',
}
]
},
{
'name': self.tr(' Help '),
'trigger': 'Help',
'actions': [
{
'name': self.tr('About ALE'),
'icon': u'',
'shortcut': u'',
'trigger': 'About',
},
{
'name': self.tr('Feedback to us'),
'icon': u'',
'shortcut': u'',
'trigger': 'Feedbackus',
},
]
}
]
}
self.creatMenus(self.menusettings)
def creatMenus(self, menusettings):
self.setVisible(menusettings['visual'])
for menu in menusettings['menus']:
setattr(
self,
'%smenu' % menu['trigger'],
self.addMenu(u'%s' % menu['name'])
)
submenu = getattr(self, '%smenu' % menu['trigger'])
for menuaction in menu['actions']:
if 'type' in menuaction and menuaction['type'] == "submenu":
self.createSubAction(menu['trigger'], menuaction)
else:
self.creatAction(submenu, menuaction)
def createSubAction(self, pmenu_name, menu):
childmenu = getattr(self, '%smenu' % pmenu_name)
submenu = childmenu.addMenu(u'%s' % menu['name'])
setattr(
self,
'%smenu' % menu['trigger'],
submenu)
for menuaction in menu['actions']:
self.creatAction(submenu, menuaction)
def creatAction(self, submenu, menuaction):
if 'checkable' in menuaction:
setattr(
self,
'%sAction' % menuaction['trigger'],
QtWidgets.QAction(
QtGui.QIcon(QtGui.QPixmap(menuaction['icon'])),
u'%s' % menuaction['name'],
self,
checkable=menuaction['checkable']
)
)
else:
setattr(
self,
'%sAction' % menuaction['trigger'],
QtWidgets.QAction(
QtGui.QIcon(QtGui.QPixmap(menuaction['icon'])),
u'%s' % menuaction['name'],
self,
)
)
action = getattr(self, '%sAction' % menuaction['trigger'])
action.setShortcut(QtGui.QKeySequence(menuaction['shortcut']))
submenu.addAction(action)
self.actionlists.update({menuaction['trigger']: action})
if hasattr(self.parent, 'action%s' % menuaction['trigger']):
action.triggered.connect(
getattr(self.parent, 'action%s' % menuaction['trigger'])
)
| true | true |
f715614c9c22bf521a77e23832bea7384f69ed20 | 729 | py | Python | var/spack/repos/builtin/packages/smartmontools/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/smartmontools/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/smartmontools/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Smartmontools(AutotoolsPackage):
"""S.M.A.R.T. utility toolset."""
homepage = "https://smartmontools.sourceforge.net"
url = "https://nchc.dl.sourceforge.net/project/smartmontools/smartmontools/6.6/smartmontools-6.6.tar.gz"
version('6.6', sha256='51f43d0fb064fccaf823bbe68cf0d317d0895ff895aa353b3339a3b316a53054')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix.sbin)
env.prepend_path('LD_LIBRARY_PATH', self.prefix.usr.lib)
| 36.45 | 113 | 0.742112 |
from spack.package import *
class Smartmontools(AutotoolsPackage):
homepage = "https://smartmontools.sourceforge.net"
url = "https://nchc.dl.sourceforge.net/project/smartmontools/smartmontools/6.6/smartmontools-6.6.tar.gz"
version('6.6', sha256='51f43d0fb064fccaf823bbe68cf0d317d0895ff895aa353b3339a3b316a53054')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix.sbin)
env.prepend_path('LD_LIBRARY_PATH', self.prefix.usr.lib)
| true | true |
f71563894bd2d01be507073cac15fd01a629492a | 201,289 | py | Python | pyfakefs/fake_filesystem.py | jcwilson/pyfakefs | 95f15b7de426f6f6c75181f6d06abb6a75bba668 | [
"Apache-2.0"
] | null | null | null | pyfakefs/fake_filesystem.py | jcwilson/pyfakefs | 95f15b7de426f6f6c75181f6d06abb6a75bba668 | [
"Apache-2.0"
] | null | null | null | pyfakefs/fake_filesystem.py | jcwilson/pyfakefs | 95f15b7de426f6f6c75181f6d06abb6a75bba668 | [
"Apache-2.0"
] | null | null | null | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A fake filesystem implementation for unit testing.
:Includes:
* :py:class:`FakeFile`: Provides the appearance of a real file.
* :py:class:`FakeDirectory`: Provides the appearance of a real directory.
* :py:class:`FakeFilesystem`: Provides the appearance of a real directory
hierarchy.
* :py:class:`FakeOsModule`: Uses :py:class:`FakeFilesystem` to provide a
fake :py:mod:`os` module replacement.
* :py:class:`FakeIoModule`: Uses :py:class:`FakeFilesystem` to provide a
fake ``io`` module replacement.
* :py:class:`FakePathModule`: Faked ``os.path`` module replacement.
* :py:class:`FakeFileOpen`: Faked ``file()`` and ``open()`` function
replacements.
:Usage:
>>> from pyfakefs import fake_filesystem
>>> filesystem = fake_filesystem.FakeFilesystem()
>>> os_module = fake_filesystem.FakeOsModule(filesystem)
>>> pathname = '/a/new/dir/new-file'
Create a new file object, creating parent directory objects as needed:
>>> os_module.path.exists(pathname)
False
>>> new_file = filesystem.create_file(pathname)
File objects can't be overwritten:
>>> os_module.path.exists(pathname)
True
>>> try:
... filesystem.create_file(pathname)
... except OSError as e:
... assert e.errno == errno.EEXIST, 'unexpected errno: %d' % e.errno
... assert e.strerror == 'File exists in the fake filesystem'
Remove a file object:
>>> filesystem.remove_object(pathname)
>>> os_module.path.exists(pathname)
False
Create a new file object at the previous path:
>>> beatles_file = filesystem.create_file(pathname,
... contents='Dear Prudence\\nWon\\'t you come out to play?\\n')
>>> os_module.path.exists(pathname)
True
Use the FakeFileOpen class to read fake file objects:
>>> file_module = fake_filesystem.FakeFileOpen(filesystem)
>>> for line in file_module(pathname):
... print(line.rstrip())
...
Dear Prudence
Won't you come out to play?
File objects cannot be treated like directory objects:
>>> try:
... os_module.listdir(pathname)
... except OSError as e:
... assert e.errno == errno.ENOTDIR, 'unexpected errno: %d' % e.errno
... assert e.strerror == 'Not a directory in the fake filesystem'
The FakeOsModule can list fake directory objects:
>>> os_module.listdir(os_module.path.dirname(pathname))
['new-file']
The FakeOsModule also supports stat operations:
>>> import stat
>>> stat.S_ISREG(os_module.stat(pathname).st_mode)
True
>>> stat.S_ISDIR(os_module.stat(os_module.path.dirname(pathname)).st_mode)
True
"""
import errno
import heapq
import io
import locale
import os
import sys
import time
import uuid
from collections import namedtuple
from stat import (
S_IFREG, S_IFDIR, S_ISLNK, S_IFMT, S_ISDIR, S_IFLNK, S_ISREG, S_IFSOCK
)
from pyfakefs.deprecator import Deprecator
from pyfakefs.extra_packages import use_scandir
from pyfakefs.fake_scandir import scandir, walk
from pyfakefs.helpers import (
FakeStatResult, FileBufferIO, NullFileBufferIO,
is_int_type, is_byte_string, is_unicode_string,
make_string_path, IS_WIN, to_string)
from pyfakefs import __version__ # noqa: F401 for upwards compatibility
__pychecker__ = 'no-reimportself'
# Permission bits, matching the POSIX ``stat`` mode constants.
PERM_READ = 0o400  # Read permission bit.
PERM_WRITE = 0o200  # Write permission bit.
PERM_EXE = 0o100  # Execute permission bit.
PERM_DEF = 0o777  # Default permission bits.
PERM_DEF_FILE = 0o666  # Default permission bits (regular file)
PERM_ALL = 0o7777  # All permission bits.
# Flags describing the semantics of a file open mode string.
_OpenModes = namedtuple(
    'open_modes',
    'must_exist can_read can_write truncate append must_not_exist'
)
# Maps each mode string accepted by ``open()`` to the flag tuple used to
# build an ``_OpenModes`` instance.
_OPEN_MODE_MAP = {
    # mode name:(file must exist, can read, can write,
    #            truncate, append, must not exist)
    'r': (True, True, False, False, False, False),
    'w': (False, False, True, True, False, False),
    'a': (False, False, True, False, True, False),
    'r+': (True, True, True, False, False, False),
    'w+': (False, True, True, True, False, False),
    'a+': (False, True, True, False, True, False),
    'x': (False, False, True, False, False, True),
    'x+': (False, True, True, False, False, True)
}
# Maximum depth allowed when resolving chains of links before giving up.
if sys.platform.startswith('linux'):
    # on newer Linux systems, the default maximum recursion depth is 40;
    # we ignore older systems here
    _MAX_LINK_DEPTH = 40
else:
    # on MacOS and Windows, the maximum recursion depth is 32
    _MAX_LINK_DEPTH = 32
# Number of standard streams (stdin, stdout, stderr); file descriptors
# 0..2 are reserved for them.
NR_STD_STREAMS = 3
# Default IDs used for ``st_uid``/``st_gid`` of new files; changeable
# globally via ``set_uid()`` and ``set_gid()``.
USER_ID = 1 if IS_WIN else os.getuid()
GROUP_ID = 1 if IS_WIN else os.getgid()
def set_uid(uid):
    """Change the global user id.

    The value is used as ``st_uid`` for newly created files, and to
    decide whether the caller acts as the root user (uid 0), for whom
    some permission restrictions are ignored.

    Args:
        uid: (int) the user ID of the user calling the file system
            functions.
    """
    global USER_ID
    USER_ID = uid
def set_gid(gid):
    """Change the global group id.

    The value is only used as ``st_gid`` for newly created files; no
    permission checks are performed against it.

    Args:
        gid: (int) the group ID of the user calling the file system
            functions.
    """
    global GROUP_ID
    GROUP_ID = gid
def reset_ids():
    """Restore the default global user ID and group ID.

    The defaults are the real uid/gid of the current process, or a
    fixed non-root id (1) under Windows where these do not exist.
    """
    default_uid = 1 if IS_WIN else os.getuid()
    default_gid = 1 if IS_WIN else os.getgid()
    set_uid(default_uid)
    set_gid(default_gid)
def is_root():
    """Return `True` if the currently configured fake user is root
    (user ID 0)."""
    return USER_ID == 0
class FakeLargeFileIoException(Exception):
    """Raised when content access is attempted on a fake large file.

    Fake large files only carry a size; they have no real content, so
    read and write operations cannot be supported.
    """

    def __init__(self, file_path):
        message = ('Read and write operations not supported for '
                   'fake large file: %s' % file_path)
        super().__init__(message)
def _copy_module(old):
"""Recompiles and creates new module object."""
saved = sys.modules.pop(old.__name__, None)
new = __import__(old.__name__)
sys.modules[old.__name__] = saved
return new
class FakeFile:
    """Provides the appearance of a real file.

    Attributes currently faked out:

    * `st_mode`: user-specified, otherwise S_IFREG
    * `st_ctime`: the time.time() timestamp of the file change time (updated
      each time a file's attributes is modified).
    * `st_atime`: the time.time() timestamp when the file was last accessed.
    * `st_mtime`: the time.time() timestamp when the file was last modified.
    * `st_size`: the size of the file
    * `st_nlink`: the number of hard links to the file
    * `st_ino`: the inode number - a unique number identifying the file
    * `st_dev`: a unique number identifying the (fake) file system device
      the file belongs to
    * `st_uid`: always set to USER_ID, which can be changed globally using
      `set_uid`
    * `st_gid`: always set to GROUP_ID, which can be changed globally using
      `set_gid`

    .. note:: The resolution for `st_ctime`, `st_mtime` and `st_atime` in the
        real file system depends on the used file system (for example it is
        only 1s for HFS+ and older Linux file systems, but much higher for
        ext4 and NTFS). This is currently ignored by pyfakefs, which uses
        the resolution of `time.time()`.

        Under Windows, `st_atime` is not updated for performance reasons by
        default. pyfakefs never updates `st_atime` under Windows, assuming
        the default setting.
    """
    # Attribute names that are transparently forwarded to the
    # FakeStatResult instance via __getattr__/__setattr__ below.
    stat_types = (
        'st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid',
        'st_size', 'st_atime', 'st_mtime', 'st_ctime',
        'st_atime_ns', 'st_mtime_ns', 'st_ctime_ns'
    )

    def __init__(self, name, st_mode=S_IFREG | PERM_DEF_FILE,
                 contents=None, filesystem=None, encoding=None, errors=None,
                 side_effect=None):
        """
        Args:
            name: Name of the file/directory, without parent path information
            st_mode: The stat.S_IF* constant representing the file type (i.e.
                stat.S_IFREG, stat.S_IFDIR)
            contents: The contents of the filesystem object; should be a string
                or byte object for regular files, and a list of other
                FakeFile or FakeDirectory objects for FakeDirectory objects
            filesystem: The fake filesystem where the file is created.
            encoding: If contents is a unicode string, the encoding used
                for serialization.
            errors: The error mode used for encoding/decoding errors.
            side_effect: function handle that is executed when file is written,
                must accept the file object as an argument.
        """
        # to be backwards compatible regarding argument order, we raise on None
        if filesystem is None:
            raise ValueError('filesystem shall not be None')
        self.filesystem = filesystem
        self._side_effect = side_effect
        self.name = name
        self.stat_result = FakeStatResult(
            filesystem.is_windows_fs, USER_ID, GROUP_ID, time.time())
        self.stat_result.st_mode = st_mode
        self.encoding = encoding
        self.errors = errors or 'strict'
        # unicode contents are stored encoded; size is the byte length
        self._byte_contents = self._encode_contents(contents)
        self.stat_result.st_size = (
            len(self._byte_contents) if self._byte_contents is not None else 0)
        # incremented on every content change; used for change detection
        self.epoch = 0
        # set when the file is added to a FakeDirectory
        self.parent_dir = None
        # Linux specific: extended file system attributes
        self.xattr = {}

    @property
    def byte_contents(self):
        """Return the contents as raw byte array."""
        return self._byte_contents

    @property
    def contents(self):
        """Return the contents as string with the original encoding."""
        if isinstance(self.byte_contents, bytes):
            return self.byte_contents.decode(
                self.encoding or locale.getpreferredencoding(False),
                errors=self.errors)
        return self.byte_contents

    @property
    def st_ctime(self):
        """Return the creation time of the fake file."""
        return self.stat_result.st_ctime

    @property
    def st_atime(self):
        """Return the access time of the fake file."""
        return self.stat_result.st_atime

    @property
    def st_mtime(self):
        """Return the modification time of the fake file."""
        return self.stat_result.st_mtime

    @st_ctime.setter
    def st_ctime(self, val):
        """Set the creation time of the fake file."""
        self.stat_result.st_ctime = val

    @st_atime.setter
    def st_atime(self, val):
        """Set the access time of the fake file."""
        self.stat_result.st_atime = val

    @st_mtime.setter
    def st_mtime(self, val):
        """Set the modification time of the fake file."""
        self.stat_result.st_mtime = val

    def set_large_file_size(self, st_size):
        """Sets the self.st_size attribute and replaces self.content with None.

        Provided specifically to simulate very large files without regards
        to their content (which wouldn't fit in memory).

        Note that read/write operations with such a file raise
        :py:class:`FakeLargeFileIoException`.

        Args:
            st_size: (int) The desired file size

        Raises:
            OSError: if the st_size is not a non-negative integer,
                or if st_size exceeds the available file system space
        """
        self._check_positive_int(st_size)
        # release the disk space taken by the current contents first
        if self.st_size:
            self.size = 0
        if self.filesystem:
            self.filesystem.change_disk_usage(st_size, self.name, self.st_dev)
        self.st_size = st_size
        # no real contents are kept for large files
        self._byte_contents = None

    def _check_positive_int(self, size):
        # the size should be an positive integer value
        if not is_int_type(size) or size < 0:
            self.filesystem.raise_os_error(errno.ENOSPC, self.name)

    def is_large_file(self):
        """Return `True` if this file was initialized with size but no contents.
        """
        return self._byte_contents is None

    def _encode_contents(self, contents):
        # unicode contents are serialized using the configured (or the
        # locale default) encoding; bytes pass through unchanged
        if is_unicode_string(contents):
            contents = bytes(
                contents,
                self.encoding or locale.getpreferredencoding(False),
                self.errors)
        return contents

    def _set_initial_contents(self, contents):
        """Sets the file contents and size.
        Called internally after initial file creation.

        Args:
            contents: string, new content of file.

        Returns:
            True if the contents have been changed.

        Raises:
            OSError: if the st_size is not a non-negative integer,
                or if st_size exceeds the available file system space
        """
        contents = self._encode_contents(contents)
        changed = self._byte_contents != contents
        st_size = len(contents)
        # free the disk space used by the old contents before
        # accounting the new size
        if self._byte_contents:
            self.size = 0
        current_size = self.st_size or 0
        self.filesystem.change_disk_usage(
            st_size - current_size, self.name, self.st_dev)
        self._byte_contents = contents
        self.st_size = st_size
        self.epoch += 1
        return changed

    def set_contents(self, contents, encoding=None):
        """Sets the file contents and size and increases the modification time.
        Also executes the side_effects if available.

        Args:
            contents: (str, bytes, unicode) new content of file.
            encoding: (str) the encoding to be used for writing the contents
                if they are a unicode string.
                If not given, the locale preferred encoding is used.

        Raises:
            OSError: if `st_size` is not a non-negative integer,
                or if it exceeds the available file system space.
        """
        self.encoding = encoding
        changed = self._set_initial_contents(contents)
        if self._side_effect is not None:
            self._side_effect(self)
        return changed

    @property
    def size(self):
        """Return the size in bytes of the file contents.
        """
        return self.st_size

    @property
    def path(self):
        """Return the full path of the current object."""
        # walk up the parent chain, collecting names root-first
        names = []
        obj = self
        while obj:
            names.insert(0, obj.name)
            obj = obj.parent_dir
        sep = self.filesystem._path_separator(self.name)
        if names[0] == sep:
            names.pop(0)
            dir_path = sep.join(names)
            # Windows paths with drive have a root separator entry
            # which should be removed
            is_drive = names and len(names[0]) == 2 and names[0][1] == ':'
            if not is_drive:
                dir_path = sep + dir_path
        else:
            dir_path = sep.join(names)
        dir_path = self.filesystem.absnormpath(dir_path)
        return dir_path

    @Deprecator('property path')
    def GetPath(self):
        return self.path

    @Deprecator('property size')
    def GetSize(self):
        return self.size

    @size.setter
    def size(self, st_size):
        """Resizes file content, padding with nulls if new size exceeds the
        old size.

        Args:
            st_size: The desired size for the file.

        Raises:
            OSError: if the st_size arg is not a non-negative integer
                or if st_size exceeds the available file system space
        """
        self._check_positive_int(st_size)
        current_size = self.st_size or 0
        self.filesystem.change_disk_usage(
            st_size - current_size, self.name, self.st_dev)
        if self._byte_contents:
            if st_size < current_size:
                # shrink: truncate the contents
                self._byte_contents = self._byte_contents[:st_size]
            else:
                # grow: pad with null bytes
                self._byte_contents += b'\0' * (st_size - current_size)
        self.st_size = st_size
        self.epoch += 1

    @Deprecator('property size')
    def SetSize(self, value):
        self.size = value

    @Deprecator('property st_atime')
    def SetATime(self, st_atime):
        """Set the self.st_atime attribute.

        Args:
            st_atime: The desired access time.
        """
        self.st_atime = st_atime

    @Deprecator('property st_mtime')
    def SetMTime(self, st_mtime):
        """Set the self.st_mtime attribute.

        Args:
            st_mtime: The desired modification time.
        """
        self.st_mtime = st_mtime

    @Deprecator('property st_ctime')
    def SetCTime(self, st_ctime):
        """Set the self.st_ctime attribute.

        Args:
            st_ctime: The desired creation time.
        """
        self.st_ctime = st_ctime

    def __getattr__(self, item):
        """Forward some properties to stat_result."""
        if item in self.stat_types:
            return getattr(self.stat_result, item)
        # delegate to the default lookup; this raises AttributeError for
        # unknown attributes, as expected
        return super(FakeFile, self).__getattr__(item)

    def __setattr__(self, key, value):
        """Forward some properties to stat_result."""
        if key in self.stat_types:
            return setattr(self.stat_result, key, value)
        return super(FakeFile, self).__setattr__(key, value)

    def __str__(self):
        return '%s(%o)' % (self.name, self.st_mode)

    @Deprecator('st_ino')
    def SetIno(self, st_ino):
        """Set the self.st_ino attribute.
        Note that a unique inode is assigned automatically to a new fake file.
        This function does not guarantee uniqueness and should be used with
        caution.

        Args:
            st_ino: (int) The desired inode.
        """
        self.st_ino = st_ino
class FakeNullFile(FakeFile):
    """Fake file object for the null device (`os.devnull`).

    Its contents are always empty and anything written to it is
    silently discarded.
    """

    def __init__(self, filesystem):
        """
        Args:
            filesystem: The fake filesystem the null device belongs to.
        """
        # BUG FIX: both branches previously yielded the misspelled
        # '/dev/nul'; use the real null device name of the faked OS
        # ('nul' on Windows, '/dev/null' on POSIX, cf. os.devnull).
        devnull = 'nul' if filesystem.is_windows_fs else '/dev/null'
        super(FakeNullFile, self).__init__(
            devnull, filesystem=filesystem, contents=b'')

    @property
    def byte_contents(self):
        """Always return empty contents, regardless of writes."""
        return b''

    def _set_initial_contents(self, contents):
        # writes to the null device are discarded
        pass
# Register deprecated CamelCase aliases for the snake_case FakeFile API.
Deprecator.add(FakeFile, FakeFile.set_large_file_size, 'SetLargeFileSize')
Deprecator.add(FakeFile, FakeFile.set_contents, 'SetContents')
Deprecator.add(FakeFile, FakeFile.is_large_file, 'IsLargeFile')
class FakeFileFromRealFile(FakeFile):
    """Represents a fake file copied from the real file system.

    The contents of the file are read on demand only.
    """

    def __init__(self, file_path, filesystem, side_effect=None):
        """
        Args:
            file_path: Path to the existing file.
            filesystem: The fake filesystem where the file is created.

        Raises:
            OSError: if the file does not exist in the real file system.
            OSError: if the file already exists in the fake file system.
        """
        super(FakeFileFromRealFile, self).__init__(
            name=os.path.basename(file_path), filesystem=filesystem,
            side_effect=side_effect)
        # contents are read lazily from the real file on first access
        self.contents_read = False

    @property
    def byte_contents(self):
        """Return the file contents, loading them from the real file
        system on first access."""
        if not self.contents_read:
            self.contents_read = True
            # NOTE(review): `self.file_path` is not assigned in this class;
            # presumably it is set externally when the real file is added
            # to the fake filesystem - confirm against the caller.
            with io.open(self.file_path, 'rb') as f:
                self._byte_contents = f.read()
            # On MacOS and BSD, the above io.open() updates atime on the real file
            self.st_atime = os.stat(self.file_path).st_atime
        return self._byte_contents

    def set_contents(self, contents, encoding=None):
        # explicit contents supersede the lazy read
        self.contents_read = True
        super(FakeFileFromRealFile, self).set_contents(contents, encoding)

    def is_large_file(self):
        """The contents are never faked."""
        return False
class FakeDirectory(FakeFile):
    """Provides the appearance of a real directory."""

    def __init__(self, name, perm_bits=PERM_DEF, filesystem=None):
        """
        Args:
            name: name of the file/directory, without parent path information
            perm_bits: permission bits. defaults to 0o777.
            filesystem: if set, the fake filesystem where the directory
                is created
        """
        # contents is a dict mapping entry names to FakeFile objects
        FakeFile.__init__(
            self, name, S_IFDIR | perm_bits, {}, filesystem=filesystem)
        # directories have the link count of contained entries,
        # including '.' and '..'
        self.st_nlink += 1

    def set_contents(self, contents, encoding=None):
        # directories cannot take file contents; raise_os_error always
        # raises, so the outer `raise` is never actually reached
        raise self.filesystem.raise_os_error(errno.EISDIR, self.path)

    @property
    def contents(self):
        """Return the list of contained directory entries."""
        return self.byte_contents

    @property
    def ordered_dirs(self):
        """Return the list of contained directory entry names ordered by
        creation order.
        """
        # inode numbers increase monotonically, so they encode creation order
        return [item[0] for item in sorted(
            self.byte_contents.items(), key=lambda entry: entry[1].st_ino)]

    def add_entry(self, path_object):
        """Adds a child FakeFile to this directory.

        Args:
            path_object: FakeFile instance to add as a child of this directory.

        Raises:
            OSError: if the directory has no write permission (Posix only)
            OSError: if the file or directory to be added already exists
        """
        if (not is_root() and not self.st_mode & PERM_WRITE and
                not self.filesystem.is_windows_fs):
            raise OSError(errno.EACCES, 'Permission Denied', self.path)
        path_object_name = to_string(path_object.name)
        if path_object_name in self.contents:
            self.filesystem.raise_os_error(errno.EEXIST, self.path)
        self.contents[path_object_name] = path_object
        path_object.parent_dir = self
        # assign a fresh inode number if the entry does not have one yet
        if path_object.st_ino is None:
            self.filesystem.last_ino += 1
            path_object.st_ino = self.filesystem.last_ino
        self.st_nlink += 1
        path_object.st_nlink += 1
        path_object.st_dev = self.st_dev
        # only the first hard link accounts for disk usage
        if path_object.st_nlink == 1:
            self.filesystem.change_disk_usage(
                path_object.size, path_object.name, self.st_dev)

    def get_entry(self, pathname_name):
        """Retrieves the specified child file or directory entry.

        Args:
            pathname_name: The basename of the child object to retrieve.

        Returns:
            The fake file or directory object.

        Raises:
            KeyError: if no child exists by the specified name.
        """
        pathname_name = self._normalized_entryname(pathname_name)
        return self.contents[to_string(pathname_name)]

    def _normalized_entryname(self, pathname_name):
        # on case-insensitive filesystems, map the requested name to the
        # stored name that matches it case-insensitively
        if not self.filesystem.is_case_sensitive:
            matching_names = [name for name in self.contents
                              if name.lower() == pathname_name.lower()]
            if matching_names:
                pathname_name = matching_names[0]
        return pathname_name

    def remove_entry(self, pathname_name, recursive=True):
        """Removes the specified child file or directory.

        Args:
            pathname_name: Basename of the child object to remove.
            recursive: If True (default), the entries in contained directories
                are deleted first. Used to propagate removal errors
                (e.g. permission problems) from contained entries.

        Raises:
            KeyError: if no child exists by the specified name.
            OSError: if user lacks permission to delete the file,
                or (Windows only) the file is open.
        """
        pathname_name = self._normalized_entryname(pathname_name)
        entry = self.get_entry(pathname_name)
        if self.filesystem.is_windows_fs:
            # Windows: deny removal of read-only or currently open files
            if entry.st_mode & PERM_WRITE == 0:
                self.filesystem.raise_os_error(errno.EACCES, pathname_name)
            if self.filesystem.has_open_file(entry):
                self.filesystem.raise_os_error(errno.EACCES, pathname_name)
        else:
            # Posix: the parent needs write and execute permission
            if (not is_root() and (self.st_mode & (PERM_WRITE | PERM_EXE) !=
                                   PERM_WRITE | PERM_EXE)):
                self.filesystem.raise_os_error(errno.EACCES, pathname_name)
        if recursive and isinstance(entry, FakeDirectory):
            while entry.contents:
                entry.remove_entry(list(entry.contents)[0])
        elif entry.st_nlink == 1:
            # last hard link removed: free the disk space
            self.filesystem.change_disk_usage(
                -entry.size, pathname_name, entry.st_dev)
        self.st_nlink -= 1
        entry.st_nlink -= 1
        assert entry.st_nlink >= 0
        del self.contents[to_string(pathname_name)]

    @property
    def size(self):
        """Return the total size of all files contained in this directory tree.
        """
        return sum([item[1].size for item in self.contents.items()])

    @Deprecator('property size')
    def GetSize(self):
        return self.size

    def has_parent_object(self, dir_object):
        """Return `True` if dir_object is a direct or indirect parent
        directory, or if both are the same object."""
        obj = self
        while obj:
            if obj == dir_object:
                return True
            obj = obj.parent_dir
        return False

    def __str__(self):
        # render this directory and its children as an indented tree
        description = super(FakeDirectory, self).__str__() + ':\n'
        for item in self.contents:
            item_desc = self.contents[item].__str__()
            for line in item_desc.split('\n'):
                if line:
                    description = description + '  ' + line + '\n'
        return description
# Register deprecated CamelCase aliases for the snake_case FakeDirectory API.
Deprecator.add(FakeDirectory, FakeDirectory.add_entry, 'AddEntry')
Deprecator.add(FakeDirectory, FakeDirectory.get_entry, 'GetEntry')
Deprecator.add(FakeDirectory, FakeDirectory.set_contents, 'SetContents')
Deprecator.add(FakeDirectory, FakeDirectory.remove_entry, 'RemoveEntry')
class FakeDirectoryFromRealDirectory(FakeDirectory):
    """Represents a fake directory copied from the real file system.

    The contents of the directory are read on demand only.
    """

    def __init__(self, source_path, filesystem, read_only,
                 target_path=None):
        """
        Args:
            source_path: Full directory path.
            filesystem: The fake filesystem where the directory is created.
            read_only: If set, all files under the directory are treated
                as read-only, e.g. a write access raises an exception;
                otherwise, writing to the files changes the fake files
                only as usually.
            target_path: If given, the target path of the directory,
                otherwise the target is the same as `source_path`.

        Raises:
            OSError: if the directory does not exist in the real file system
        """
        target_path = target_path or source_path
        real_stat = os.stat(source_path)
        super(FakeDirectoryFromRealDirectory, self).__init__(
            name=os.path.split(target_path)[1],
            perm_bits=real_stat.st_mode,
            filesystem=filesystem)
        # mirror the real directory's times and ownership
        self.st_ctime = real_stat.st_ctime
        self.st_atime = real_stat.st_atime
        self.st_mtime = real_stat.st_mtime
        self.st_gid = real_stat.st_gid
        self.st_uid = real_stat.st_uid
        self.source_path = source_path
        self.read_only = read_only
        # entries are populated lazily on the first `contents` access
        self.contents_read = False

    @property
    def contents(self):
        """Return the list of contained directory entries, loading them
        if not already loaded."""
        if not self.contents_read:
            self.contents_read = True
            base = self.path
            for entry in os.listdir(self.source_path):
                source_path = os.path.join(self.source_path, entry)
                target_path = os.path.join(base, entry)
                if os.path.islink(source_path):
                    self.filesystem.add_real_symlink(source_path, target_path)
                elif os.path.isdir(source_path):
                    self.filesystem.add_real_directory(
                        source_path, self.read_only, target_path=target_path)
                else:
                    self.filesystem.add_real_file(
                        source_path, self.read_only, target_path=target_path)
        return self.byte_contents

    @property
    def size(self):
        # we cannot get the size until the contents are loaded
        if not self.contents_read:
            return 0
        return super(FakeDirectoryFromRealDirectory, self).size
class FakeFilesystem:
"""Provides the appearance of a real directory tree for unit testing.
Attributes:
path_separator: The path separator, corresponds to `os.path.sep`.
alternative_path_separator: Corresponds to `os.path.altsep`.
is_windows_fs: `True` in a real or faked Windows file system.
is_macos: `True` under MacOS, or if we are faking it.
is_case_sensitive: `True` if a case-sensitive file system is assumed.
root: The root :py:class:`FakeDirectory` entry of the file system.
cwd: The current working directory path.
umask: The umask used for newly created files, see `os.umask`.
patcher: Holds the Patcher object if created from it. Allows access
to the patcher object if using the pytest fs fixture.
"""
    def __init__(self, path_separator=os.path.sep, total_size=None,
                 patcher=None):
        """
        Args:
            path_separator: optional substitute for os.path.sep
            total_size: if not None, the total size in bytes of the
                root filesystem.
            patcher: the Patcher instance that created this filesystem,
                if any; required by `pause`/`resume`.

        Example usage to use the same path separator under all systems:

        >>> filesystem = FakeFilesystem(path_separator='/')
        """
        self.path_separator = path_separator
        self.alternative_path_separator = os.path.altsep
        self.patcher = patcher
        # a custom separator invalidates the real OS alternative separator
        if path_separator != os.sep:
            self.alternative_path_separator = None

        # is_windows_fs can be used to test the behavior of pyfakefs under
        # Windows fs on non-Windows systems and vice versa;
        # is it used to support drive letters, UNC paths and some other
        # Windows-specific features
        self.is_windows_fs = sys.platform == 'win32'

        # can be used to test some MacOS-specific behavior under other systems
        self.is_macos = sys.platform == 'darwin'

        # is_case_sensitive can be used to test pyfakefs for case-sensitive
        # file systems on non-case-sensitive systems and vice versa
        self.is_case_sensitive = not (self.is_windows_fs or self.is_macos)

        self.root = FakeDirectory(self.path_separator, filesystem=self)
        self.cwd = self.root.name

        # We can't query the current value without changing it:
        self.umask = os.umask(0o22)
        os.umask(self.umask)

        # A list of open file objects. Their position in the list is their
        # file descriptor number
        self.open_files = []
        # A heap containing all free positions in self.open_files list
        self._free_fd_heap = []
        # last used numbers for inodes (st_ino) and devices (st_dev)
        self.last_ino = 0
        self.last_dev = 0
        self.mount_points = {}
        self.add_mount_point(self.root.name, total_size)
        self._add_standard_streams()
        self.dev_null = FakeNullFile(self)
@property
def is_linux(self):
return not self.is_windows_fs and not self.is_macos
    def reset(self, total_size=None):
        """Remove all file system contents and reset the root.

        Args:
            total_size: if not None, the total size in bytes of the
                root filesystem device.
        """
        self.root = FakeDirectory(self.path_separator, filesystem=self)
        self.cwd = self.root.name
        self.open_files = []
        self._free_fd_heap = []
        self.last_ino = 0
        self.last_dev = 0
        self.mount_points = {}
        self.add_mount_point(self.root.name, total_size)
        self._add_standard_streams()
        # NOTE(review): unlike __init__, `dev_null` is not recreated here;
        # confirm this is intended.
def pause(self):
"""Pause the patching of the file system modules until `resume` is
called. After that call, all file system calls are executed in the
real file system.
Calling pause() twice is silently ignored.
Only allowed if the file system object was created by a
Patcher object. This is also the case for the pytest `fs` fixture.
Raises:
RuntimeError: if the file system was not created by a Patcher.
"""
if self.patcher is None:
raise RuntimeError('pause() can only be called from a fake file '
'system object created by a Patcher object')
self.patcher.pause()
def resume(self):
"""Resume the patching of the file system modules if `pause` has
been called before. After that call, all file system calls are
executed in the fake file system.
Does nothing if patching is not paused.
Raises:
RuntimeError: if the file system has not been created by `Patcher`.
"""
if self.patcher is None:
raise RuntimeError('resume() can only be called from a fake file '
'system object created by a Patcher object')
self.patcher.resume()
def line_separator(self):
return '\r\n' if self.is_windows_fs else '\n'
    def _error_message(self, errno):
        """Return the real OS error text for `errno`, marked as coming
        from the fake filesystem."""
        # note: the parameter shadows the `errno` module in this scope
        return os.strerror(errno) + ' in the fake filesystem'
    def raise_os_error(self, errno, filename=None, winerror=None):
        """Raises OSError.
        The error message is constructed from the given error code and shall
        start with the error string issued in the real system.
        Note: this is not true under Windows if winerror is given - in this
        case a localized message specific to winerror will be shown in the
        real file system.

        Args:
            errno: A numeric error code from the C variable errno.
            filename: The name of the affected file, if any.
            winerror: Windows only - the specific Windows error code.
        """
        message = self._error_message(errno)
        # the winerror argument is only honored when running on real
        # Windows AND faking a Windows file system
        if (winerror is not None and sys.platform == 'win32' and
                self.is_windows_fs):
            raise OSError(errno, message, filename, winerror)
        raise OSError(errno, message, filename)
@staticmethod
def _matching_string(matched, string):
"""Return the string as byte or unicode depending
on the type of matched, assuming string is an ASCII string.
"""
if string is None:
return string
if isinstance(matched, bytes) and isinstance(string, str):
return string.encode(locale.getpreferredencoding(False))
return string
def _path_separator(self, path):
"""Return the path separator as the same type as path"""
return self._matching_string(path, self.path_separator)
def _alternative_path_separator(self, path):
"""Return the alternative path separator as the same type as path"""
return self._matching_string(path, self.alternative_path_separator)
    def add_mount_point(self, path, total_size=None):
        """Add a new mount point for a filesystem device.
        The mount point gets a new unique device number.

        Args:
            path: The root path for the new mount path.
            total_size: The new total size of the added filesystem device
                in bytes. Defaults to infinite size.

        Returns:
            The newly created mount point dict.

        Raises:
            OSError: if trying to mount an existing mount point again.
        """
        path = self.absnormpath(path)
        if path in self.mount_points:
            self.raise_os_error(errno.EEXIST, path)
        self.last_dev += 1
        self.mount_points[path] = {
            'idev': self.last_dev, 'total_size': total_size, 'used_size': 0
        }
        # special handling for root path: has been created before
        if path == self.root.name:
            root_dir = self.root
            self.last_ino += 1
            root_dir.st_ino = self.last_ino
        else:
            root_dir = self.create_dir(path)
        root_dir.st_dev = self.last_dev
        return self.mount_points[path]
def _auto_mount_drive_if_needed(self, path, force=False):
if (self.is_windows_fs and
(force or not self._mount_point_for_path(path))):
drive = self.splitdrive(path)[0]
if drive:
return self.add_mount_point(path=drive)
    def _mount_point_for_path(self, path):
        """Return the mount point dict of the file system device that
        contains `path`; a drive mount point may be auto-created under
        Windows if none matches."""
        def to_str(string):
            """Convert the str, unicode or byte object to a str
            using the default encoding."""
            if string is None or isinstance(string, str):
                return string
            return string.decode(locale.getpreferredencoding(False))

        path = self.absnormpath(self._original_path(path))
        if path in self.mount_points:
            return self.mount_points[path]
        # search for the longest mount point path that is a prefix of `path`
        mount_path = self._matching_string(path, '')
        # a one-element tuple (or empty tuple if no drive), so that it can
        # be passed to `startswith` directly below
        drive = self.splitdrive(path)[:1]
        for root_path in self.mount_points:
            root_path = self._matching_string(path, root_path)
            if drive and not root_path.startswith(drive):
                continue
            if path.startswith(root_path) and len(root_path) > len(mount_path):
                mount_path = root_path
        if mount_path:
            return self.mount_points[to_str(mount_path)]
        mount_point = self._auto_mount_drive_if_needed(path, force=True)
        assert mount_point
        return mount_point
def _mount_point_for_device(self, idev):
for mount_point in self.mount_points.values():
if mount_point['idev'] == idev:
return mount_point
def get_disk_usage(self, path=None):
"""Return the total, used and free disk space in bytes as named tuple,
or placeholder values simulating unlimited space if not set.
.. note:: This matches the return value of shutil.disk_usage().
Args:
path: The disk space is returned for the file system device where
`path` resides.
Defaults to the root path (e.g. '/' on Unix systems).
"""
DiskUsage = namedtuple('usage', 'total, used, free')
if path is None:
mount_point = self.mount_points[self.root.name]
else:
mount_point = self._mount_point_for_path(path)
if mount_point and mount_point['total_size'] is not None:
return DiskUsage(mount_point['total_size'],
mount_point['used_size'],
mount_point['total_size'] -
mount_point['used_size'])
return DiskUsage(
1024 * 1024 * 1024 * 1024, 0, 1024 * 1024 * 1024 * 1024)
def set_disk_usage(self, total_size, path=None):
"""Changes the total size of the file system, preserving the used space.
Example usage: set the size of an auto-mounted Windows drive.
Args:
total_size: The new total size of the filesystem in bytes.
path: The disk space is changed for the file system device where
`path` resides.
Defaults to the root path (e.g. '/' on Unix systems).
Raises:
OSError: if the new space is smaller than the used size.
"""
if path is None:
path = self.root.name
mount_point = self._mount_point_for_path(path)
if (mount_point['total_size'] is not None and
mount_point['used_size'] > total_size):
self.raise_os_error(errno.ENOSPC, path)
mount_point['total_size'] = total_size
def change_disk_usage(self, usage_change, file_path, st_dev):
"""Change the used disk space by the given amount.
Args:
usage_change: Number of bytes added to the used space.
If negative, the used space will be decreased.
file_path: The path of the object needing the disk space.
st_dev: The device ID for the respective file system.
Raises:
OSError: if usage_change exceeds the free file system space
"""
mount_point = self._mount_point_for_device(st_dev)
if mount_point:
total_size = mount_point['total_size']
if total_size is not None:
if total_size - mount_point['used_size'] < usage_change:
self.raise_os_error(errno.ENOSPC, file_path)
mount_point['used_size'] += usage_change
    def stat(self, entry_path, follow_symlinks=True):
        """Return the os.stat-like tuple for the FakeFile object of entry_path.

        Args:
            entry_path: Path to filesystem object to retrieve.
            follow_symlinks: If False and entry_path points to a symlink,
                the link itself is inspected instead of the linked object.

        Returns:
            The FakeStatResult object corresponding to entry_path.

        Raises:
            OSError: if the filesystem object doesn't exist.
        """
        # stat should return the tuple representing return value of os.stat
        file_object = self.resolve(
            entry_path, follow_symlinks,
            allow_fd=True, check_read_perm=False)
        if not is_root():
            # make sure stat raises if a parent dir is not readable;
            # get_object performs the permission checks as a side effect
            parent_dir = file_object.parent_dir
            if parent_dir:
                self.get_object(parent_dir.path)
        self.raise_for_filepath_ending_with_separator(
            entry_path, file_object, follow_symlinks)
        # hand out a copy so later changes to the file do not alter
        # the returned stat result
        return file_object.stat_result.copy()
    def raise_for_filepath_ending_with_separator(self, entry_path,
                                                 file_object,
                                                 follow_symlinks=True,
                                                 macos_handling=False):
        """Raise OSError if `entry_path` ends with a path separator but
        does not reference a directory, applying OS-specific rules.

        Args:
            entry_path: The path used to look up `file_object`.
            file_object: The fake file object found for `entry_path`.
            follow_symlinks: If False, the symlink itself is considered.
            macos_handling: If set, apply the special MacOS symlink rules.
        """
        if self.ends_with_path_separator(entry_path):
            if S_ISLNK(file_object.st_mode):
                try:
                    link_object = self.resolve(entry_path)
                except OSError as exc:
                    # under MacOS, any error other than ENOENT is tolerated
                    if self.is_macos and exc.errno != errno.ENOENT:
                        return
                    if self.is_windows_fs:
                        self.raise_os_error(errno.EINVAL, entry_path)
                    raise
                if not follow_symlinks or self.is_windows_fs or self.is_macos:
                    file_object = link_object
            if self.is_windows_fs:
                # Windows: a trailing separator is invalid for regular files
                is_error = S_ISREG(file_object.st_mode)
            elif self.is_macos and macos_handling:
                is_error = not S_ISLNK(file_object.st_mode)
            else:
                # Posix: a trailing separator requires a directory
                is_error = not S_ISDIR(file_object.st_mode)
            if is_error:
                error_nr = (errno.EINVAL if self.is_windows_fs
                            else errno.ENOTDIR)
                self.raise_os_error(error_nr, entry_path)
    def chmod(self, path, mode, follow_symlinks=True):
        """Change the permissions of a file as encoded in integer mode.

        Args:
            path: (str) Path to the file.
            mode: (int) Permissions.
            follow_symlinks: If `False` and `path` points to a symlink,
                the link itself is affected instead of the linked object.
        """
        file_object = self.resolve(path, follow_symlinks, allow_fd=True)
        if self.is_windows_fs:
            # Windows only supports toggling write access
            if mode & PERM_WRITE:
                file_object.st_mode = file_object.st_mode | 0o222
            else:
                # clear all write bits, keep the file type and other bits
                file_object.st_mode = file_object.st_mode & 0o777555
        else:
            # replace the permission bits, keep the file type bits
            file_object.st_mode = ((file_object.st_mode & ~PERM_ALL) |
                                   (mode & PERM_ALL))
        file_object.st_ctime = time.time()
    def utime(self, path, times=None, *, ns=None, follow_symlinks=True):
        """Change the access and modified times of a file.

        Args:
            path: (str) Path to the file.
            times: 2-tuple of int or float numbers, of the form (atime, mtime)
                which is used to set the access and modified times in seconds.
                If None, both times are set to the current time.
            ns: 2-tuple of int numbers, of the form (atime, mtime) which is
                used to set the access and modified times in nanoseconds.
                If `None`, both times are set to the current time.
            follow_symlinks: If `False` and entry_path points to a symlink,
                the link itself is queried instead of the linked object.

        Raises:
            TypeError: If anything other than the expected types is
                specified in the passed `times` or `ns` tuple,
                or if the tuple length is not equal to 2.
            ValueError: If both times and ns are specified.
        """
        # argument validation is delegated; raises ValueError/TypeError
        self._handle_utime_arg_errors(ns, times)
        file_object = self.resolve(path, follow_symlinks, allow_fd=True)
        if times is not None:
            for file_time in times:
                if not isinstance(file_time, (int, float)):
                    raise TypeError('atime and mtime must be numbers')
            file_object.st_atime = times[0]
            file_object.st_mtime = times[1]
        elif ns is not None:
            for file_time in ns:
                if not isinstance(file_time, int):
                    raise TypeError('atime and mtime must be ints')
            file_object.st_atime_ns = ns[0]
            file_object.st_mtime_ns = ns[1]
        else:
            # neither given: fall back to the current time for both
            current_time = time.time()
            file_object.st_atime = current_time
            file_object.st_mtime = current_time
def _handle_utime_arg_errors(self, ns, times):
if times is not None and ns is not None:
raise ValueError(
"utime: you may specify either 'times' or 'ns' but not both")
if times is not None and len(times) != 2:
raise TypeError(
"utime: 'times' must be either a tuple of two ints or None")
if ns is not None and len(ns) != 2:
raise TypeError("utime: 'ns' must be a tuple of two ints")
@Deprecator
def SetIno(self, path, st_ino):
    """Set the self.st_ino attribute of file at 'path'.

    Note that a unique inode is assigned automatically to a new fake
    file. Using this function does not guarantee uniqueness and should
    be used with caution.

    Args:
        path: Path to file.
        st_ino: The desired inode.
    """
    target = self.get_object(path)
    target.st_ino = st_ino
def _add_open_file(self, file_obj):
"""Add file_obj to the list of open files on the filesystem.
Used internally to manage open files.
The position in the open_files array is the file descriptor number.
Args:
file_obj: File object to be added to open files list.
Returns:
File descriptor number for the file object.
"""
if self._free_fd_heap:
open_fd = heapq.heappop(self._free_fd_heap)
self.open_files[open_fd] = [file_obj]
return open_fd
self.open_files.append([file_obj])
return len(self.open_files) - 1
def _close_open_file(self, file_des):
    """Remove file object with given descriptor from the list
    of open files.

    Sets the entry in open_files to None.

    Args:
        file_des: Descriptor of file object to be removed from
            open files list.
    """
    self.open_files[file_des] = None
    # Recycle the descriptor so the next open reuses the lowest
    # free slot, mimicking real fd allocation.
    heapq.heappush(self._free_fd_heap, file_des)
def get_open_file(self, file_des):
    """Return an open file.

    Args:
        file_des: File descriptor of the open file.

    Raises:
        OSError: an invalid file descriptor.
        TypeError: filedes is not an integer.

    Returns:
        Open file object.
    """
    if not is_int_type(file_des):
        raise TypeError('an integer is required')
    # A descriptor is invalid if it is out of range or was closed.
    valid = (file_des < len(self.open_files) and
             self.open_files[file_des] is not None)
    if not valid:
        self.raise_os_error(errno.EBADF, str(file_des))
    return self.open_files[file_des][0]
def has_open_file(self, file_object):
    """Return True if the given file object is in the list of open files.

    Args:
        file_object: The FakeFile object to be checked.

    Returns:
        `True` if the file is open.
    """
    for wrappers in self.open_files:
        # Closed slots hold None; skip them.
        if wrappers and wrappers[0].get_object() == file_object:
            return True
    return False
def _normalize_path_sep(self, path):
if self.alternative_path_separator is None or not path:
return path
return path.replace(self._alternative_path_separator(path),
self._path_separator(path))
def normcase(self, path):
    """Replace all appearances of alternative path separator
    with path separator.

    Do nothing if no alternative separator is set.

    Args:
        path: The path to be normalized.

    Returns:
        The normalized path that will be used internally.
    """
    return self._normalize_path_sep(make_string_path(path))
def normpath(self, path):
    """Mimic os.path.normpath using the specified path_separator.

    Mimics os.path.normpath using the path_separator that was specified
    for this FakeFilesystem. Normalizes the path, but unlike the method
    absnormpath, does not make it absolute. Eliminates dot components
    (. and ..) and combines repeated path separators (//). Initial ..
    components are left in place for relative paths.
    If the result is an empty path, '.' is returned instead.

    This also replaces alternative path separator with path separator.
    That is, it behaves like the real os.path.normpath on Windows if
    initialized with '\\' as path separator and '/' as alternative
    separator.

    Args:
        path: (str) The path to normalize.

    Returns:
        (str) A copy of path with empty components and dot components
        removed.
    """
    path = self.normcase(path)
    drive, path = self.splitdrive(path)
    sep = self._path_separator(path)
    is_absolute_path = path.startswith(sep)
    path_components = path.split(sep)
    collapsed_path_components = []
    # '.' / '..' literals matching the str/bytes type of `path`.
    dot = self._matching_string(path, '.')
    dotdot = self._matching_string(path, '..')
    for component in path_components:
        if (not component) or (component == dot):
            continue
        if component == dotdot:
            if collapsed_path_components and (
                    collapsed_path_components[-1] != dotdot):
                # Remove an up-reference: directory/..
                collapsed_path_components.pop()
                continue
            elif is_absolute_path:
                # Ignore leading .. components if starting from the
                # root directory.
                continue
        collapsed_path_components.append(component)
    collapsed_path = sep.join(collapsed_path_components)
    if is_absolute_path:
        collapsed_path = sep + collapsed_path
    # `+` binds tighter than `or`: an empty result collapses to '.'.
    return drive + collapsed_path or dot
def _original_path(self, path):
    """Return a normalized case version of the given path for
    case-insensitive file systems. For case-sensitive file systems,
    return path unchanged.

    Args:
        path: the file path to be transformed

    Returns:
        A version of path matching the case of existing path elements.
    """
    def components_to_path():
        # Re-join the case-corrected components collected so far; any
        # components past the last existing entry are kept unchanged.
        if len(path_components) > len(normalized_components):
            normalized_components.extend(
                path_components[len(normalized_components):])
        sep = self._path_separator(path)
        normalized_path = sep.join(normalized_components)
        if path.startswith(sep) and not normalized_path.startswith(sep):
            normalized_path = sep + normalized_path
        return normalized_path

    if self.is_case_sensitive or not path:
        return path
    path_components = self._path_components(path)
    normalized_components = []
    current_dir = self.root
    for component in path_components:
        if not isinstance(current_dir, FakeDirectory):
            # Cannot descend further; keep remaining components as-is.
            return components_to_path()
        dir_name, current_dir = self._directory_content(
            current_dir, component)
        if current_dir is None or (
                isinstance(current_dir, FakeDirectory) and
                current_dir._byte_contents is None and
                current_dir.st_size == 0):
            return components_to_path()
        normalized_components.append(dir_name)
    return components_to_path()
def absnormpath(self, path):
    """Absolutize and minimalize the given path.

    Forces all relative paths to be absolute, and normalizes the path
    to eliminate dot and empty components.

    Args:
        path: Path to normalize.

    Returns:
        The normalized path relative to the current working directory,
        or the root directory if path is empty.
    """
    path = self.normcase(path)
    cwd = self._matching_string(path, self.cwd)
    dot = self._matching_string(path, '.')
    if not path:
        path = self.path_separator
    if path == dot:
        path = cwd
    elif not self._starts_with_root_path(path):
        # Prefix relative paths with cwd, if cwd is not root.
        root_name = self._matching_string(path, self.root.name)
        empty = self._matching_string(path, '')
        prefix = cwd if cwd and cwd != root_name else empty
        path = self._path_separator(path).join((prefix, path))
    if path == dot:
        path = cwd
    return self.normpath(path)
def splitpath(self, path):
    """Mimic os.path.splitpath using the specified path_separator.

    Mimics os.path.splitpath using the path_separator that was
    specified for this FakeFilesystem.

    Args:
        path: (str) The path to split.

    Returns:
        (str) A duple (pathname, basename) for which pathname does not
        end with a slash, and basename does not contain a slash.
    """
    path = self.normcase(path)
    sep = self._path_separator(path)
    path_components = path.split(sep)
    if not path_components:
        return ('', '')
    starts_with_drive = self._starts_with_drive_letter(path)
    basename = path_components.pop()
    colon = self._matching_string(path, ':')
    if not path_components:
        # No separator in the path - e.g. 'c:foo' on Windows.
        if starts_with_drive:
            components = basename.split(colon)
            return (components[0] + colon, components[1])
        return ('', basename)
    for component in path_components:
        if component:
            # The path is not the root; it contains a non-separator
            # component. Strip all trailing separators.
            while not path_components[-1]:
                path_components.pop()
            if starts_with_drive:
                if not path_components:
                    # Only the drive remains - e.g. 'c:foo'.
                    components = basename.split(colon)
                    return (components[0] + colon, components[1])
                if (len(path_components) == 1 and
                        path_components[0].endswith(colon)):
                    # Drive plus root - e.g. 'c:/foo'.
                    return (path_components[0] + sep, basename)
            return (sep.join(path_components), basename)
    # Root path. Collapse all leading separators.
    return (sep, basename)
def splitdrive(self, path):
    """Splits the path into the drive part and the rest of the path.

    Taken from Windows specific implementation in Python 3.5
    and slightly adapted.

    Args:
        path: the full path to be splitpath.

    Returns:
        A tuple of the drive part and the rest of the path, or of
        an empty string and the full path if drive letters are
        not supported or no drive is present.
    """
    path = make_string_path(path)
    if self.is_windows_fs:
        if len(path) >= 2:
            path = self.normcase(path)
            sep = self._path_separator(path)
            # UNC path handling
            if (path[0:2] == sep * 2) and (
                    path[2:3] != sep):
                # UNC path handling - splits off the mount point
                # instead of the drive
                sep_index = path.find(sep, 2)
                if sep_index == -1:
                    return path[:0], path
                sep_index2 = path.find(sep, sep_index + 1)
                if sep_index2 == sep_index + 1:
                    # A separator right after the host part is invalid.
                    return path[:0], path
                if sep_index2 == -1:
                    sep_index2 = len(path)
                return path[:sep_index2], path[sep_index2:]
            if path[1:2] == self._matching_string(path, ':'):
                return path[:2], path[2:]
    # Drive letters unsupported, or no drive present: empty drive part.
    return path[:0], path
def _join_paths_with_drive_support(self, *all_paths):
    """Join paths with Windows drive-letter awareness.

    Taken from Python 3.5 os.path.join() code in ntpath.py
    and slightly adapted.
    """
    base_path = all_paths[0]
    paths_to_add = all_paths[1:]
    sep = self._path_separator(base_path)
    seps = [sep, self._alternative_path_separator(base_path)]
    result_drive, result_path = self.splitdrive(base_path)
    for path in paths_to_add:
        drive_part, path_part = self.splitdrive(path)
        if path_part and path_part[:1] in seps:
            # Second path is absolute
            if drive_part or not result_drive:
                result_drive = drive_part
            result_path = path_part
            continue
        elif drive_part and drive_part != result_drive:
            if (self.is_case_sensitive or
                    drive_part.lower() != result_drive.lower()):
                # Different drives => ignore the first path entirely
                result_drive = drive_part
                result_path = path_part
                continue
            # Same drive in different case
            result_drive = drive_part
        # Second path is relative to the first
        if result_path and result_path[-1:] not in seps:
            result_path = result_path + sep
        result_path = result_path + path_part
    # add separator between UNC and non-absolute path
    colon = self._matching_string(base_path, ':')
    if (result_path and result_path[:1] not in seps and
            result_drive and result_drive[-1:] != colon):
        return result_drive + sep + result_path
    return result_drive + result_path
def joinpaths(self, *paths):
    """Mimic os.path.join using the specified path_separator.

    Args:
        *paths: (str) Zero or more paths to join.

    Returns:
        (str) The paths joined by the path separator, starting with
        the last absolute path in paths.
    """
    if sys.version_info >= (3, 6):
        paths = [os.fspath(path) for path in paths]
    if len(paths) == 1:
        return paths[0]
    if self.is_windows_fs:
        return self._join_paths_with_drive_support(*paths)
    sep = self._path_separator(paths[0])
    segments = []
    for segment in paths:
        if self._starts_with_root_path(segment):
            # An absolute path resets everything collected so far.
            segments = [segment]
            continue
        if segments and not segments[-1].endswith(sep):
            segments.append(sep)
        if segment:
            segments.append(segment)
    return self._matching_string(paths[0], '').join(segments)
def _path_components(self, path):
"""Breaks the path into a list of component names.
Does not include the root directory as a component, as all paths
are considered relative to the root directory for the FakeFilesystem.
Callers should basically follow this pattern:
.. code:: python
file_path = self.absnormpath(file_path)
path_components = self._path_components(file_path)
current_dir = self.root
for component in path_components:
if component not in current_dir.contents:
raise OSError
_do_stuff_with_component(current_dir, component)
current_dir = current_dir.get_entry(component)
Args:
path: Path to tokenize.
Returns:
The list of names split from path.
"""
if not path or path == self._path_separator(path):
return []
drive, path = self.splitdrive(path)
path_components = path.split(self._path_separator(path))
assert drive or path_components
if not path_components[0]:
if len(path_components) > 1 and not path_components[1]:
path_components = []
else:
# This is an absolute path.
path_components = path_components[1:]
if drive:
path_components.insert(0, drive)
return path_components
def _starts_with_drive_letter(self, file_path):
"""Return True if file_path starts with a drive letter.
Args:
file_path: the full path to be examined.
Returns:
`True` if drive letter support is enabled in the filesystem and
the path starts with a drive letter.
"""
colon = self._matching_string(file_path, ':')
return (self.is_windows_fs and len(file_path) >= 2 and
file_path[:1].isalpha and (file_path[1:2]) == colon)
def _starts_with_root_path(self, file_path):
root_name = self._matching_string(file_path, self.root.name)
file_path = self._normalize_path_sep(file_path)
return (file_path.startswith(root_name) or
not self.is_case_sensitive and file_path.lower().startswith(
root_name.lower()) or
self._starts_with_drive_letter(file_path))
def _is_root_path(self, file_path):
root_name = self._matching_string(file_path, self.root.name)
return (file_path == root_name or not self.is_case_sensitive and
file_path.lower() == root_name.lower() or
2 <= len(file_path) <= 3 and
self._starts_with_drive_letter(file_path))
def ends_with_path_separator(self, file_path):
    """Return True if ``file_path`` ends with a valid path separator."""
    if is_int_type(file_path):
        # File descriptors never end with a separator.
        return False
    file_path = make_string_path(file_path)
    if not file_path:
        return file_path
    if file_path in (self.path_separator, self.alternative_path_separator):
        # The bare root separator does not count.
        return False
    if file_path.endswith(self._path_separator(file_path)):
        return True
    return (self.alternative_path_separator is not None and
            file_path.endswith(
                self._alternative_path_separator(file_path)))
def is_filepath_ending_with_separator(self, path):
    """Return True if `path` ends with a path separator and the path
    stripped of that separator points to a regular file."""
    if self.ends_with_path_separator(path):
        return self.isfile(self._path_without_trailing_separators(path))
    return False
def _directory_content(self, directory, component):
    """Return a (name, entry) pair for `component` inside `directory`.

    Matches case-insensitively if the filesystem is case-insensitive.
    Returns (None, None) if `directory` is not a fake directory or the
    component is not found.
    """
    if not isinstance(directory, FakeDirectory):
        return None, None
    if component in directory.contents:
        return component, directory.contents[component]
    if not self.is_case_sensitive:
        # Fall back to a case-folded scan of the entries.
        wanted = component.lower()
        for name, entry in directory.contents.items():
            if name.lower() == wanted:
                return name, entry
    return None, None
def exists(self, file_path, check_link=False):
    """Return true if a path points to an existing file system object.

    Args:
        file_path: The path to examine.
        check_link: If `True`, a broken symlink is considered to exist.

    Returns:
        (bool) True if the corresponding object exists.

    Raises:
        TypeError: if file_path is None.
    """
    if check_link and self.islink(file_path):
        return True
    file_path = make_string_path(file_path)
    if file_path is None:
        raise TypeError
    if not file_path:
        return False
    if file_path == self.dev_null.name:
        # The null device only exists under Windows from Python 3.8 on.
        return not self.is_windows_fs or sys.version_info >= (3, 8)
    try:
        if self.is_filepath_ending_with_separator(file_path):
            # A regular file path given with a trailing separator
            # does not exist.
            return False
        file_path = self.resolve_path(file_path)
    except OSError:
        return False
    if file_path == self.root.name:
        return True

    path_components = self._path_components(file_path)
    current_dir = self.root
    for component in path_components:
        # Walk down the tree one component at a time.
        current_dir = self._directory_content(current_dir, component)[1]
        if not current_dir:
            return False
    return True
def resolve_path(self, file_path, allow_fd=False, raw_io=True):
    """Follow a path, resolving symlinks.

    ResolvePath traverses the filesystem along the specified file path,
    resolving file names and symbolic links until all elements of the
    path are exhausted, or we reach a file which does not exist.
    If all the elements are not consumed, they just get appended to the
    path resolved so far.
    This gives us the path which is as resolved as it can be, even if
    the file does not exist.

    This behavior mimics Unix semantics, and is best shown by example.
    Given a file system that looks like this:

          /a/b/
          /a/b/c -> /a/b2          c is a symlink to /a/b2
          /a/b2/x
          /a/c   -> ../d
          /a/x   -> y

     Then:
          /a/b/x      =>  /a/b/x
          /a/c        =>  /a/d
          /a/x        =>  /a/y
          /a/b/c/d/e  =>  /a/b2/d/e

    Args:
        file_path: The path to examine.
        allow_fd: If `True`, `file_path` may be open file descriptor.
        raw_io: `True` if called from low-level I/O functions.

    Returns:
        The resolved_path (string) or None.

    Raises:
        TypeError: if `file_path` is `None`.
        OSError: if `file_path` is '' or a part of the path doesn't
            exist.
    """
    if allow_fd and isinstance(file_path, int):
        return self.get_open_file(file_path).get_object().path
    file_path = make_string_path(file_path)
    if file_path is None:
        # file.open(None) raises TypeError, so mimic that.
        raise TypeError('Expected file system path string, received None')
    if not file_path or not self._valid_relative_path(file_path):
        # file.open('') raises OSError, so mimic that, and validate that
        # all parts of a relative path exist.
        self.raise_os_error(errno.ENOENT, file_path)
    file_path = self.absnormpath(self._original_path(file_path))
    if self._is_root_path(file_path):
        return file_path
    if file_path == self.dev_null.name:
        return file_path
    path_components = self._path_components(file_path)
    resolved_components = self._resolve_components(path_components, raw_io)
    return self._components_to_path(resolved_components)
def _components_to_path(self, component_folders):
sep = (self._path_separator(component_folders[0])
if component_folders else self.path_separator)
path = sep.join(component_folders)
if not self._starts_with_root_path(path):
path = sep + path
return path
def _resolve_components(self, path_components, raw_io):
    """Resolve the given path components against the fake filesystem,
    following any symlinks encountered on the way.

    Args:
        path_components: the path components to resolve.
        raw_io: `True` if called from low-level I/O functions
            (not used in this step).

    Returns:
        The list of resolved path components.

    Raises:
        OSError (ELOOP): if too many symlink levels are followed,
            which indicates a symlink cycle.
    """
    current_dir = self.root
    link_depth = 0
    resolved_components = []
    while path_components:
        component = path_components.pop(0)
        resolved_components.append(component)
        current_dir = self._directory_content(current_dir, component)[1]
        if current_dir is None:
            # The component of the path at this point does not actually
            # exist in the folder. We can't resolve the path any more.
            # It is legal to link to a file that does not yet exist, so
            # rather than raise an error, we just append the remaining
            # components to what return path we have built so far and
            # return that.
            resolved_components.extend(path_components)
            break

        # Resolve any possible symlinks in the current path component.
        if S_ISLNK(current_dir.st_mode):
            # This link_depth check is not really meant to be an
            # accurate check. It is just a quick hack to prevent us from
            # looping forever on cycles.
            if link_depth > _MAX_LINK_DEPTH:
                self.raise_os_error(errno.ELOOP,
                                    self._components_to_path(
                                        resolved_components))
            link_path = self._follow_link(resolved_components, current_dir)

            # Following the link might result in the complete
            # replacement of the current_dir, so we evaluate the entire
            # resulting path.
            target_components = self._path_components(link_path)
            path_components = target_components + path_components
            resolved_components = []
            current_dir = self.root
            link_depth += 1
    return resolved_components
def _valid_relative_path(self, file_path):
    """Return True if every directory referenced before a '..'
    component exists. Windows does not perform this validation."""
    if self.is_windows_fs:
        return True
    slash_dotdot = self._matching_string(
        file_path, self.path_separator + '..')
    while file_path and slash_dotdot in file_path:
        # Strip the last '/..' and verify the remaining prefix exists.
        file_path = file_path[:file_path.rfind(slash_dotdot)]
        if not self.exists(self.absnormpath(file_path)):
            return False
    return True
def _follow_link(self, link_path_components, link):
    """Follow a link w.r.t. a path resolved so far.

    The component is either a real file, which is a no-op, or a
    symlink. In the case of a symlink, we have to modify the path
    as built up so far
      /a/b => ../c   should yield /a/../c
          (which will normalize to /a/c)
      /a/b => x      should yield /a/x
      /a/b => /x/y/z should yield /x/y/z

    The modified path may land us in a new spot which is itself a
    link, so we may repeat the process.

    Args:
        link_path_components: The resolved path built up to the link
            so far.
        link: The link object itself.

    Returns:
        (string) The updated path resolved after following the link.

    Raises:
        OSError: if there are too many levels of symbolic link
    """
    link_path = link.contents
    # ignore UNC prefix for local files
    if self.is_windows_fs and link_path.startswith('\\\\?\\'):
        link_path = link_path[4:]
    sep = self._path_separator(link_path)
    # For links to absolute paths, we want to throw out everything
    # in the path built so far and replace with the link. For relative
    # links, we have to append the link to what we have so far,
    if not self._starts_with_root_path(link_path):
        # Relative path. Append remainder of path to what we have
        # processed so far, excluding the name of the link itself.
        # /a/b => ../c  should yield /a/../c
        # (which will normalize to /c)
        # /a/b => d should yield a/d
        components = link_path_components[:-1]
        components.append(link_path)
        link_path = sep.join(components)
    # Don't call self.NormalizePath(), as we don't want to prepend
    # self.cwd.
    return self.normpath(link_path)
def get_object_from_normpath(self, file_path, check_read_perm=True):
    """Search for the specified filesystem object within the fake
    filesystem.

    Args:
        file_path: Specifies target FakeFile object to retrieve, with a
            path that has already been normalized/resolved.
        check_read_perm: If True, raises OSError if a parent directory
            does not have read permission

    Returns:
        The FakeFile object corresponding to file_path.

    Raises:
        OSError: if the object is not found.
    """
    file_path = make_string_path(file_path)
    if file_path == self.root.name:
        return self.root
    if file_path == self.dev_null.name:
        return self.dev_null

    file_path = self._original_path(file_path)
    path_components = self._path_components(file_path)
    target_object = self.root
    try:
        for component in path_components:
            if S_ISLNK(target_object.st_mode):
                # Resolve intermediate symlinks before descending.
                target_object = self.resolve(target_object.contents)
            if not S_ISDIR(target_object.st_mode):
                # Cannot descend into a non-directory; the errno
                # differs between Windows and POSIX.
                if not self.is_windows_fs:
                    self.raise_os_error(errno.ENOTDIR, file_path)
                self.raise_os_error(errno.ENOENT, file_path)
            target_object = target_object.get_entry(component)
            if (not is_root() and check_read_perm and target_object and
                    not target_object.st_mode & PERM_READ):
                self.raise_os_error(errno.EACCES, target_object.path)
    except KeyError:
        self.raise_os_error(errno.ENOENT, file_path)
    return target_object
def get_object(self, file_path, check_read_perm=True):
    """Search for the specified filesystem object within the fake
    filesystem.

    Args:
        file_path: Specifies the target FakeFile object to retrieve.
        check_read_perm: If True, raises OSError if a parent directory
            does not have read permission

    Returns:
        The FakeFile object corresponding to `file_path`.

    Raises:
        OSError: if the object is not found.
    """
    normalized_path = self.absnormpath(
        self._original_path(make_string_path(file_path)))
    return self.get_object_from_normpath(normalized_path, check_read_perm)
def resolve(self, file_path, follow_symlinks=True, allow_fd=False,
            check_read_perm=True):
    """Search for the specified filesystem object, resolving all links.

    Args:
        file_path: Specifies the target FakeFile object to retrieve.
        follow_symlinks: If `False`, the link itself is resolved,
            otherwise the object linked to.
        allow_fd: If `True`, `file_path` may be an open file descriptor
        check_read_perm: If True, raises OSError if a parent directory
            does not have read permission

    Returns:
        The FakeFile object corresponding to `file_path`.

    Raises:
        OSError: if the object is not found.
    """
    if isinstance(file_path, int):
        if not allow_fd:
            raise TypeError('path should be string, bytes or '
                            'os.PathLike (if supported), not int')
        return self.get_open_file(file_path).get_object()
    if not follow_symlinks:
        # Only resolve links in the parent path (lstat semantics).
        return self.lresolve(file_path)
    path = make_string_path(file_path)
    return self.get_object_from_normpath(
        self.resolve_path(path, check_read_perm), check_read_perm)
def lresolve(self, path):
    """Search for the specified object, resolving only parent links.

    This is analogous to the stat/lstat difference. This resolves links
    *to* the object but not of the final object itself.

    Args:
        path: Specifies target FakeFile object to retrieve.

    Returns:
        The FakeFile object corresponding to path.

    Raises:
        OSError: if the object is not found.
    """
    path = make_string_path(path)
    if not path:
        raise OSError(errno.ENOENT, path)
    if path == self.root.name:
        # The root directory will never be a link
        return self.root

    # remove trailing separator
    path = self._path_without_trailing_separators(path)
    if path == self._matching_string(path, '.'):
        path = self.cwd
    path = self._original_path(path)

    parent_directory, child_name = self.splitpath(path)
    if not parent_directory:
        # A bare name is relative to the current working directory.
        parent_directory = self.cwd
    try:
        parent_obj = self.resolve(parent_directory)
        assert parent_obj
        if not isinstance(parent_obj, FakeDirectory):
            # Parent is a regular file: errno differs by platform.
            if not self.is_windows_fs and isinstance(parent_obj, FakeFile):
                self.raise_os_error(errno.ENOTDIR, path)
            self.raise_os_error(errno.ENOENT, path)
        if not parent_obj.st_mode & PERM_READ:
            self.raise_os_error(errno.EACCES, parent_directory)
        return (parent_obj.get_entry(child_name) if child_name
                else parent_obj)
    except KeyError:
        self.raise_os_error(errno.ENOENT, path)
def add_object(self, file_path, file_object):
    """Add a fake file or directory into the filesystem at file_path.

    Args:
        file_path: The path to the file to be added relative to self.
        file_object: File or directory to add.

    Raises:
        OSError: if file_path does not correspond to a directory.
    """
    target_directory = (
        self.root if not file_path else self.resolve(file_path))
    if not S_ISDIR(target_directory.st_mode):
        # Windows reports a missing path, POSIX a non-directory.
        error = errno.ENOENT if self.is_windows_fs else errno.ENOTDIR
        self.raise_os_error(error, file_path)
    target_directory.add_entry(file_object)
def rename(self, old_file_path, new_file_path, force_replace=False):
    """Renames a FakeFile object at old_file_path to new_file_path,
    preserving all properties.

    Args:
        old_file_path: Path to filesystem object to rename.
        new_file_path: Path to where the filesystem object will live
            after this call.
        force_replace: If set and destination is an existing file, it
            will be replaced even under Windows if the user has
            permissions, otherwise replacement happens under Unix only.

    Raises:
        OSError: if old_file_path does not exist.
        OSError: if new_file_path is an existing directory
            (Windows, or Posix if old_file_path points to a regular
            file)
        OSError: if old_file_path is a directory and new_file_path a
            file
        OSError: if new_file_path is an existing file and force_replace
            not set (Windows only).
        OSError: if new_file_path is an existing file and could not be
            removed (Posix, or Windows with force_replace set).
        OSError: if dirname(new_file_path) does not exist.
        OSError: if the file would be moved to another filesystem
            (e.g. mount point).
    """
    ends_with_sep = self.ends_with_path_separator(old_file_path)
    old_file_path = self.absnormpath(old_file_path)
    new_file_path = self.absnormpath(new_file_path)
    if not self.exists(old_file_path, check_link=True):
        self.raise_os_error(errno.ENOENT, old_file_path, 2)
    if ends_with_sep:
        self._handle_broken_link_with_trailing_sep(old_file_path)

    old_object = self.lresolve(old_file_path)
    if not self.is_windows_fs:
        self._handle_posix_dir_link_errors(
            new_file_path, old_file_path, ends_with_sep)

    if self.exists(new_file_path, check_link=True):
        new_file_path = self._rename_to_existing_path(
            force_replace, new_file_path, old_file_path,
            old_object, ends_with_sep)

        if not new_file_path:
            # Nothing left to do (e.g. hard links to the same file).
            return

    old_dir, old_name = self.splitpath(old_file_path)
    new_dir, new_name = self.splitpath(new_file_path)
    if not self.exists(new_dir):
        self.raise_os_error(errno.ENOENT, new_dir)
    old_dir_object = self.resolve(old_dir)
    new_dir_object = self.resolve(new_dir)
    if old_dir_object.st_dev != new_dir_object.st_dev:
        # Renaming across mount points is not supported.
        self.raise_os_error(errno.EXDEV, old_file_path)
    if not S_ISDIR(new_dir_object.st_mode):
        self.raise_os_error(
            errno.EACCES if self.is_windows_fs else errno.ENOTDIR,
            new_file_path)
    if new_dir_object.has_parent_object(old_object):
        # A directory cannot be moved into itself.
        self.raise_os_error(errno.EINVAL, new_file_path)

    object_to_rename = old_dir_object.get_entry(old_name)
    old_dir_object.remove_entry(old_name, recursive=False)
    object_to_rename.name = new_name
    new_name = new_dir_object._normalized_entryname(new_name)
    if new_name in new_dir_object.contents:
        # in case of overwriting remove the old entry first
        new_dir_object.remove_entry(new_name)
    new_dir_object.add_entry(object_to_rename)
def _handle_broken_link_with_trailing_sep(self, path):
# note that the check for trailing sep has to be done earlier
if self.islink(path):
if not self.exists(path):
error = (errno.ENOENT if self.is_macos else
errno.EINVAL if self.is_windows_fs else errno.ENOTDIR)
self.raise_os_error(error, path)
def _handle_posix_dir_link_errors(self, new_file_path, old_file_path,
                                  ends_with_sep):
    """Raise the POSIX-specific errors for renames that mix
    directories and symlinks."""
    if (self.isdir(old_file_path, follow_symlinks=False) and
            self.islink(new_file_path)):
        # Renaming a directory over a symlink is invalid.
        self.raise_os_error(errno.ENOTDIR, new_file_path)
    if (self.isdir(new_file_path, follow_symlinks=False) and
            self.islink(old_file_path)):
        if ends_with_sep and self.is_macos:
            # macOS tolerates this case with a trailing separator.
            return
        error = errno.ENOTDIR if ends_with_sep else errno.EISDIR
        self.raise_os_error(error, new_file_path)
    if (ends_with_sep and self.islink(old_file_path) and
            old_file_path == new_file_path and not self.is_windows_fs):
        self.raise_os_error(errno.ENOTDIR, new_file_path)
def _rename_to_existing_path(self, force_replace, new_file_path,
                             old_file_path, old_object, ends_with_sep):
    """Handle the case where the rename target already exists.

    Returns:
        The (possibly adjusted) new file path, or None if nothing
        remains to be done.
    """
    new_object = self.get_object(new_file_path)
    if old_file_path == new_file_path:
        if not S_ISLNK(new_object.st_mode) and ends_with_sep:
            error = errno.EINVAL if self.is_windows_fs else errno.ENOTDIR
            self.raise_os_error(error, old_file_path)
        return  # Nothing to do here.

    if old_object == new_object:
        # Same object under different paths (case change or hard link).
        new_file_path = self._rename_same_object(
            new_file_path, old_file_path)
    elif (S_ISDIR(new_object.st_mode) or S_ISLNK(new_object.st_mode)):
        self._handle_rename_error_for_dir_or_link(
            force_replace, new_file_path,
            new_object, old_object, ends_with_sep)
    elif S_ISDIR(old_object.st_mode):
        # Directory over an existing file: errno is platform-specific.
        error = errno.EEXIST if self.is_windows_fs else errno.ENOTDIR
        self.raise_os_error(error, new_file_path)
    elif self.is_windows_fs and not force_replace:
        self.raise_os_error(errno.EEXIST, new_file_path)
    else:
        # Replacement allowed: remove the existing target first.
        self.remove_object(new_file_path)
    return new_file_path
def _handle_rename_error_for_dir_or_link(self, force_replace,
                                         new_file_path, new_object,
                                         old_object, ends_with_sep):
    """Raise the platform-specific error when the rename target is an
    existing directory or symlink."""
    if self.is_windows_fs:
        if force_replace:
            self.raise_os_error(errno.EACCES, new_file_path)
        else:
            self.raise_os_error(errno.EEXIST, new_file_path)
    if not S_ISLNK(new_object.st_mode):
        if new_object.contents:
            # A non-empty directory cannot be replaced, except in the
            # macOS special case of a link given with a trailing sep.
            if (not S_ISLNK(old_object.st_mode) or
                    not ends_with_sep or not self.is_macos):
                self.raise_os_error(errno.ENOTEMPTY, new_file_path)
        if S_ISREG(old_object.st_mode):
            # A regular file cannot replace a directory.
            self.raise_os_error(errno.EISDIR, new_file_path)
def _rename_same_object(self, new_file_path, old_file_path):
    """Handle renaming two paths that refer to the same object.

    Returns:
        The new path if only the case shall be changed in a
        case-insensitive filesystem, or None if there is nothing to do
        (e.g. both paths are hard links to the same file).
    """
    do_rename = old_file_path.lower() == new_file_path.lower()
    if not do_rename:
        try:
            real_old_path = self.resolve_path(old_file_path)
            original_old_path = self._original_path(real_old_path)
            real_new_path = self.resolve_path(new_file_path)
            if (real_new_path == original_old_path and
                    (new_file_path == real_old_path) ==
                    (new_file_path.lower() ==
                     real_old_path.lower())):
                real_object = self.resolve(old_file_path,
                                           follow_symlinks=False)
                do_rename = (os.path.basename(old_file_path) ==
                             real_object.name or not self.is_macos)
            else:
                do_rename = (real_new_path.lower() ==
                             real_old_path.lower())
            if do_rename:
                # only case is changed in case-insensitive file
                # system - do the rename
                parent, file_name = self.splitpath(new_file_path)
                new_file_path = self.joinpaths(
                    self._original_path(parent), file_name)
        except OSError:
            # ResolvePath may fail due to symlink loop issues or
            # similar - in this case just assume the paths
            # to be different
            pass
    if not do_rename:
        # hard links to the same file - nothing to do
        new_file_path = None
    return new_file_path
def remove_object(self, file_path):
    """Remove an existing file or directory.

    Args:
        file_path: The path to the file relative to self.

    Raises:
        OSError: if file_path does not correspond to an existing file,
            or if part of the path refers to something other than a
            directory.
        OSError: if the directory is in use (eg, if it is '/').
    """
    file_path = self.absnormpath(self._original_path(file_path))
    if self._is_root_path(file_path):
        # The root directory can never be removed.
        self.raise_os_error(errno.EBUSY, file_path)
    try:
        dirname, basename = self.splitpath(file_path)
        parent_directory = self.resolve(dirname, check_read_perm=False)
        parent_directory.remove_entry(basename)
    except KeyError:
        self.raise_os_error(errno.ENOENT, file_path)
    except AttributeError:
        # The parent is not a directory object.
        self.raise_os_error(errno.ENOTDIR, file_path)
def make_string_path(self, path):
path = make_string_path(path)
os_sep = self._matching_string(path, os.sep)
fake_sep = self._matching_string(path, self.path_separator)
return path.replace(os_sep, fake_sep)
    def create_dir(self, directory_path, perm_bits=PERM_DEF):
        """Create `directory_path`, and all the parent directories.

        Helper method to set up your test faster.

        Args:
            directory_path: The full directory path to create.
            perm_bits: The permission bits as set by `chmod`.

        Returns:
            The newly created FakeDirectory object.

        Raises:
            OSError: if the directory already exists.
        """
        directory_path = self.make_string_path(directory_path)
        directory_path = self.absnormpath(directory_path)
        self._auto_mount_drive_if_needed(directory_path)
        if self.exists(directory_path, check_link=True):
            self.raise_os_error(errno.EEXIST, directory_path)
        # walk the path components from the root, creating any
        # component that does not exist yet
        path_components = self._path_components(directory_path)
        current_dir = self.root
        new_dirs = []
        for component in path_components:
            directory = self._directory_content(current_dir, component)[1]
            if not directory:
                new_dir = FakeDirectory(component, filesystem=self)
                new_dirs.append(new_dir)
                current_dir.add_entry(new_dir)
                current_dir = new_dir
            else:
                # an existing symlink is followed to its target
                if S_ISLNK(directory.st_mode):
                    directory = self.resolve(directory.contents)
                current_dir = directory
                if directory.st_mode & S_IFDIR != S_IFDIR:
                    self.raise_os_error(errno.ENOTDIR, current_dir.path)
        # set the permission after creating the directories
        # to allow directory creation inside a read-only directory
        for new_dir in new_dirs:
            new_dir.st_mode = S_IFDIR | perm_bits
        return current_dir
def create_file(self, file_path, st_mode=S_IFREG | PERM_DEF_FILE,
contents='', st_size=None, create_missing_dirs=True,
apply_umask=False, encoding=None, errors=None,
side_effect=None):
"""Create `file_path`, including all the parent directories along
the way.
This helper method can be used to set up tests more easily.
Args:
file_path: The path to the file to create.
st_mode: The stat constant representing the file type.
contents: the contents of the file. If not given and st_size is
None, an empty file is assumed.
st_size: file size; only valid if contents not given. If given,
the file is considered to be in "large file mode" and trying
to read from or write to the file will result in an exception.
create_missing_dirs: If `True`, auto create missing directories.
apply_umask: `True` if the current umask must be applied
on `st_mode`.
encoding: If `contents` is a unicode string, the encoding used
for serialization.
errors: The error mode used for encoding/decoding errors.
side_effect: function handle that is executed when file is written,
must accept the file object as an argument.
Returns:
The newly created FakeFile object.
Raises:
OSError: if the file already exists.
OSError: if the containing directory is required and missing.
"""
return self.create_file_internally(
file_path, st_mode, contents, st_size, create_missing_dirs,
apply_umask, encoding, errors, side_effect=side_effect)
    def add_real_file(self, source_path, read_only=True, target_path=None):
        """Create `file_path`, including all the parent directories along the
        way, for an existing real file. The contents of the real file are read
        only on demand.

        Args:
            source_path: Path to an existing file in the real file system
            read_only: If `True` (the default), writing to the fake file
                raises an exception. Otherwise, writing to the file changes
                the fake file only.
            target_path: If given, the path of the target direction,
                otherwise it is equal to `source_path`.

        Returns:
            the newly created FakeFile object.

        Raises:
            OSError: if the file does not exist in the real file system.
            OSError: if the file already exists in the fake file system.

        .. note:: On most systems, accessing the fake file's contents may
            update both the real and fake files' `atime` (access time).
            In this particular case, `add_real_file()` violates the rule
            that `pyfakefs` must not modify the real file system.
        """
        target_path = target_path or source_path
        source_path = make_string_path(source_path)
        target_path = self.make_string_path(target_path)
        # stat the real file first so a missing file raises before
        # anything is created in the fake filesystem
        real_stat = os.stat(source_path)
        fake_file = self.create_file_internally(target_path,
                                                read_from_real_fs=True)
        # mirror the real file's metadata (mode, size, timestamps)
        fake_file.stat_result.set_from_stat_result(real_stat)
        # for read-only mode, remove the write/executable permission bits
        if read_only:
            fake_file.st_mode &= 0o777444
        fake_file.file_path = source_path
        # the contents are read lazily, but the size is accounted for
        # in the fake disk usage right away
        self.change_disk_usage(fake_file.size, fake_file.name,
                               fake_file.st_dev)
        return fake_file
def add_real_symlink(self, source_path, target_path=None):
"""Create a symlink at source_path (or target_path, if given). It will
point to the same path as the symlink on the real filesystem. Relative
symlinks will point relative to their new location. Absolute symlinks
will point to the same, absolute path as on the real filesystem.
Args:
source_path: The path to the existing symlink.
target_path: If given, the name of the symlink in the fake
fileystem, otherwise, the same as `source_path`.
Returns:
the newly created FakeDirectory object.
Raises:
OSError: if the directory does not exist in the real file system.
OSError: if the symlink could not be created
(see :py:meth:`create_file`).
OSError: if the directory already exists in the fake file system.
"""
source_path = self._path_without_trailing_separators(source_path)
if not os.path.exists(source_path) and not os.path.islink(source_path):
self.raise_os_error(errno.ENOENT, source_path)
target = os.readlink(source_path)
if target_path:
return self.create_symlink(target_path, target)
else:
return self.create_symlink(source_path, target)
    def add_real_directory(self, source_path, read_only=True, lazy_read=True,
                           target_path=None):
        """Create a fake directory corresponding to the real directory at the
        specified path. Add entries in the fake directory corresponding to
        the entries in the real directory. Symlinks are supported.

        Args:
            source_path: The path to the existing directory.
            read_only: If set, all files under the directory are treated as
                read-only, e.g. a write access raises an exception;
                otherwise, writing to the files changes the fake files only
                as usually.
            lazy_read: If set (default), directory contents are only read when
                accessed, and only until the needed subdirectory level.

                .. note:: This means that the file system size is only updated
                    at the time the directory contents are read; set this to
                    `False` only if you are dependent on accurate file system
                    size in your test

            target_path: If given, the target directory, otherwise,
                the target directory is the same as `source_path`.

        Returns:
            the newly created FakeDirectory object.

        Raises:
            OSError: if the directory does not exist in the real file system.
            OSError: if the directory already exists in the fake file system.
        """
        source_path = self._path_without_trailing_separators(source_path)
        if not os.path.exists(source_path):
            self.raise_os_error(errno.ENOENT, source_path)
        target_path = target_path or source_path
        if lazy_read:
            # only create a placeholder directory that reads the real
            # directory contents on first access
            parent_path = os.path.split(target_path)[0]
            if self.exists(parent_path):
                parent_dir = self.get_object(parent_path)
            else:
                parent_dir = self.create_dir(parent_path)
            new_dir = FakeDirectoryFromRealDirectory(
                source_path, self, read_only, target_path)
            parent_dir.add_entry(new_dir)
        else:
            # eagerly mirror the whole tree: walk the real directory and
            # add each entry, handling symlinks separately from files
            new_dir = self.create_dir(target_path)
            for base, _, files in os.walk(source_path):
                new_base = os.path.join(new_dir.path,
                                        os.path.relpath(base, source_path))
                for fileEntry in os.listdir(base):
                    # add symlinks first (os.walk lists them with files
                    # or dirs depending on their target)
                    abs_fileEntry = os.path.join(base, fileEntry)
                    if not os.path.islink(abs_fileEntry):
                        continue
                    self.add_real_symlink(
                        abs_fileEntry, os.path.join(new_base, fileEntry))
                for fileEntry in files:
                    path = os.path.join(base, fileEntry)
                    # symlinks have already been added above
                    if os.path.islink(path):
                        continue
                    self.add_real_file(path,
                                       read_only,
                                       os.path.join(new_base, fileEntry))
        return new_dir
def add_real_paths(self, path_list, read_only=True, lazy_dir_read=True):
"""This convenience method adds multiple files and/or directories from
the real file system to the fake file system. See `add_real_file()` and
`add_real_directory()`.
Args:
path_list: List of file and directory paths in the real file
system.
read_only: If set, all files and files under under the directories
are treated as read-only, e.g. a write access raises an
exception; otherwise, writing to the files changes the fake
files only as usually.
lazy_dir_read: Uses lazy reading of directory contents if set
(see `add_real_directory`)
Raises:
OSError: if any of the files and directories in the list
does not exist in the real file system.
OSError: if any of the files and directories in the list
already exists in the fake file system.
"""
for path in path_list:
if os.path.isdir(path):
self.add_real_directory(path, read_only, lazy_dir_read)
else:
self.add_real_file(path, read_only)
    def create_file_internally(self, file_path,
                               st_mode=S_IFREG | PERM_DEF_FILE,
                               contents='', st_size=None,
                               create_missing_dirs=True,
                               apply_umask=False, encoding=None, errors=None,
                               read_from_real_fs=False, raw_io=False,
                               side_effect=None):
        """Internal fake file creator that supports both normal fake files
        and fake files based on real files.

        Args:
            file_path: path to the file to create.
            st_mode: the stat.S_IF constant representing the file type.
            contents: the contents of the file. If not given and st_size is
                None, an empty file is assumed.
            st_size: file size; only valid if contents not given. If given,
                the file is considered to be in "large file mode" and trying
                to read from or write to the file will result in an exception.
            create_missing_dirs: if True, auto create missing directories.
            apply_umask: whether or not the current umask must be applied
                on st_mode.
            encoding: if contents is a unicode string, the encoding used for
                serialization.
            errors: the error mode used for encoding/decoding errors
            read_from_real_fs: if True, the contents are read from the real
                file system on demand.
            raw_io: `True` if called from low-level API (`os.open`)
            side_effect: function handle that is executed when file is written,
                must accept the file object as an argument.
        """
        file_path = self.make_string_path(file_path)
        file_path = self.absnormpath(file_path)
        if not is_int_type(st_mode):
            raise TypeError(
                'st_mode must be of int type - did you mean to set contents?')
        if self.exists(file_path, check_link=True):
            self.raise_os_error(errno.EEXIST, file_path)
        parent_directory, new_file = self.splitpath(file_path)
        if not parent_directory:
            # a bare file name is created in the current directory
            parent_directory = self.cwd
        self._auto_mount_drive_if_needed(parent_directory)
        if not self.exists(parent_directory):
            if not create_missing_dirs:
                self.raise_os_error(errno.ENOENT, parent_directory)
            self.create_dir(parent_directory)
        else:
            # use the canonical case of the existing parent directory
            parent_directory = self._original_path(parent_directory)
        if apply_umask:
            st_mode &= ~self.umask
        if read_from_real_fs:
            file_object = FakeFileFromRealFile(file_path, filesystem=self,
                                               side_effect=side_effect)
        else:
            file_object = FakeFile(new_file, st_mode, filesystem=self,
                                   encoding=encoding, errors=errors,
                                   side_effect=side_effect)
        self.add_object(parent_directory, file_object)
        if st_size is None and contents is None:
            contents = ''
        if (not read_from_real_fs and
                (contents is not None or st_size is not None)):
            try:
                if st_size is not None:
                    file_object.set_large_file_size(st_size)
                else:
                    file_object._set_initial_contents(contents)
            except OSError:
                # roll back the creation if setting the contents fails
                # (e.g. insufficient fake disk space)
                self.remove_object(file_path)
                raise
        return file_object
    # pylint: disable=unused-argument
    def create_symlink(self, file_path, link_target, create_missing_dirs=True):
        """Create the specified symlink, pointed at the specified link target.

        The symlink is stored as a fake file whose `contents` are the
        target path (the target itself does not have to exist).

        Args:
            file_path: path to the symlink to create
            link_target: the target of the symlink
            create_missing_dirs: If `True`, any missing parent directories of
                file_path will be created

        Returns:
            The newly created FakeFile object.

        Raises:
            OSError: if the symlink could not be created
                (see :py:meth:`create_file`).
        """
        # the link path cannot end with a path separator
        file_path = self.make_string_path(file_path)
        link_target = self.make_string_path(link_target)
        file_path = self.normcase(file_path)
        if self.ends_with_path_separator(file_path):
            # the error raised for a trailing separator is highly
            # OS-dependent - mirror the behavior of each system
            if self.exists(file_path):
                self.raise_os_error(errno.EEXIST, file_path)
            if self.exists(link_target):
                if not self.is_windows_fs:
                    self.raise_os_error(errno.ENOENT, file_path)
            else:
                if self.is_windows_fs:
                    self.raise_os_error(errno.EINVAL, link_target)
                if not self.exists(
                        self._path_without_trailing_separators(file_path),
                        check_link=True):
                    self.raise_os_error(errno.ENOENT, link_target)
                if self.is_macos:
                    # to avoid EEXIST exception, remove the link
                    # if it already exists
                    if self.exists(file_path, check_link=True):
                        self.remove_object(file_path)
                else:
                    self.raise_os_error(errno.EEXIST, link_target)
        # resolve the link path only if it is not a link itself
        if not self.islink(file_path):
            file_path = self.resolve_path(file_path)
        link_target = make_string_path(link_target)
        return self.create_file_internally(
            file_path, st_mode=S_IFLNK | PERM_DEF,
            contents=link_target,
            create_missing_dirs=create_missing_dirs,
            raw_io=True)
    def link(self, old_path, new_path, follow_symlinks=True):
        """Create a hard link at new_path, pointing at old_path.

        Args:
            old_path: An existing link to the target file.
            new_path: The destination path to create a new link at.
            follow_symlinks: If False and old_path is a symlink, link the
                symlink instead of the object it points to.

        Returns:
            The FakeFile object referred to by old_path.

        Raises:
            OSError: if something already exists at new_path.
            OSError: if old_path is a directory.
            OSError: if the parent directory doesn't exist.
        """
        new_path_normalized = self.absnormpath(new_path)
        if self.exists(new_path_normalized, check_link=True):
            self.raise_os_error(errno.EEXIST, new_path)
        new_parent_directory, new_basename = self.splitpath(
            new_path_normalized)
        if not new_parent_directory:
            # a bare file name is linked into the current directory
            new_parent_directory = self.cwd
        if not self.exists(new_parent_directory):
            self.raise_os_error(errno.ENOENT, new_parent_directory)
        if self.ends_with_path_separator(old_path):
            # the error for a trailing separator differs between systems
            error = errno.EINVAL if self.is_windows_fs else errno.ENOTDIR
            self.raise_os_error(error, old_path)
        if not self.is_windows_fs and self.ends_with_path_separator(new_path):
            self.raise_os_error(errno.ENOENT, old_path)
        # Retrieve the target file
        try:
            old_file = self.resolve(old_path, follow_symlinks=follow_symlinks)
        except OSError:
            self.raise_os_error(errno.ENOENT, old_path)
        # hard links to directories are not allowed
        if old_file.st_mode & S_IFDIR:
            self.raise_os_error(
                errno.EACCES if self.is_windows_fs else errno.EPERM, old_path)
        # abuse the name field to control the filename of the
        # newly created link
        old_file.name = new_basename
        self.add_object(new_parent_directory, old_file)
        return old_file
def _is_circular_link(self, link_obj):
try:
self.resolve_path(link_obj.contents)
except OSError as exc:
return exc.errno == errno.ELOOP
return False
    def readlink(self, path):
        """Read the target of a symlink.

        Args:
            path: symlink to read the target of.

        Returns:
            the string representing the path to which the symbolic link points.

        Raises:
            TypeError: if path is None
            OSError: (with errno=ENOENT) if path is not a valid path, or
                (with errno=EINVAL) if path is valid, but is not a symlink,
                or if the path ends with a path separator (Posix only)
        """
        if path is None:
            raise TypeError
        link_obj = self.lresolve(path)
        if S_IFMT(link_obj.st_mode) != S_IFLNK:
            self.raise_os_error(errno.EINVAL, path)
        if self.ends_with_path_separator(path):
            if not self.is_windows_fs and self.exists(path):
                self.raise_os_error(errno.EINVAL, path)
            if not self.exists(link_obj.path):
                # the error for a broken link with trailing separator
                # depends on the system and on link circularity
                if self.is_windows_fs:
                    error = errno.EINVAL
                elif self._is_circular_link(link_obj):
                    if self.is_macos:
                        return link_obj.path
                    error = errno.ELOOP
                else:
                    error = errno.ENOENT
                self.raise_os_error(error, link_obj.path)
        # the link target is stored as the fake file's contents
        return link_obj.contents
    def makedir(self, dir_name, mode=PERM_DEF):
        """Create a leaf Fake directory.

        Args:
            dir_name: (str) Name of directory to create.
                Relative paths are assumed to be relative to '/'.
            mode: (int) Mode to create directory with. This argument defaults
                to 0o777. The umask is applied to this mode.

        Raises:
            OSError: if the directory name is invalid or parent directory is
                read only or as per :py:meth:`add_object`.
        """
        dir_name = make_string_path(dir_name)
        ends_with_sep = self.ends_with_path_separator(dir_name)
        dir_name = self._path_without_trailing_separators(dir_name)
        if not dir_name:
            self.raise_os_error(errno.ENOENT, '')
        if self.is_windows_fs:
            dir_name = self.absnormpath(dir_name)
        parent_dir, _ = self.splitpath(dir_name)
        if parent_dir:
            base_dir = self.normpath(parent_dir)
            # a parent ending in '..' refers to the grandparent under
            # Posix - strip the trailing '..' before the existence check
            ellipsis = self._matching_string(
                parent_dir, self.path_separator + '..')
            if parent_dir.endswith(ellipsis) and not self.is_windows_fs:
                base_dir, dummy_dotdot, _ = parent_dir.partition(ellipsis)
            if not self.exists(base_dir):
                self.raise_os_error(errno.ENOENT, base_dir)
        dir_name = self.absnormpath(dir_name)
        if self.exists(dir_name, check_link=True):
            if self.is_windows_fs and dir_name == self.path_separator:
                error_nr = errno.EACCES
            else:
                error_nr = errno.EEXIST
            if ends_with_sep and self.is_macos and not self.exists(dir_name):
                # to avoid EEXIST exception, remove the link
                self.remove_object(dir_name)
            else:
                self.raise_os_error(error_nr, dir_name)
        head, tail = self.splitpath(dir_name)
        self.add_object(
            head, FakeDirectory(tail, mode & ~self.umask, filesystem=self))
def _path_without_trailing_separators(self, path):
while self.ends_with_path_separator(path):
path = path[:-1]
return path
    def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=False):
        """Create a leaf Fake directory and create any non-existent
        parent dirs.

        Args:
            dir_name: (str) Name of directory to create.
            mode: (int) Mode to create directory (and any necessary parent
                directories) with. This argument defaults to 0o777.
                The umask is applied to this mode.
            exist_ok: (boolean) If exist_ok is False (the default), an OSError is
                raised if the target directory already exists.

        Raises:
            OSError: if the directory already exists and exist_ok=False,
                or as per :py:meth:`create_dir`.
        """
        if not dir_name:
            self.raise_os_error(errno.ENOENT, '')
        dir_name = to_string(dir_name)
        ends_with_sep = self.ends_with_path_separator(dir_name)
        dir_name = self.absnormpath(dir_name)
        if (ends_with_sep and self.is_macos and
                self.exists(dir_name, check_link=True) and
                not self.exists(dir_name)):
            # to avoid EEXIST exception, remove the link
            self.remove_object(dir_name)
        path_components = self._path_components(dir_name)
        # Raise a permission denied error if the first existing directory
        # is not writeable.
        # NOTE(review): this loop walks the already-existing path prefix;
        # accessing `.contents` may trigger lazy reading of real
        # directories - confirm the permission check happens downstream.
        current_dir = self.root
        for component in path_components:
            if (component not in current_dir.contents
                    or not isinstance(current_dir.contents, dict)):
                break
            else:
                current_dir = current_dir.contents[component]
        try:
            self.create_dir(dir_name, mode & ~self.umask)
        except OSError as e:
            if e.errno == errno.EACCES:
                # permission denied - propagate exception
                raise
            if (not exist_ok or
                    not isinstance(self.resolve(dir_name), FakeDirectory)):
                # Windows reports ENOENT where Posix reports ENOTDIR
                if self.is_windows_fs and e.errno == errno.ENOTDIR:
                    e.errno = errno.ENOENT
                self.raise_os_error(e.errno, e.filename)
def _is_of_type(self, path, st_flag, follow_symlinks=True):
"""Helper function to implement isdir(), islink(), etc.
See the stat(2) man page for valid stat.S_I* flag values
Args:
path: Path to file to stat and test
st_flag: The stat.S_I* flag checked for the file's st_mode
Returns:
(boolean) `True` if the st_flag is set in path's st_mode.
Raises:
TypeError: if path is None
"""
path = make_string_path(path)
if path is None:
raise TypeError
try:
obj = self.resolve(path, follow_symlinks)
if obj:
self.raise_for_filepath_ending_with_separator(
path, obj, macos_handling=not follow_symlinks)
return S_IFMT(obj.st_mode) == st_flag
except OSError:
return False
return False
def isdir(self, path, follow_symlinks=True):
"""Determine if path identifies a directory.
Args:
path: Path to filesystem object.
Returns:
`True` if path points to a directory (following symlinks).
Raises:
TypeError: if path is None.
"""
return self._is_of_type(path, S_IFDIR, follow_symlinks)
def isfile(self, path, follow_symlinks=True):
"""Determine if path identifies a regular file.
Args:
path: Path to filesystem object.
Returns:
`True` if path points to a regular file (following symlinks).
Raises:
TypeError: if path is None.
"""
return self._is_of_type(path, S_IFREG, follow_symlinks)
def islink(self, path):
"""Determine if path identifies a symbolic link.
Args:
path: Path to filesystem object.
Returns:
`True` if path points to a symlink (S_IFLNK set in st_mode)
Raises:
TypeError: if path is None.
"""
return self._is_of_type(path, S_IFLNK, follow_symlinks=False)
def confirmdir(self, target_directory):
"""Test that the target is actually a directory, raising OSError
if not.
Args:
target_directory: Path to the target directory within the fake
filesystem.
Returns:
The FakeDirectory object corresponding to target_directory.
Raises:
OSError: if the target is not a directory.
"""
directory = self.resolve(target_directory)
if not directory.st_mode & S_IFDIR:
self.raise_os_error(errno.ENOTDIR, target_directory, 267)
return directory
    def remove(self, path):
        """Remove the FakeFile object at the specified file path.

        Args:
            path: Path to file to be removed.

        Raises:
            OSError: if path points to a directory.
            OSError: if path does not exist.
            OSError: if removal failed.
        """
        norm_path = self.absnormpath(path)
        if self.ends_with_path_separator(path):
            self._handle_broken_link_with_trailing_sep(norm_path)
        if self.exists(norm_path):
            obj = self.resolve(norm_path, check_read_perm=False)
            if S_IFMT(obj.st_mode) == S_IFDIR:
                # removing a directory with remove() is an error unless
                # the path itself is a symlink to that directory
                link_obj = self.lresolve(norm_path)
                if S_IFMT(link_obj.st_mode) != S_IFLNK:
                    # the raised error is OS-specific
                    if self.is_windows_fs:
                        error = errno.EACCES
                    elif self.is_macos:
                        error = errno.EPERM
                    else:
                        error = errno.EISDIR
                    self.raise_os_error(error, norm_path)
                norm_path = make_string_path(norm_path)
                if path.endswith(self.path_separator):
                    # a trailing separator on a link to a directory is
                    # also rejected, with an OS-specific error
                    if self.is_windows_fs:
                        error = errno.EACCES
                    elif self.is_macos:
                        error = errno.EPERM
                    else:
                        error = errno.ENOTDIR
                    self.raise_os_error(error, norm_path)
            else:
                self.raise_for_filepath_ending_with_separator(path, obj)
        self.remove_object(norm_path)
    def rmdir(self, target_directory, allow_symlink=False):
        """Remove a leaf Fake directory.

        Args:
            target_directory: (str) Name of directory to remove.
            allow_symlink: (bool) if `target_directory` is a symlink,
                the function just returns, otherwise it raises (Posix only)

        Raises:
            OSError: if target_directory does not exist.
            OSError: if target_directory does not point to a directory.
            OSError: if removal failed per FakeFilesystem.RemoveObject.
                Cannot remove '.'.
        """
        # removing the current directory '.' is always rejected
        if target_directory in (b'.', u'.'):
            error_nr = errno.EACCES if self.is_windows_fs else errno.EINVAL
            self.raise_os_error(error_nr, target_directory)
        ends_with_sep = self.ends_with_path_separator(target_directory)
        target_directory = self.absnormpath(target_directory)
        if self.confirmdir(target_directory):
            # a symlink to a directory is not removed under Posix
            # unless explicitly allowed by the caller
            if not self.is_windows_fs and self.islink(target_directory):
                if allow_symlink:
                    return
                if not ends_with_sep or not self.is_macos:
                    self.raise_os_error(errno.ENOTDIR, target_directory)
            dir_object = self.resolve(target_directory)
            # only empty directories can be removed
            if dir_object.contents:
                self.raise_os_error(errno.ENOTEMPTY, target_directory)
            self.remove_object(target_directory)
def listdir(self, target_directory):
"""Return a list of file names in target_directory.
Args:
target_directory: Path to the target directory within the
fake filesystem.
Returns:
A list of file names within the target directory in arbitrary
order.
Raises:
OSError: if the target is not a directory.
"""
target_directory = self.resolve_path(target_directory, allow_fd=True)
directory = self.confirmdir(target_directory)
directory_contents = directory.contents
return list(directory_contents.keys())
    def __str__(self):
        """Return the string representation of the fake filesystem's
        root directory."""
        return str(self.root)
def _add_standard_streams(self):
self._add_open_file(StandardStreamWrapper(sys.stdin))
self._add_open_file(StandardStreamWrapper(sys.stdout))
self._add_open_file(StandardStreamWrapper(sys.stderr))
# Register the deprecated CamelCase aliases for the snake_case
# FakeFilesystem API (e.g. `GetDiskUsage` delegates to `get_disk_usage`
# with a deprecation warning).
Deprecator.add(FakeFilesystem, FakeFilesystem.get_disk_usage, 'GetDiskUsage')
Deprecator.add(FakeFilesystem, FakeFilesystem.set_disk_usage, 'SetDiskUsage')
Deprecator.add(FakeFilesystem,
               FakeFilesystem.change_disk_usage, 'ChangeDiskUsage')
Deprecator.add(FakeFilesystem, FakeFilesystem.add_mount_point, 'AddMountPoint')
Deprecator.add(FakeFilesystem, FakeFilesystem.stat, 'GetStat')
Deprecator.add(FakeFilesystem, FakeFilesystem.chmod, 'ChangeMode')
Deprecator.add(FakeFilesystem, FakeFilesystem.utime, 'UpdateTime')
Deprecator.add(FakeFilesystem, FakeFilesystem._add_open_file, 'AddOpenFile')
Deprecator.add(FakeFilesystem,
               FakeFilesystem._close_open_file, 'CloseOpenFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.has_open_file, 'HasOpenFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.get_open_file, 'GetOpenFile')
Deprecator.add(FakeFilesystem,
               FakeFilesystem.normcase, 'NormalizePathSeparator')
Deprecator.add(FakeFilesystem, FakeFilesystem.normpath, 'CollapsePath')
Deprecator.add(FakeFilesystem, FakeFilesystem._original_path, 'NormalizeCase')
Deprecator.add(FakeFilesystem, FakeFilesystem.absnormpath, 'NormalizePath')
Deprecator.add(FakeFilesystem, FakeFilesystem.splitpath, 'SplitPath')
Deprecator.add(FakeFilesystem, FakeFilesystem.splitdrive, 'SplitDrive')
Deprecator.add(FakeFilesystem, FakeFilesystem.joinpaths, 'JoinPaths')
Deprecator.add(FakeFilesystem,
               FakeFilesystem._path_components, 'GetPathComponents')
Deprecator.add(FakeFilesystem, FakeFilesystem._starts_with_drive_letter,
               'StartsWithDriveLetter')
Deprecator.add(FakeFilesystem, FakeFilesystem.exists, 'Exists')
Deprecator.add(FakeFilesystem, FakeFilesystem.resolve_path, 'ResolvePath')
Deprecator.add(FakeFilesystem, FakeFilesystem.get_object_from_normpath,
               'GetObjectFromNormalizedPath')
Deprecator.add(FakeFilesystem, FakeFilesystem.get_object, 'GetObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.resolve, 'ResolveObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.lresolve, 'LResolveObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.add_object, 'AddObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.remove_object, 'RemoveObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.rename, 'RenameObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.create_dir, 'CreateDirectory')
Deprecator.add(FakeFilesystem, FakeFilesystem.create_file, 'CreateFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.create_symlink, 'CreateLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.link, 'CreateHardLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.readlink, 'ReadLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.makedir, 'MakeDirectory')
Deprecator.add(FakeFilesystem, FakeFilesystem.makedirs, 'MakeDirectories')
Deprecator.add(FakeFilesystem, FakeFilesystem.isdir, 'IsDir')
Deprecator.add(FakeFilesystem, FakeFilesystem.isfile, 'IsFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.islink, 'IsLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.confirmdir, 'ConfirmDir')
Deprecator.add(FakeFilesystem, FakeFilesystem.remove, 'RemoveFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.rmdir, 'RemoveDirectory')
Deprecator.add(FakeFilesystem, FakeFilesystem.listdir, 'ListDir')
class FakePathModule:
"""Faked os.path module replacement.
FakePathModule should *only* be instantiated by FakeOsModule. See the
FakeOsModule docstring for details.
"""
_OS_PATH_COPY = _copy_module(os.path)
@staticmethod
def dir():
"""Return the list of patched function names. Used for patching
functions imported from the module.
"""
return [
'abspath', 'dirname', 'exists', 'expanduser', 'getatime',
'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile',
'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath',
'realpath', 'relpath', 'split', 'splitdrive', 'samefile'
]
    def __init__(self, filesystem, os_module):
        """Init.

        Args:
            filesystem: FakeFilesystem used to provide file system information
            os_module: the fake os module this path module belongs to
        """
        self.filesystem = filesystem
        # work on a copy of the real os.path module, and make that copy
        # use the fake os module instead of the real one
        self._os_path = self._OS_PATH_COPY
        self._os_path.os = self.os = os_module
        # mirror the separators of the fake filesystem
        self.sep = self.filesystem.path_separator
        self.altsep = self.filesystem.alternative_path_separator
def exists(self, path):
"""Determine whether the file object exists within the fake filesystem.
Args:
path: The path to the file object.
Returns:
(bool) `True` if the file exists.
"""
return self.filesystem.exists(path)
def lexists(self, path):
"""Test whether a path exists. Returns True for broken symbolic links.
Args:
path: path to the symlink object.
Returns:
bool (if file exists).
"""
return self.filesystem.exists(path, check_link=True)
def getsize(self, path):
"""Return the file object size in bytes.
Args:
path: path to the file object.
Returns:
file size in bytes.
"""
file_obj = self.filesystem.resolve(path)
if (self.filesystem.ends_with_path_separator(path) and
S_IFMT(file_obj.st_mode) != S_IFDIR):
error_nr = (errno.EINVAL if self.filesystem.is_windows_fs
else errno.ENOTDIR)
self.filesystem.raise_os_error(error_nr, path)
return file_obj.st_size
def isabs(self, path):
"""Return True if path is an absolute pathname."""
if self.filesystem.is_windows_fs:
path = self.splitdrive(path)[1]
path = make_string_path(path)
sep = self.filesystem._path_separator(path)
altsep = self.filesystem._alternative_path_separator(path)
if self.filesystem.is_windows_fs:
return len(path) > 0 and path[:1] in (sep, altsep)
else:
return (path.startswith(sep) or
altsep is not None and path.startswith(altsep))
def isdir(self, path):
"""Determine if path identifies a directory."""
return self.filesystem.isdir(path)
def isfile(self, path):
"""Determine if path identifies a regular file."""
return self.filesystem.isfile(path)
def islink(self, path):
"""Determine if path identifies a symbolic link.
Args:
path: Path to filesystem object.
Returns:
`True` if path points to a symbolic link.
Raises:
TypeError: if path is None.
"""
return self.filesystem.islink(path)
def getmtime(self, path):
"""Returns the modification time of the fake file.
Args:
path: the path to fake file.
Returns:
(int, float) the modification time of the fake file
in number of seconds since the epoch.
Raises:
OSError: if the file does not exist.
"""
try:
file_obj = self.filesystem.resolve(path)
return file_obj.st_mtime
except OSError:
self.filesystem.raise_os_error(errno.ENOENT, winerror=3)
def getatime(self, path):
"""Returns the last access time of the fake file.
Note: Access time is not set automatically in fake filesystem
on access.
Args:
path: the path to fake file.
Returns:
(int, float) the access time of the fake file in number of seconds
since the epoch.
Raises:
OSError: if the file does not exist.
"""
try:
file_obj = self.filesystem.resolve(path)
except OSError:
self.filesystem.raise_os_error(errno.ENOENT)
return file_obj.st_atime
def getctime(self, path):
"""Returns the creation time of the fake file.
Args:
path: the path to fake file.
Returns:
(int, float) the creation time of the fake file in number of
seconds since the epoch.
Raises:
OSError: if the file does not exist.
"""
try:
file_obj = self.filesystem.resolve(path)
except OSError:
self.filesystem.raise_os_error(errno.ENOENT)
return file_obj.st_ctime
    def abspath(self, path):
        """Return the absolute version of a path."""
        def getcwd():
            """Return the current working directory."""
            # pylint: disable=undefined-variable
            # use the bytes variant if the argument is a bytes path
            if isinstance(path, bytes):
                return self.os.getcwdb()
            else:
                return self.os.getcwd()
        path = make_string_path(path)
        sep = self.filesystem._path_separator(path)
        altsep = self.filesystem._alternative_path_separator(path)
        if not self.isabs(path):
            path = self.join(getcwd(), path)
        # NOTE(review): `and` binds tighter than `or` here, so this branch
        # is taken for drive-relative Windows paths starting with `sep`,
        # or for ANY path starting with `altsep` - confirm the latter is
        # intended and not missing a parenthesis.
        elif (self.filesystem.is_windows_fs and
              path.startswith(sep) or altsep is not None and
              path.startswith(altsep)):
            cwd = getcwd()
            # prepend the current drive to a drive-relative path
            if self.filesystem._starts_with_drive_letter(cwd):
                path = self.join(cwd[:2], path)
        return self.normpath(path)
def join(self, *p):
"""Return the completed path with a separator of the parts."""
return self.filesystem.joinpaths(*p)
def split(self, path):
"""Split the path into the directory and the filename of the path.
"""
return self.filesystem.splitpath(path)
def splitdrive(self, path):
"""Split the path into the drive part and the rest of the path, if
supported."""
return self.filesystem.splitdrive(path)
def normpath(self, path):
"""Normalize path, eliminating double slashes, etc."""
return self.filesystem.normpath(path)
def normcase(self, path):
"""Convert to lower case under windows, replaces additional path
separator."""
path = self.filesystem.normcase(path)
if self.filesystem.is_windows_fs:
path = path.lower()
return path
    def relpath(self, path, start=None):
        """We mostly rely on the native implementation and adapt the
        path separator."""
        if not path:
            raise ValueError("no path specified")
        path = make_string_path(path)
        if start is not None:
            start = make_string_path(start)
        else:
            start = self.filesystem.cwd
        if self.filesystem.alternative_path_separator is not None:
            # map the fake alternative separator to the real os.sep so
            # the native relpath sees a single separator style
            path = path.replace(self.filesystem.alternative_path_separator,
                                self._os_path.sep)
            start = start.replace(self.filesystem.alternative_path_separator,
                                  self._os_path.sep)
        path = path.replace(self.filesystem.path_separator, self._os_path.sep)
        start = start.replace(
            self.filesystem.path_separator, self._os_path.sep)
        # delegate to the real implementation, then translate the
        # separators back to the fake filesystem's convention
        path = self._os_path.relpath(path, start)
        return path.replace(self._os_path.sep, self.filesystem.path_separator)
def realpath(self, filename):
"""Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path.
"""
if self.filesystem.is_windows_fs:
return self.abspath(filename)
filename = make_string_path(filename)
path, ok = self._joinrealpath(filename[:0], filename, {})
return self.abspath(path)
def samefile(self, path1, path2):
"""Return whether path1 and path2 point to the same file.
Args:
path1: first file path or path object (Python >=3.6)
path2: second file path or path object (Python >=3.6)
Raises:
OSError: if one of the paths does not point to an existing
file system object.
"""
stat1 = self.filesystem.stat(path1)
stat2 = self.filesystem.stat(path2)
return (stat1.st_ino == stat2.st_ino and
stat1.st_dev == stat2.st_dev)
    def _joinrealpath(self, path, rest, seen):
        """Join two paths, normalizing and eliminating any symbolic links
        encountered in the second path.
        Taken from Python source and adapted.

        Args:
            path: the already resolved prefix.
            rest: the not yet resolved remainder, relative to `path`.
            seen: maps a symlink path to its resolved target, or to None
                while resolution of that link is still in progress.

        Returns:
            (resolved_path, ok): `ok` is False if a symlink loop was
            detected; in that case the remainder is appended unresolved.
        """
        curdir = self.filesystem._matching_string(path, '.')
        pardir = self.filesystem._matching_string(path, '..')
        sep = self.filesystem._path_separator(path)
        if self.isabs(rest):
            rest = rest[1:]
            path = sep
        while rest:
            name, _, rest = rest.partition(sep)
            if not name or name == curdir:
                # current dir
                continue
            if name == pardir:
                # parent dir
                if path:
                    path, name = self.filesystem.splitpath(path)
                    if name == pardir:
                        # cannot collapse '..' further; keep both
                        path = self.filesystem.joinpaths(path, pardir, pardir)
                else:
                    path = pardir
                continue
            newpath = self.filesystem.joinpaths(path, name)
            if not self.filesystem.islink(newpath):
                path = newpath
                continue
            # Resolve the symbolic link
            if newpath in seen:
                # Already seen this path
                path = seen[newpath]
                if path is not None:
                    # use cached value
                    continue
                # The symlink is not resolved, so we must have a symlink loop.
                # Return already resolved part + rest of the path unchanged.
                return self.filesystem.joinpaths(newpath, rest), False
            seen[newpath] = None  # not resolved symlink
            path, ok = self._joinrealpath(
                path, self.filesystem.readlink(newpath), seen)
            if not ok:
                return self.filesystem.joinpaths(path, rest), False
            seen[newpath] = path  # resolved symlink
        return path, True
def dirname(self, path):
"""Returns the first part of the result of `split()`."""
return self.split(path)[0]
def expanduser(self, path):
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
"""
return self._os_path.expanduser(path).replace(
self._os_path.sep, self.sep)
    def ismount(self, path):
        """Return true if the given path is a mount point.

        Args:
            path: Path to filesystem object to be checked

        Returns:
            `True` if path is a mount point added to the fake file system.
            Under Windows also returns True for drive and UNC roots
            (independent of their existence).
        """
        path = make_string_path(path)
        if not path:
            return False
        normed_path = self.filesystem.absnormpath(path)
        sep = self.filesystem._path_separator(path)
        if self.filesystem.is_windows_fs:
            if self.filesystem.alternative_path_separator is not None:
                path_seps = (
                    sep, self.filesystem._alternative_path_separator(path)
                )
            else:
                path_seps = (sep,)
            drive, rest = self.filesystem.splitdrive(normed_path)
            if drive and drive[:1] in path_seps:
                # drive part starts with a separator (UNC share):
                # the share itself or its root is a mount point
                return (not rest) or (rest in path_seps)
            if rest in path_seps:
                # drive root such as `C:\`
                return True
        for mount_point in self.filesystem.mount_points:
            # compare with trailing separators stripped on both sides
            if normed_path.rstrip(sep) == mount_point.rstrip(sep):
                return True
        return False
def __getattr__(self, name):
"""Forwards any non-faked calls to the real os.path."""
return getattr(self._os_path, name)
class FakeOsModule:
    """Uses FakeFilesystem to provide a fake os module replacement.

    Do not create os.path separately from os, as there is a necessary circular
    dependency between os and os.path to replicate the behavior of the standard
    Python modules. What you want to do is to just let FakeOsModule take care
    of `os.path` setup itself.

    # You always want to do this.
    filesystem = fake_filesystem.FakeFilesystem()
    my_os_module = fake_filesystem.FakeOsModule(filesystem)
    """
    # Name of the faked null device; set per fake OS in `__init__`
    # (class-level so it is shared like `os.devnull`).
    devnull = None
@staticmethod
def dir():
"""Return the list of patched function names. Used for patching
functions imported from the module.
"""
dir = [
'access', 'chdir', 'chmod', 'chown', 'close', 'fstat', 'fsync',
'getcwd', 'lchmod', 'link', 'listdir', 'lstat', 'makedirs',
'mkdir', 'mknod', 'open', 'read', 'readlink', 'remove',
'removedirs', 'rename', 'rmdir', 'stat', 'symlink', 'umask',
'unlink', 'utime', 'walk', 'write', 'getcwdb', 'replace'
]
if sys.platform.startswith('linux'):
dir += [
'fdatasync', 'getxattr', 'listxattr',
'removexattr', 'setxattr'
]
if use_scandir:
dir += ['scandir']
return dir
def __init__(self, filesystem):
"""Also exposes self.path (to fake os.path).
Args:
filesystem: FakeFilesystem used to provide file system information
"""
self.filesystem = filesystem
self.sep = filesystem.path_separator
self.altsep = filesystem.alternative_path_separator
self.linesep = filesystem.line_separator()
self._os_module = os
self.path = FakePathModule(self.filesystem, self)
self.__class__.devnull = ('/dev/nul' if filesystem.is_windows_fs
else '/dev/nul')
def fdopen(self, fd, *args, **kwargs):
"""Redirector to open() builtin function.
Args:
fd: The file descriptor of the file to open.
*args: Pass through args.
**kwargs: Pass through kwargs.
Returns:
File object corresponding to file_des.
Raises:
TypeError: if file descriptor is not an integer.
"""
if not is_int_type(fd):
raise TypeError('an integer is required')
return FakeFileOpen(self.filesystem)(fd, *args, **kwargs)
def _umask(self):
"""Return the current umask."""
if self.filesystem.is_windows_fs:
# windows always returns 0 - it has no real notion of umask
return 0
if sys.platform == 'win32':
# if we are testing Unix under Windows we assume a default mask
return 0o002
else:
# under Unix, we return the real umask;
# as there is no pure getter for umask, so we have to first
# set a mode to get the previous one and then re-set that
mask = os.umask(0)
os.umask(mask)
return mask
    def open(self, path, flags, mode=None, *, dir_fd=None):
        """Return the file descriptor for a FakeFile.

        Args:
            path: the path to the file
            flags: low-level bits to indicate io operation
            mode: bits to define default permissions
                Note: only basic modes are supported, OS-specific modes are
                ignored
            dir_fd: If not `None`, the file descriptor of a directory,
                with `file_path` being relative to this directory.

        Returns:
            A file descriptor.

        Raises:
            OSError: if the path cannot be found
            ValueError: if invalid mode is given
            NotImplementedError: if `os.O_EXCL` is used without `os.O_CREAT`
        """
        path = self._path_with_dir_fd(path, self.open, dir_fd)
        if mode is None:
            if self.filesystem.is_windows_fs:
                mode = 0o666
            else:
                # default permissions are filtered by the current umask
                mode = 0o777 & ~self._umask()
        has_tmpfile_flag = (hasattr(os, 'O_TMPFILE') and
                            flags & getattr(os, 'O_TMPFILE'))
        # translate the low-level flag bits into the open-mode description
        # used by FakeFileOpen
        open_modes = _OpenModes(
            must_exist=not flags & os.O_CREAT and not has_tmpfile_flag,
            can_read=not flags & os.O_WRONLY,
            can_write=flags & (os.O_RDWR | os.O_WRONLY) != 0,
            truncate=flags & os.O_TRUNC != 0,
            append=flags & os.O_APPEND != 0,
            must_not_exist=flags & os.O_EXCL != 0
        )
        if open_modes.must_not_exist and open_modes.must_exist:
            raise NotImplementedError(
                'O_EXCL without O_CREAT mode is not supported')
        if has_tmpfile_flag:
            # this is a workaround for tempfiles that do not have a filename
            # as we do not support this directly, we just add a unique filename
            # and set the file to delete on close
            path = self.filesystem.joinpaths(
                path, str(uuid.uuid4()))
        if (not self.filesystem.is_windows_fs and
                self.filesystem.exists(path)):
            # handle opening directory - only allowed under Posix
            # with read-only mode
            obj = self.filesystem.resolve(path)
            if isinstance(obj, FakeDirectory):
                if ((not open_modes.must_exist and
                     not self.filesystem.is_macos)
                        or open_modes.can_write):
                    self.filesystem.raise_os_error(errno.EISDIR, path)
                dir_wrapper = FakeDirWrapper(obj, path, self.filesystem)
                file_des = self.filesystem._add_open_file(dir_wrapper)
                dir_wrapper.filedes = file_des
                return file_des
        # low level open is always binary
        str_flags = 'b'
        delete_on_close = has_tmpfile_flag
        if hasattr(os, 'O_TEMPORARY'):
            # Windows-only flag: the file is removed when closed
            delete_on_close = flags & os.O_TEMPORARY == os.O_TEMPORARY
        fake_file = FakeFileOpen(
            self.filesystem, delete_on_close=delete_on_close, raw_io=True)(
                path, str_flags, open_modes=open_modes)
        if fake_file.file_object != self.filesystem.dev_null:
            self.chmod(path, mode)
        return fake_file.fileno()
def close(self, fd):
"""Close a file descriptor.
Args:
fd: An integer file descriptor for the file object requested.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer.
"""
file_handle = self.filesystem.get_open_file(fd)
file_handle.close()
def read(self, fd, n):
"""Read number of bytes from a file descriptor, returns bytes read.
Args:
fd: An integer file descriptor for the file object requested.
n: Number of bytes to read from file.
Returns:
Bytes read from file.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer.
"""
file_handle = self.filesystem.get_open_file(fd)
file_handle.raw_io = True
return file_handle.read(n)
    def write(self, fd, contents):
        """Write string to file descriptor, returns number of bytes written.

        Args:
            fd: An integer file descriptor for the file object requested.
            contents: String of bytes to write to file.

        Returns:
            Number of bytes written.

        Raises:
            OSError: bad file descriptor.
            TypeError: if file descriptor is not an integer.
        """
        file_handle = self.filesystem.get_open_file(fd)
        if isinstance(file_handle, FakeDirWrapper):
            # directories cannot be written to
            self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path)
        if isinstance(file_handle, FakePipeWrapper):
            return file_handle.write(contents)
        file_handle.raw_io = True
        # synchronize buffered state before appending the new contents
        file_handle._sync_io()
        file_handle.update_flush_pos()
        file_handle.write(contents)
        file_handle.flush()
        return len(contents)
def pipe(self):
read_fd, write_fd = os.pipe()
read_wrapper = FakePipeWrapper(self.filesystem, read_fd)
file_des = self.filesystem._add_open_file(read_wrapper)
read_wrapper.filedes = file_des
write_wrapper = FakePipeWrapper(self.filesystem, write_fd)
file_des = self.filesystem._add_open_file(write_wrapper)
write_wrapper.filedes = file_des
return read_wrapper.filedes, write_wrapper.filedes
    @staticmethod
    def stat_float_times(newvalue=None):
        """Determine whether a file's time stamps are reported as floats
        or ints.

        Calling without arguments returns the current value. The value is
        shared by all instances of FakeOsModule.

        Args:
            newvalue: If `True`, mtime, ctime, atime are reported as floats.
                Otherwise, they are returned as ints (rounding down).
        """
        # the flag is stored on FakeStatResult, shared by all instances
        return FakeStatResult.stat_float_times(newvalue)
def fstat(self, fd):
"""Return the os.stat-like tuple for the FakeFile object of file_des.
Args:
fd: The file descriptor of filesystem object to retrieve.
Returns:
The FakeStatResult object corresponding to entry_path.
Raises:
OSError: if the filesystem object doesn't exist.
"""
# stat should return the tuple representing return value of os.stat
file_object = self.filesystem.get_open_file(fd).get_object()
return file_object.stat_result.copy()
def umask(self, mask):
"""Change the current umask.
Args:
mask: (int) The new umask value.
Returns:
The old umask.
Raises:
TypeError: if new_mask is of an invalid type.
"""
if not is_int_type(mask):
raise TypeError('an integer is required')
old_umask = self.filesystem.umask
self.filesystem.umask = mask
return old_umask
def chdir(self, path):
"""Change current working directory to target directory.
Args:
path: The path to new current working directory.
Raises:
OSError: if user lacks permission to enter the argument directory
or if the target is not a directory.
"""
path = self.filesystem.resolve_path(
path, allow_fd=True)
self.filesystem.confirmdir(path)
directory = self.filesystem.resolve(path)
# A full implementation would check permissions all the way
# up the tree.
if not is_root() and not directory.st_mode | PERM_EXE:
self.filesystem.raise_os_error(errno.EACCES, directory)
self.filesystem.cwd = path
    def getcwd(self):
        """Return the current working directory of the fake filesystem."""
        return self.filesystem.cwd
def getcwdb(self):
"""Return current working directory as bytes."""
return bytes(
self.filesystem.cwd, locale.getpreferredencoding(False))
    def listdir(self, path):
        """Return a list of file names in target_directory.

        Args:
            path: Path to the target directory within the fake
                filesystem.

        Returns:
            A list of file names within the target directory in arbitrary
            order.

        Raises:
            OSError: if the target is not a directory.
        """
        return self.filesystem.listdir(path)
XATTR_CREATE = 1
XATTR_REPLACE = 2
def getxattr(self, path, attribute, *, follow_symlinks=True):
"""Return the value of the given extended filesystem attribute for
`path`.
Args:
path: File path, file descriptor or path-like object (for
Python >= 3.6).
attribute: (str or bytes) The attribute name.
follow_symlinks: (bool) If True (the default), symlinks in the
path are traversed.
Returns:
The contents of the extended attribute as bytes or None if
the attribute does not exist.
Raises:
OSError: if the path does not exist.
"""
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'getxattr'")
if isinstance(attribute, bytes):
attribute = attribute.decode(sys.getfilesystemencoding())
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
return file_obj.xattr.get(attribute)
def listxattr(self, path=None, *, follow_symlinks=True):
"""Return a list of the extended filesystem attributes on `path`.
Args:
path: File path, file descriptor or path-like object (for
Python >= 3.6). If None, the current directory is used.
follow_symlinks: (bool) If True (the default), symlinks in the
path are traversed.
Returns:
A list of all attribute names for the given path as str.
Raises:
OSError: if the path does not exist.
"""
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'listxattr'")
if path is None:
path = self.getcwd()
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
return list(file_obj.xattr.keys())
def removexattr(self, path, attribute, *, follow_symlinks=True):
"""Removes the extended filesystem attribute attribute from `path`.
Args:
path: File path, file descriptor or path-like object (for
Python >= 3.6).
attribute: (str or bytes) The attribute name.
follow_symlinks: (bool) If True (the default), symlinks in the
path are traversed.
Raises:
OSError: if the path does not exist.
"""
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'removexattr'")
if isinstance(attribute, bytes):
attribute = attribute.decode(sys.getfilesystemencoding())
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
if attribute in file_obj.xattr:
del file_obj.xattr[attribute]
def setxattr(self, path, attribute, value,
flags=0, *, follow_symlinks=True):
"""Sets the value of the given extended filesystem attribute for
`path`.
Args:
path: File path, file descriptor or path-like object (for
Python >= 3.6).
attribute: The attribute name (str or bytes).
value: (byte-like) The value to be set.
follow_symlinks: (bool) If True (the default), symlinks in the
path are traversed.
Raises:
OSError: if the path does not exist.
TypeError: if `value` is not a byte-like object.
"""
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'setxattr'")
if isinstance(attribute, bytes):
attribute = attribute.decode(sys.getfilesystemencoding())
if not is_byte_string(value):
raise TypeError('a bytes-like object is required')
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
exists = attribute in file_obj.xattr
if exists and flags == self.XATTR_CREATE:
self.filesystem.raise_os_error(errno.ENODATA, file_obj.path)
if not exists and flags == self.XATTR_REPLACE:
self.filesystem.raise_os_error(errno.EEXIST, file_obj.path)
file_obj.xattr[attribute] = value
    if use_scandir:
        def scandir(self, path='.'):
            """Return an iterator of DirEntry objects corresponding to the
            entries in the directory given by path.

            Args:
                path: Path to the target directory within the fake filesystem.

            Returns:
                An iterator to an unsorted list of os.DirEntry objects for
                each entry in path.

            Raises:
                OSError: if the target is not a directory.
            """
            # resolves to the module-level `scandir` helper, not this method
            return scandir(self.filesystem, path)
    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Perform an os.walk operation over the fake filesystem.

        Args:
            top: The root directory from which to begin walk.
            topdown: Determines whether to return the tuples with the root as
                the first entry (`True`) or as the last, after all the child
                directory tuples (`False`).
            onerror: If not `None`, function which will be called to handle the
                `os.error` instance provided when `os.listdir()` fails.
            followlinks: If `True`, symbolic links are followed.

        Yields:
            (path, directories, nondirectories) for top and each of its
            subdirectories. See the documentation for the builtin os module
            for further details.
        """
        # resolves to the module-level `walk` helper, not this method
        return walk(self.filesystem, top, topdown, onerror, followlinks)
def readlink(self, path, dir_fd=None):
"""Read the target of a symlink.
Args:
path: Symlink to read the target of.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Returns:
the string representing the path to which the symbolic link points.
Raises:
TypeError: if `path` is None
OSError: (with errno=ENOENT) if path is not a valid path, or
(with errno=EINVAL) if path is valid, but is not a symlink
"""
path = self._path_with_dir_fd(path, self.readlink, dir_fd)
return self.filesystem.readlink(path)
def stat(self, path, *, dir_fd=None, follow_symlinks=True):
"""Return the os.stat-like tuple for the FakeFile object of entry_path.
Args:
path: path to filesystem object to retrieve.
dir_fd: (int) If not `None`, the file descriptor of a directory,
with `entry_path` being relative to this directory.
follow_symlinks: (bool) If `False` and `entry_path` points to a
symlink, the link itself is changed instead of the linked
object.
Returns:
The FakeStatResult object corresponding to entry_path.
Raises:
OSError: if the filesystem object doesn't exist.
"""
path = self._path_with_dir_fd(path, self.stat, dir_fd)
return self.filesystem.stat(path, follow_symlinks)
def lstat(self, path, *, dir_fd=None):
"""Return the os.stat-like tuple for entry_path, not following symlinks.
Args:
path: path to filesystem object to retrieve.
dir_fd: If not `None`, the file descriptor of a directory, with
`path` being relative to this directory.
Returns:
the FakeStatResult object corresponding to `path`.
Raises:
OSError: if the filesystem object doesn't exist.
"""
# stat should return the tuple representing return value of os.stat
path = self._path_with_dir_fd(path, self.lstat, dir_fd)
return self.filesystem.stat(path, follow_symlinks=False)
def remove(self, path, dir_fd=None):
"""Remove the FakeFile object at the specified file path.
Args:
path: Path to file to be removed.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Raises:
OSError: if path points to a directory.
OSError: if path does not exist.
OSError: if removal failed.
"""
path = self._path_with_dir_fd(path, self.remove, dir_fd)
self.filesystem.remove(path)
def unlink(self, path, *, dir_fd=None):
"""Remove the FakeFile object at the specified file path.
Args:
path: Path to file to be removed.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Raises:
OSError: if path points to a directory.
OSError: if path does not exist.
OSError: if removal failed.
"""
path = self._path_with_dir_fd(path, self.unlink, dir_fd)
self.filesystem.remove(path)
    def rename(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
        """Rename a FakeFile object at old_file_path to new_file_path,
        preserving all properties.
        Also replaces existing new_file_path object, if one existed
        (Unix only).

        Args:
            src: Path to filesystem object to rename.
            dst: Path to where the filesystem object will live
                after this call.
            src_dir_fd: If not `None`, the file descriptor of a directory,
                with `src` being relative to this directory.
            dst_dir_fd: If not `None`, the file descriptor of a directory,
                with `dst` being relative to this directory.

        Raises:
            OSError: if old_file_path does not exist.
            OSError: if new_file_path is an existing directory.
            OSError: if new_file_path is an existing file (Windows only)
            OSError: if new_file_path is an existing file and could not
                be removed (Unix)
            OSError: if `dirname(new_file)` does not exist
            OSError: if the file would be moved to another filesystem
                (e.g. mount point)
        """
        # resolve dir_fd-relative paths before delegating to the filesystem
        src = self._path_with_dir_fd(src, self.rename, src_dir_fd)
        dst = self._path_with_dir_fd(dst, self.rename, dst_dir_fd)
        self.filesystem.rename(src, dst)
    def replace(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
        """Renames a FakeFile object at old_file_path to new_file_path,
        preserving all properties.
        Also replaces existing new_file_path object, if one existed.

        Args:
            src: Path to filesystem object to rename.
            dst: Path to where the filesystem object will live
                after this call.
            src_dir_fd: If not `None`, the file descriptor of a directory,
                with `src` being relative to this directory.
            dst_dir_fd: If not `None`, the file descriptor of a directory,
                with `dst` being relative to this directory.

        Raises:
            OSError: if old_file_path does not exist.
            OSError: if new_file_path is an existing directory.
            OSError: if new_file_path is an existing file and could
                not be removed
            OSError: if `dirname(new_file)` does not exist
            OSError: if the file would be moved to another filesystem
                (e.g. mount point)
        """
        # dir_fd support is checked against the real `os.rename`
        # (self.rename is passed as the reference function)
        src = self._path_with_dir_fd(src, self.rename, src_dir_fd)
        dst = self._path_with_dir_fd(dst, self.rename, dst_dir_fd)
        self.filesystem.rename(src, dst, force_replace=True)
def rmdir(self, path, *, dir_fd=None):
"""Remove a leaf Fake directory.
Args:
path: (str) Name of directory to remove.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Raises:
OSError: if `path` does not exist or is not a directory,
or as per FakeFilesystem.remove_object. Cannot remove '.'.
"""
path = self._path_with_dir_fd(path, self.rmdir, dir_fd)
self.filesystem.rmdir(path)
    def removedirs(self, name):
        """Remove a leaf fake directory and all empty intermediate ones.

        Args:
            name: the directory to be removed.

        Raises:
            OSError: if target_directory does not exist or is not a directory.
            OSError: if target_directory is not empty.
        """
        name = self.filesystem.absnormpath(name)
        directory = self.filesystem.confirmdir(name)
        if directory.contents:
            self.filesystem.raise_os_error(
                errno.ENOTEMPTY, self.path.basename(name))
        else:
            self.rmdir(name)
        head, tail = self.path.split(name)
        if not tail:
            # path ended with a separator: split again for the real tail
            head, tail = self.path.split(head)
        while head and tail:
            head_dir = self.filesystem.confirmdir(head)
            if head_dir.contents:
                # stop at the first non-empty ancestor
                break
            # only the top-level dir may not be a symlink
            self.filesystem.rmdir(head, allow_symlink=True)
            head, tail = self.path.split(head)
    def mkdir(self, path, mode=PERM_DEF, *, dir_fd=None):
        """Create a leaf Fake directory.

        Args:
            path: (str) Name of directory to create.
                Relative paths are assumed to be relative to '/'.
            mode: (int) Mode to create directory with. This argument defaults
                to 0o777. The umask is applied to this mode.
            dir_fd: If not `None`, the file descriptor of a directory,
                with `path` being relative to this directory.

        Raises:
            OSError: if the directory name is invalid or parent directory is
                read only or as per FakeFilesystem.add_object.
        """
        path = self._path_with_dir_fd(path, self.mkdir, dir_fd)
        try:
            self.filesystem.makedir(path, mode)
        except OSError as e:
            if e.errno == errno.EACCES:
                # re-raise access errors with the path attached
                self.filesystem.raise_os_error(e.errno, path)
            raise
def makedirs(self, name, mode=PERM_DEF, exist_ok=None):
"""Create a leaf Fake directory + create any non-existent parent dirs.
Args:
name: (str) Name of directory to create.
mode: (int) Mode to create directory (and any necessary parent
directories) with. This argument defaults to 0o777.
The umask is applied to this mode.
exist_ok: (boolean) If exist_ok is False (the default), an OSError
is raised if the target directory already exists.
Raises:
OSError: if the directory already exists and exist_ok=False, or as
per :py:meth:`FakeFilesystem.create_dir`.
"""
if exist_ok is None:
exist_ok = False
self.filesystem.makedirs(name, mode, exist_ok)
    def _path_with_dir_fd(self, path, fct, dir_fd):
        """Return the path considering dir_fd. Raise on invalid parameters.

        Args:
            path: the path to resolve, possibly relative to `dir_fd`.
            fct: the calling fake function; its name is used to look up
                dir_fd support on the real os function.
            dir_fd: file descriptor of a directory or None.
        """
        path = to_string(path)
        if dir_fd is not None:
            # check if fd is supported for the built-in real function
            real_fct = getattr(os, fct.__name__)
            if real_fct not in self.supports_dir_fd:
                raise NotImplementedError(
                    'dir_fd unavailable on this platform')
            if isinstance(path, int):
                raise ValueError("%s: Can't specify dir_fd without "
                                 "matching path" % fct.__name__)
            if not self.path.isabs(path):
                # relative path: interpret it relative to the directory
                # that dir_fd refers to
                return self.path.join(
                    self.filesystem.get_open_file(
                        dir_fd).get_object().path, path)
        return path
    def access(self, path, mode, *, dir_fd=None, follow_symlinks=True):
        """Check if a file exists and has the specified permissions.

        Args:
            path: (str) Path to the file.
            mode: (int) Permissions represented as a bitwise-OR combination of
                os.F_OK, os.R_OK, os.W_OK, and os.X_OK.
            dir_fd: If not `None`, the file descriptor of a directory, with
                `path` being relative to this directory.
            follow_symlinks: (bool) If `False` and `path` points to a symlink,
                the link itself is queried instead of the linked object.

        Returns:
            bool, `True` if file is accessible, `False` otherwise.
        """
        path = self._path_with_dir_fd(path, self.access, dir_fd)
        try:
            stat_result = self.stat(path, follow_symlinks=follow_symlinks)
        except OSError as os_error:
            if os_error.errno == errno.ENOENT:
                return False
            raise
        if is_root():
            # root can always write, so do not check the write bit
            mode &= ~os.W_OK
        # only the owner permission bits (st_mode >> 6) are considered
        return (mode & ((stat_result.st_mode >> 6) & 7)) == mode
def chmod(self, path, mode, *, dir_fd=None, follow_symlinks=True):
"""Change the permissions of a file as encoded in integer mode.
Args:
path: (str) Path to the file.
mode: (int) Permissions.
dir_fd: If not `None`, the file descriptor of a directory, with
`path` being relative to this directory.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
"""
path = self._path_with_dir_fd(path, self.chmod, dir_fd)
self.filesystem.chmod(path, mode, follow_symlinks)
def lchmod(self, path, mode):
"""Change the permissions of a file as encoded in integer mode.
If the file is a link, the permissions of the link are changed.
Args:
path: (str) Path to the file.
mode: (int) Permissions.
"""
if self.filesystem.is_windows_fs:
raise (NameError, "name 'lchmod' is not defined")
self.filesystem.chmod(path, mode, follow_symlinks=False)
def utime(self, path, times=None, ns=None,
dir_fd=None, follow_symlinks=True):
"""Change the access and modified times of a file.
Args:
path: (str) Path to the file.
times: 2-tuple of int or float numbers, of the form (atime, mtime)
which is used to set the access and modified times in seconds.
If None, both times are set to the current time.
ns: 2-tuple of int numbers, of the form (atime, mtime) which is
used to set the access and modified times in nanoseconds.
If None, both times are set to the current time.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
Raises:
TypeError: If anything other than the expected types is
specified in the passed `times` or `ns` tuple,
or if the tuple length is not equal to 2.
ValueError: If both times and ns are specified.
"""
path = self._path_with_dir_fd(path, self.utime, dir_fd)
self.filesystem.utime(
path, times=times, ns=ns, follow_symlinks=follow_symlinks)
    def chown(self, path, uid, gid, *, dir_fd=None, follow_symlinks=True):
        """Set ownership of a faked file.

        Args:
            path: (str) Path to the file or directory.
            uid: (int) Numeric uid to set the file or directory to.
            gid: (int) Numeric gid to set the file or directory to.
            dir_fd: (int) If not `None`, the file descriptor of a directory,
                with `path` being relative to this directory.
            follow_symlinks: (bool) If `False` and path points to a symlink,
                the link itself is changed instead of the linked object.

        Raises:
            OSError: if path does not exist.

        `None` is also allowed for `uid` and `gid`. This permits `os.rename`
        to use `os.chown` even when the source file `uid` and `gid` are
        `None` (unset).
        """
        path = self._path_with_dir_fd(path, self.chown, dir_fd)
        file_object = self.filesystem.resolve(
            path, follow_symlinks, allow_fd=True)
        if not ((is_int_type(uid) or uid is None) and
                (is_int_type(gid) or gid is None)):
            raise TypeError("An integer is required")
        # -1 leaves the respective id unchanged, as with the real os.chown
        if uid != -1:
            file_object.st_uid = uid
        if gid != -1:
            file_object.st_gid = gid
def mknod(self, path, mode=None, device=0, *, dir_fd=None):
"""Create a filesystem node named 'filename'.
Does not support device special files or named pipes as the real os
module does.
Args:
path: (str) Name of the file to create
mode: (int) Permissions to use and type of file to be created.
Default permissions are 0o666. Only the stat.S_IFREG file type
is supported by the fake implementation. The umask is applied
to this mode.
device: not supported in fake implementation
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Raises:
OSError: if called with unsupported options or the file can not be
created.
"""
if self.filesystem.is_windows_fs:
raise (AttributeError, "module 'os' has no attribute 'mknode'")
if mode is None:
# note that a default value of 0o600 without a device type is
# documented - this is not how it seems to work
mode = S_IFREG | 0o600
if device or not mode & S_IFREG and not is_root():
self.filesystem.raise_os_error(errno.EPERM)
path = self._path_with_dir_fd(path, self.mknod, dir_fd)
head, tail = self.path.split(path)
if not tail:
if self.filesystem.exists(head, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, path)
self.filesystem.raise_os_error(errno.ENOENT, path)
if tail in (b'.', u'.', b'..', u'..'):
self.filesystem.raise_os_error(errno.ENOENT, path)
if self.filesystem.exists(path, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, path)
self.filesystem.add_object(head, FakeFile(
tail, mode & ~self.filesystem.umask,
filesystem=self.filesystem))
def symlink(self, src, dst, *, dir_fd=None):
"""Creates the specified symlink, pointed at the specified link target.
Args:
src: The target of the symlink.
dst: Path to the symlink to create.
dir_fd: If not `None`, the file descriptor of a directory,
with `src` being relative to this directory.
Raises:
OSError: if the file already exists.
"""
src = self._path_with_dir_fd(src, self.symlink, dir_fd)
self.filesystem.create_symlink(
dst, src, create_missing_dirs=False)
def link(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
"""Create a hard link at new_path, pointing at old_path.
Args:
src: An existing path to the target file.
dst: The destination path to create a new link at.
src_dir_fd: If not `None`, the file descriptor of a directory,
with `src` being relative to this directory.
dst_dir_fd: If not `None`, the file descriptor of a directory,
with `dst` being relative to this directory.
Returns:
The FakeFile object referred to by `src`.
Raises:
OSError: if something already exists at new_path.
OSError: if the parent directory doesn't exist.
"""
src = self._path_with_dir_fd(src, self.link, src_dir_fd)
dst = self._path_with_dir_fd(dst, self.link, dst_dir_fd)
self.filesystem.link(src, dst)
def fsync(self, fd):
"""Perform fsync for a fake file (in other words, do nothing).
Args:
fd: The file descriptor of the open file.
Raises:
OSError: file_des is an invalid file descriptor.
TypeError: file_des is not an integer.
"""
# Throw an error if file_des isn't valid
if 0 <= fd < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
file_object = self.filesystem.get_open_file(fd)
if self.filesystem.is_windows_fs:
if (not hasattr(file_object, 'allow_update') or
not file_object.allow_update):
self.filesystem.raise_os_error(
errno.EBADF, file_object.file_path)
def fdatasync(self, fd):
"""Perform fdatasync for a fake file (in other words, do nothing).
Args:
fd: The file descriptor of the open file.
Raises:
OSError: `fd` is an invalid file descriptor.
TypeError: `fd` is not an integer.
"""
if self.filesystem.is_windows_fs or self.filesystem.is_macos:
raise AttributeError("module 'os' has no attribute 'fdatasync'")
# Throw an error if file_des isn't valid
if 0 <= fd < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
self.filesystem.get_open_file(fd)
    def sendfile(self, fd_out, fd_in, offset, count):
        """Copy count bytes from file descriptor fd_in to file descriptor
        fd_out starting at offset.

        Args:
            fd_out: The file descriptor of the destination file.
            fd_in: The file descriptor of the source file.
            offset: The offset in bytes where to start the copy in the
                source file. If `None` (Linux only), copying is started at
                the current position, and the position is updated.
            count: The number of bytes to copy. If 0, all remaining bytes
                are copied (MacOs only).

        Returns:
            The number of bytes written, or 0 if nothing was copied.

        Raises:
            OSError: If `fd_in` or `fd_out` is an invalid file descriptor.
            TypeError: If `fd_in` or `fd_out` is not an integer.
            TypeError: If `offset` is None under MacOs.
        """
        if self.filesystem.is_windows_fs:
            # sendfile() does not exist on Windows.
            raise AttributeError("module 'os' has no attribute 'sendfile'")
        # Standard stream descriptors (0-2) are rejected with EINVAL.
        if 0 <= fd_in < NR_STD_STREAMS:
            self.filesystem.raise_os_error(errno.EINVAL)
        if 0 <= fd_out < NR_STD_STREAMS:
            self.filesystem.raise_os_error(errno.EINVAL)
        source = self.filesystem.get_open_file(fd_in)
        dest = self.filesystem.get_open_file(fd_out)
        if self.filesystem.is_macos:
            # under macOS, the destination must be a socket
            if dest.get_object().stat_result.st_mode & 0o777000 != S_IFSOCK:
                raise OSError('Socket operation on non-socket')
        if offset is None:
            if self.filesystem.is_macos:
                raise TypeError('None is not a valid offset')
            # Linux semantics: read from the current position and advance it.
            contents = source.read(count)
        else:
            # Read from `offset` without disturbing the source position.
            position = source.tell()
            source.seek(offset)
            if count == 0 and self.filesystem.is_macos:
                contents = source.read()
            else:
                contents = source.read(count)
            source.seek(position)
        if contents:
            written = dest.write(contents)
            dest.flush()
            return written
        return 0
def __getattr__(self, name):
"""Forwards any unfaked calls to the standard os module."""
return getattr(self._os_module, name)
class FakeIoModule:
    """Uses FakeFilesystem to provide a fake io module replacement.

    Currently only used to wrap `io.open()` which is an alias to `open()`.

    You need a fake_filesystem to use this:
    filesystem = fake_filesystem.FakeFilesystem()
    my_io_module = fake_filesystem.FakeIoModule(filesystem)
    """

    @staticmethod
    def dir():
        """Return the list of patched function names. Used for patching
        functions imported from the module.
        """
        return 'open',

    def __init__(self, filesystem):
        """
        Args:
            filesystem: FakeFilesystem used to provide file system
                information.
        """
        self.filesystem = filesystem
        self._io_module = io

    def open(self, file, mode='r', buffering=-1, encoding=None,
             errors=None, newline=None, closefd=True, opener=None):
        """Redirect the call to FakeFileOpen.
        See FakeFileOpen.call() for description.
        """
        return FakeFileOpen(self.filesystem)(
            file, mode, buffering, encoding, errors,
            newline, closefd, opener)

    def __getattr__(self, name):
        """Forwards any unfaked calls to the standard io module."""
        return getattr(self._io_module, name)
class FakeFileWrapper:
    """Wrapper for a stream object for use by a FakeFile object.

    If the wrapper has any data written to it, it will propagate to
    the FakeFile object on close() or flush().
    """

    def __init__(self, file_object, file_path, update=False, read=False,
                 append=False, delete_on_close=False, filesystem=None,
                 newline=None, binary=True, closefd=True, encoding=None,
                 errors=None, raw_io=False, is_stream=False):
        self.file_object = file_object
        self.file_path = file_path
        self._append = append
        self._read = read
        self.allow_update = update
        self._closefd = closefd
        # epoch of the file object at wrap time; used to detect content
        # changes made through other wrappers (see _sync_io)
        self._file_epoch = file_object.epoch
        self.raw_io = raw_io
        self._binary = binary
        self.is_stream = is_stream
        self._changed = False
        contents = file_object.byte_contents
        self._encoding = encoding or locale.getpreferredencoding(False)
        errors = errors or 'strict'
        # writes to the null device are silently discarded
        buffer_class = (NullFileBufferIO if file_object == filesystem.dev_null
                        else FileBufferIO)
        self._io = buffer_class(contents, linesep=filesystem.line_separator(),
                                binary=binary, encoding=encoding,
                                newline=newline, errors=errors)
        # separate read pointer, needed in append mode where all writes go
        # to the end of the stream regardless of the current position
        self._read_whence = 0
        self._read_seek = 0
        self._flush_pos = 0
        if contents:
            self._flush_pos = len(contents)
            if update:
                if not append:
                    self._io.seek(0)
                else:
                    self._io.seek(self._flush_pos)
                    self._read_seek = self._io.tell()

        if delete_on_close:
            assert filesystem, 'delete_on_close=True requires filesystem'
        self._filesystem = filesystem
        self.delete_on_close = delete_on_close
        # override, don't modify FakeFile.name, as FakeFilesystem expects
        # it to be the file name only, no directories.
        self.name = file_object.opened_as
        self.filedes = None

    def __enter__(self):
        """To support usage of this fake file with the 'with' statement."""
        return self

    def __exit__(self, type, value, traceback):
        """To support usage of this fake file with the 'with' statement."""
        self.close()

    def _raise(self, message):
        # raw (low-level) I/O reports errors as OSError instead
        if self.raw_io:
            self._filesystem.raise_os_error(errno.EBADF, self.file_path)
        raise io.UnsupportedOperation(message)

    def get_object(self):
        """Return the FakeFile object that is wrapped by the current instance.
        """
        return self.file_object

    def fileno(self):
        """Return the file descriptor of the file object."""
        return self.filedes

    def close(self):
        """Close the file."""
        # ignore closing a closed file
        if not self._is_open():
            return

        # for raw io, all writes are flushed immediately
        if self.allow_update and not self.raw_io:
            self.flush()
            if self._filesystem.is_windows_fs and self._changed:
                self.file_object.st_mtime = time.time()

        if self._closefd:
            self._filesystem._close_open_file(self.filedes)
        else:
            self._filesystem.open_files[self.filedes].remove(self)
        if self.delete_on_close:
            self._filesystem.remove_object(self.get_object().path)

    @property
    def closed(self):
        """Simulate the `closed` attribute on file."""
        return not self._is_open()

    def flush(self):
        """Flush file contents to 'disk'."""
        self._check_open_file()
        if self.allow_update and not self.is_stream:
            contents = self._io.getvalue()
            if self._append:
                # merge the appended data behind any contents written
                # through other wrappers in the meantime
                self._sync_io()
                old_contents = (self.file_object.byte_contents
                                if is_byte_string(contents) else
                                self.file_object.contents)
                contents = old_contents + contents[self._flush_pos:]
                self._set_stream_contents(contents)
                self.update_flush_pos()
            else:
                self._io.flush()
            if self.file_object.set_contents(contents, self._encoding):
                if self._filesystem.is_windows_fs:
                    # on Windows, st_mtime is only updated on close()
                    self._changed = True
                else:
                    current_time = time.time()
                    self.file_object.st_ctime = current_time
                    self.file_object.st_mtime = current_time
            self._file_epoch = self.file_object.epoch

            if not self.is_stream:
                self._flush_related_files()

    def update_flush_pos(self):
        """Record the current stream position as the last flushed position."""
        self._flush_pos = self._io.tell()

    def _flush_related_files(self):
        # propagate the new contents to other non-append wrappers open on
        # the same file object (descriptors 0-2 are the standard streams)
        for open_files in self._filesystem.open_files[3:]:
            if open_files is not None:
                for open_file in open_files:
                    if (open_file is not self and
                            self.file_object == open_file.file_object and
                            not open_file._append):
                        open_file._sync_io()

    def seek(self, offset, whence=0):
        """Move read/write pointer in 'file'."""
        self._check_open_file()
        if not self._append:
            self._io.seek(offset, whence)
        else:
            # in append mode only the read pointer can be repositioned
            self._read_seek = offset
            self._read_whence = whence
        if not self.is_stream:
            self.flush()

    def tell(self):
        """Return the file's current position.

        Returns:
            int, file's current position in bytes.
        """
        self._check_open_file()
        if not self.is_stream:
            self.flush()

        if not self._append:
            return self._io.tell()
        if self._read_whence:
            # resolve a relative read position to an absolute one,
            # preserving the write position
            write_seek = self._io.tell()
            self._io.seek(self._read_seek, self._read_whence)
            self._read_seek = self._io.tell()
            self._read_whence = 0
            self._io.seek(write_seek)
        return self._read_seek

    def _sync_io(self):
        """Update the stream with changes to the file object contents."""
        if self._file_epoch == self.file_object.epoch:
            # no changes since the last sync
            return

        if self._io.binary:
            contents = self.file_object.byte_contents
        else:
            contents = self.file_object.contents
        self._set_stream_contents(contents)
        self._file_epoch = self.file_object.epoch

    def _set_stream_contents(self, contents):
        # replace the buffered contents, keeping the current position
        # unless in append mode (where writes always go to the end)
        whence = self._io.tell()
        self._io.seek(0)
        self._io.truncate()
        if not self._io.binary and is_byte_string(contents):
            contents = contents.decode(self._encoding)
        self._io.putvalue(contents)
        if not self._append:
            self._io.seek(whence)

    def _read_wrappers(self, name):
        """Wrap a stream attribute in a read wrapper.

        Returns a read_wrapper which tracks our own read pointer since the
        stream object has no concept of a different read and write pointer.

        Args:
            name: The name of the attribute to wrap. Should be a read call.

        Returns:
            The read_wrapper function.
        """
        io_attr = getattr(self._io, name)

        def read_wrapper(*args, **kwargs):
            """Wrap all read calls to the stream object.

            We do this to track the read pointer separate from the write
            pointer.  Anything that wants to read from the stream object
            while we're in append mode goes through this.

            Args:
                *args: pass through args
                **kwargs: pass through kwargs

            Returns:
                Wrapped stream object method
            """
            self._io.seek(self._read_seek, self._read_whence)
            ret_value = io_attr(*args, **kwargs)
            self._read_seek = self._io.tell()
            self._read_whence = 0
            # leave the write pointer at the end for append mode
            self._io.seek(0, 2)
            return ret_value

        return read_wrapper

    def _other_wrapper(self, name, writing):
        """Wrap a stream attribute in an other_wrapper.

        Args:
            name: the name of the stream attribute to wrap.

        Returns:
            other_wrapper which is described below.
        """
        io_attr = getattr(self._io, name)

        def other_wrapper(*args, **kwargs):
            """Wrap all other calls to the stream Object.

            We do this to track changes to the write pointer.  Anything
            that moves the write pointer in a file open for appending
            should move the read pointer as well.

            Args:
                *args: Pass through args.
                **kwargs: Pass through kwargs.

            Returns:
                Wrapped stream object method.
            """
            write_seek = self._io.tell()
            ret_value = io_attr(*args, **kwargs)
            if write_seek != self._io.tell():
                self._read_seek = self._io.tell()
                self._read_whence = 0

            return ret_value

        return other_wrapper

    def _adapt_size_for_related_files(self, size):
        # adjust the read pointer of append-mode wrappers open on the same
        # file object after a resize through this wrapper
        for open_files in self._filesystem.open_files[3:]:
            if open_files is not None:
                for open_file in open_files:
                    if (open_file is not self and
                            self.file_object == open_file.file_object and
                            open_file._append):
                        open_file._read_seek += size

    def _truncate_wrapper(self):
        """Wrap truncate() to allow flush after truncate.

        Returns:
            Wrapper which is described below.
        """
        io_attr = getattr(self._io, 'truncate')

        def truncate_wrapper(*args, **kwargs):
            """Wrap truncate call to call flush after truncate."""
            if self._append:
                self._io.seek(self._read_seek, self._read_whence)
            size = io_attr(*args, **kwargs)
            self.flush()
            if not self.is_stream:
                self.file_object.size = size
                buffer_size = len(self._io.getvalue())
                if buffer_size < size:
                    # pad the buffer with null bytes up to the new size
                    self._io.seek(buffer_size)
                    self._io.write('\0' * (size - buffer_size))
                    self.file_object.set_contents(
                        self._io.getvalue(), self._encoding)
                self._flush_pos = size
                self._adapt_size_for_related_files(size - buffer_size)

            self.flush()
            return size

        return truncate_wrapper

    def size(self):
        """Return the content size in bytes of the wrapped file."""
        return self.file_object.st_size

    def __getattr__(self, name):
        # large files have a size but no contents - no I/O is possible
        if self.file_object.is_large_file():
            raise FakeLargeFileIoException(self.file_path)

        reading = name.startswith('read') or name == 'next'
        truncate = name == 'truncate'
        writing = name.startswith('write') or truncate

        if reading or writing:
            self._check_open_file()
        if not self._read and reading:
            return self._read_error()
        if not self.allow_update and writing:
            return self._write_error()

        if reading:
            self._sync_io()
            if not self.is_stream:
                self.flush()
            if not self._filesystem.is_windows_fs:
                self.file_object.st_atime = time.time()
        if truncate:
            return self._truncate_wrapper()
        if self._append:
            if reading:
                return self._read_wrappers(name)
            else:
                return self._other_wrapper(name, writing)
        return getattr(self._io, name)

    def _read_error(self):
        def read_error(*args, **kwargs):
            """Throw an error unless the argument is zero."""
            if args and args[0] == 0:
                # zero-length reads are allowed in raw mode under Windows
                if self._filesystem.is_windows_fs and self.raw_io:
                    return b'' if self._binary else u''
            self._raise('File is not open for reading.')

        return read_error

    def _write_error(self):
        def write_error(*args, **kwargs):
            """Throw an error."""
            if self.raw_io:
                # zero-length writes are allowed in raw mode under Windows
                if (self._filesystem.is_windows_fs and args
                        and len(args[0]) == 0):
                    return 0
            self._raise('File is not open for writing.')

        return write_error

    def _is_open(self):
        # a wrapper counts as open while it is registered in the
        # filesystem's open file table
        return (self.filedes < len(self._filesystem.open_files) and
                self._filesystem.open_files[self.filedes] is not None and
                self in self._filesystem.open_files[self.filedes])

    def _check_open_file(self):
        if not self.is_stream and not self._is_open():
            raise ValueError('I/O operation on closed file')

    def __iter__(self):
        if not self._read:
            self._raise('File is not open for reading')
        return self._io.__iter__()

    def __next__(self):
        if not self._read:
            self._raise('File is not open for reading')
        return next(self._io)
class StandardStreamWrapper:
    """Wrapper for a system standard stream to be used in open files list.
    """

    def __init__(self, stream_object):
        self._stream_object = stream_object
        self.filedes = None

    def get_object(self):
        """Return the wrapped standard stream object."""
        return self._stream_object

    def fileno(self):
        """Return the file descriptor of the wrapped standard stream."""
        return self.filedes

    def close(self):
        """Closing standard streams is not supported; this is a no-op."""

    def is_stream(self):
        """Standard streams always report themselves as streams."""
        return True
class FakeDirWrapper:
    """Wrapper for a FakeDirectory object to be used in open files list.
    """

    def __init__(self, file_object, file_path, filesystem):
        self.file_object = file_object
        self.file_path = file_path
        self._filesystem = filesystem
        self.filedes = None

    def fileno(self):
        """Return the file descriptor of the directory object."""
        return self.filedes

    def get_object(self):
        """Return the FakeDirectory object wrapped by this instance."""
        return self.file_object

    def close(self):
        """Close the directory by removing it from the open files table."""
        self._filesystem._close_open_file(self.filedes)
class FakePipeWrapper:
    """Wrapper for a read or write descriptor of a real pipe object to be
    used in open files list.
    """

    def __init__(self, filesystem, fd):
        self._filesystem = filesystem
        # the real file descriptor of the pipe end
        self.fd = fd
        self.file_object = None
        self.filedes = None

    def get_object(self):
        """Return the wrapped file object (always None for pipes)."""
        return self.file_object

    def fileno(self):
        """Return the fake file descriptor of the pipe object."""
        return self.filedes

    def read(self, numBytes):
        """Read up to `numBytes` bytes from the underlying real pipe."""
        return os.read(self.fd, numBytes)

    def write(self, contents):
        """Write `contents` to the underlying real pipe."""
        return os.write(self.fd, contents)

    def close(self):
        """Deregister the wrapper and close the real pipe descriptor."""
        wrappers = self._filesystem.open_files[self.filedes]
        wrappers.remove(self)
        os.close(self.fd)
# Register deprecated CamelCase aliases for the renamed snake_case methods.
Deprecator.add(FakeFileWrapper, FakeFileWrapper.get_object, 'GetObject')
Deprecator.add(FakeFileWrapper, FakeFileWrapper.size, 'Size')
class FakeFileOpen:
    """Faked `file()` and `open()` function replacements.

    Returns FakeFile objects in a FakeFilesystem in place of the `file()`
    or `open()` function.
    """
    __name__ = 'FakeFileOpen'

    def __init__(self, filesystem, delete_on_close=False, raw_io=False):
        """
        Args:
            filesystem: FakeFilesystem used to provide file system information
            delete_on_close: optional boolean, deletes file on close()
            raw_io: if set, bytes are handled without text encoding and
                errors are reported as OSError (low-level API behavior)
        """
        self.filesystem = filesystem
        self._delete_on_close = delete_on_close
        self.raw_io = raw_io

    def __call__(self, *args, **kwargs):
        """Redirects calls to file() or open() to appropriate method."""
        return self.call(*args, **kwargs)

    def call(self, file_, mode='r', buffering=-1, encoding=None,
             errors=None, newline=None, closefd=True, opener=None,
             open_modes=None):
        """Return a file-like object with the contents of the target
        file object.

        Args:
            file_: Path to target file or a file descriptor.
            mode: Additional file modes (all modes in `open()` are supported).
            buffering: ignored. (Used for signature compliance with
                __builtin__.open)
            encoding: The encoding used to encode unicode strings / decode
                bytes.
            errors: (str) Defines how encoding errors are handled.
            newline: Controls universal newlines, passed to stream object.
            closefd: If a file descriptor rather than file name is passed,
                and this is set to `False`, then the file descriptor is kept
                open when file is closed.
            opener: not supported.
            open_modes: Modes for opening files if called from low-level API.

        Returns:
            A file-like object containing the contents of the target file.

        Raises:
            OSError depending on Python version / call mode:
                - if the target object is a directory
                - on an invalid path
                - if the file does not exist when it should
                - if the file exists but should not
                - if permission is denied
            ValueError: for an invalid mode or mode combination
        """
        binary = 'b' in mode

        newline, open_modes = self._handle_file_mode(mode, newline, open_modes)
        file_object, file_path, filedes, real_path = self._handle_file_arg(
            file_)
        if not filedes:
            closefd = True

        # exclusive creation ('x') fails if the file or, on POSIX systems,
        # a symlink to it already exists
        if (open_modes.must_not_exist and
                (file_object or self.filesystem.islink(file_path) and
                 not self.filesystem.is_windows_fs)):
            self.filesystem.raise_os_error(errno.EEXIST, file_path)

        file_object = self._init_file_object(file_object,
                                             file_path, open_modes,
                                             real_path)

        if S_ISDIR(file_object.st_mode):
            # directories cannot be opened; the errno is platform specific
            if self.filesystem.is_windows_fs:
                self.filesystem.raise_os_error(errno.EACCES, file_path)
            else:
                self.filesystem.raise_os_error(errno.EISDIR, file_path)

        # If you print obj.name, the argument to open() must be printed.
        # Not the abspath, not the filename, but the actual argument.
        file_object.opened_as = file_path
        if open_modes.truncate:
            current_time = time.time()
            file_object.st_mtime = current_time
            if not self.filesystem.is_windows_fs:
                file_object.st_ctime = current_time

        fakefile = FakeFileWrapper(file_object,
                                   file_path,
                                   update=open_modes.can_write,
                                   read=open_modes.can_read,
                                   append=open_modes.append,
                                   delete_on_close=self._delete_on_close,
                                   filesystem=self.filesystem,
                                   newline=newline,
                                   binary=binary,
                                   closefd=closefd,
                                   encoding=encoding,
                                   errors=errors,
                                   raw_io=self.raw_io)
        if filedes is not None:
            fakefile.filedes = filedes
            # replace the file wrapper
            self.filesystem.open_files[filedes].append(fakefile)
        else:
            fakefile.filedes = self.filesystem._add_open_file(fakefile)
        return fakefile

    def _init_file_object(self, file_object, file_path,
                          open_modes, real_path):
        # Validate permissions on an existing file, or create a new one.
        if file_object:
            if (not is_root() and
                    ((open_modes.can_read and
                      not file_object.st_mode & PERM_READ)
                     or (open_modes.can_write and
                         not file_object.st_mode & PERM_WRITE))):
                self.filesystem.raise_os_error(errno.EACCES, file_path)
            if open_modes.can_write:
                if open_modes.truncate:
                    file_object.set_contents('')
        else:
            if open_modes.must_exist:
                self.filesystem.raise_os_error(errno.ENOENT, file_path)
            if self.filesystem.islink(file_path):
                # create the file at the symlink target location
                link_object = self.filesystem.resolve(file_path,
                                                      follow_symlinks=False)
                target_path = link_object.contents
            else:
                target_path = file_path
            if self.filesystem.ends_with_path_separator(target_path):
                # a trailing separator makes the path invalid for file
                # creation; the errno is platform specific
                error = (errno.EINVAL if self.filesystem.is_windows_fs
                         else errno.ENOENT if self.filesystem.is_macos
                         else errno.EISDIR)
                self.filesystem.raise_os_error(error, file_path)
            file_object = self.filesystem.create_file_internally(
                real_path, create_missing_dirs=False,
                apply_umask=True, raw_io=self.raw_io)
        return file_object

    def _handle_file_arg(self, file_):
        # `file_` may be the descriptor of an already open file, or a path
        file_object = None
        if isinstance(file_, int):
            # opening a file descriptor
            filedes = file_
            wrapper = self.filesystem.get_open_file(filedes)
            self._delete_on_close = wrapper.delete_on_close
            file_object = self.filesystem.get_open_file(filedes).get_object()
            file_path = file_object.name
            real_path = file_path
        else:
            # open a file by path
            filedes = None
            file_path = file_
            if file_path == self.filesystem.dev_null.name:
                file_object = self.filesystem.dev_null
                real_path = file_path
            else:
                real_path = self.filesystem.resolve_path(
                    file_path, raw_io=self.raw_io)
                if self.filesystem.exists(file_path):
                    file_object = self.filesystem.get_object_from_normpath(
                        real_path, check_read_perm=False)
        return file_object, file_path, filedes, real_path

    def _handle_file_mode(self, mode, newline, open_modes):
        orig_modes = mode  # Save original modes for error messages.
        # Normalize modes. Handle 't' and 'U'.
        if 'b' in mode and 't' in mode:
            raise ValueError('Invalid mode: ' + mode)
        mode = mode.replace('t', '').replace('b', '')
        mode = mode.replace('rU', 'r').replace('U', 'r')
        if not self.raw_io:
            if mode not in _OPEN_MODE_MAP:
                raise ValueError('Invalid mode: %r' % orig_modes)
            open_modes = _OpenModes(*_OPEN_MODE_MAP[mode])
        return newline, open_modes
def _run_doctest():
    """Execute this module's doctests; used by the `__main__` guard."""
    import doctest
    import pyfakefs
    target = pyfakefs.fake_filesystem
    return doctest.testmod(target)
# Run the module doctests when executed directly.
if __name__ == '__main__':
    _run_doctest()
| 39.383487 | 80 | 0.605801 |
import errno
import heapq
import io
import locale
import os
import sys
import time
import uuid
from collections import namedtuple
from stat import (
S_IFREG, S_IFDIR, S_ISLNK, S_IFMT, S_ISDIR, S_IFLNK, S_ISREG, S_IFSOCK
)
from pyfakefs.deprecator import Deprecator
from pyfakefs.extra_packages import use_scandir
from pyfakefs.fake_scandir import scandir, walk
from pyfakefs.helpers import (
FakeStatResult, FileBufferIO, NullFileBufferIO,
is_int_type, is_byte_string, is_unicode_string,
make_string_path, IS_WIN, to_string)
from pyfakefs import __version__
__pychecker__ = 'no-reimportself'

# Permission bits in octal notation, combined into `st_mode` values.
PERM_READ = 0o400  # Read permission bit.
PERM_WRITE = 0o200  # Write permission bit.
PERM_EXE = 0o100  # Execute permission bit.
PERM_DEF = 0o777  # Default permission bits (all permissions set).
PERM_DEF_FILE = 0o666  # Default file permission bits.
PERM_ALL = 0o7777  # All permission bits.

# Behavior flags derived from the textual `open()` mode string.
_OpenModes = namedtuple(
    'open_modes',
    'must_exist can_read can_write truncate append must_not_exist'
)
# mode string -> (must_exist, can_read, can_write,
#                 truncate, append, must_not_exist)
_OPEN_MODE_MAP = {
    'r': (True, True, False, False, False, False),
    'w': (False, False, True, True, False, False),
    'a': (False, False, True, False, True, False),
    'r+': (True, True, True, False, False, False),
    'w+': (False, True, True, True, False, False),
    'a+': (False, True, True, False, True, False),
    'x': (False, False, True, False, False, True),
    'x+': (False, True, True, False, False, True)
}

# Maximum number of symlinks followed during path resolution.
if sys.platform.startswith('linux'):
    _MAX_LINK_DEPTH = 40
else:
    _MAX_LINK_DEPTH = 32

# File descriptors 0-2 are reserved for stdin/stdout/stderr.
NR_STD_STREAMS = 3

# Fake user/group ids; the real ids are used on POSIX, a placeholder
# value on Windows (which has no uid/gid concept).
USER_ID = 1 if IS_WIN else os.getuid()
GROUP_ID = 1 if IS_WIN else os.getgid()
def set_uid(uid):
    """Set the global fake user id used for permission checks."""
    global USER_ID
    USER_ID = uid
def set_gid(gid):
    """Set the global fake group id."""
    global GROUP_ID
    GROUP_ID = gid
def reset_ids():
    """Reset the global user/group ids to the real (or default) values."""
    set_uid(1 if IS_WIN else os.getuid())
    set_gid(1 if IS_WIN else os.getgid())
def is_root():
    """Return True if the current (fake) user is root (uid 0)."""
    return USER_ID == 0
class FakeLargeFileIoException(Exception):
    """Raised when I/O is attempted on a size-only ("large") fake file."""

    def __init__(self, file_path):
        message = ('Read and write operations not supported for '
                   'fake large file: %s' % file_path)
        super(FakeLargeFileIoException, self).__init__(message)
def _copy_module(old):
saved = sys.modules.pop(old.__name__, None)
new = __import__(old.__name__)
sys.modules[old.__name__] = saved
return new
class FakeFile:
    """Provides the appearance of a real file.

    Stat attributes (`st_mode`, `st_size`, the time stamps etc.) are held
    in a `FakeStatResult` and transparently forwarded via `__getattr__` /
    `__setattr__`.
    """

    # Attribute names that are delegated to `stat_result`.
    stat_types = (
        'st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid',
        'st_size', 'st_atime', 'st_mtime', 'st_ctime',
        'st_atime_ns', 'st_mtime_ns', 'st_ctime_ns'
    )

    def __init__(self, name, st_mode=S_IFREG | PERM_DEF_FILE,
                 contents=None, filesystem=None, encoding=None, errors=None,
                 side_effect=None):
        """
        Args:
            name: Name of the file/directory, without parent path
                information.
            st_mode: The stat.S_IF* constant representing the file type
                combined with the permission bits.
            contents: The contents of the filesystem object; should be a
                string or byte object for regular files.
            filesystem: The fake filesystem where the file is created
                (required).
            encoding: If `contents` is a unicode string, the encoding used
                for serialization.
            errors: The error mode used for encoding/decoding errors.
            side_effect: function that is executed when the file is written,
                called with the file object as its argument.

        Raises:
            ValueError: if no filesystem is passed.
        """
        if filesystem is None:
            raise ValueError('filesystem shall not be None')
        self.filesystem = filesystem
        self._side_effect = side_effect
        self.name = name
        self.stat_result = FakeStatResult(
            filesystem.is_windows_fs, USER_ID, GROUP_ID, time.time())
        self.stat_result.st_mode = st_mode
        self.encoding = encoding
        self.errors = errors or 'strict'
        self._byte_contents = self._encode_contents(contents)
        self.stat_result.st_size = (
            len(self._byte_contents) if self._byte_contents is not None else 0)
        # incremented on each content change so that open wrappers can
        # detect changes made through other wrappers
        self.epoch = 0
        self.parent_dir = None
        # extended attributes (xattr)
        self.xattr = {}

    @property
    def byte_contents(self):
        """Return the contents as raw byte array."""
        return self._byte_contents

    @property
    def contents(self):
        """Return the contents, decoded to a string if stored as bytes."""
        if isinstance(self.byte_contents, bytes):
            return self.byte_contents.decode(
                self.encoding or locale.getpreferredencoding(False),
                errors=self.errors)
        return self.byte_contents

    @property
    def st_ctime(self):
        """Return the creation time of the fake file."""
        return self.stat_result.st_ctime

    @property
    def st_atime(self):
        """Return the access time of the fake file."""
        return self.stat_result.st_atime

    @property
    def st_mtime(self):
        """Return the modification time of the fake file."""
        return self.stat_result.st_mtime

    @st_ctime.setter
    def st_ctime(self, val):
        """Set the creation time of the fake file."""
        self.stat_result.st_ctime = val

    @st_atime.setter
    def st_atime(self, val):
        """Set the access time of the fake file."""
        self.stat_result.st_atime = val

    @st_mtime.setter
    def st_mtime(self, val):
        """Set the modification time of the fake file."""
        self.stat_result.st_mtime = val

    def set_large_file_size(self, st_size):
        """Set the size without contents ("large file" mode).

        Any existing contents are released; subsequent I/O on the file
        raises FakeLargeFileIoException.

        Args:
            st_size: (int) The desired size of the file.

        Raises:
            OSError: if `st_size` is not a non-negative integer, or if it
                exceeds the available file system space.
        """
        self._check_positive_int(st_size)
        if self.st_size:
            self.size = 0
        if self.filesystem:
            self.filesystem.change_disk_usage(st_size, self.name, self.st_dev)
        self.st_size = st_size
        self._byte_contents = None

    def _check_positive_int(self, size):
        # reject a negative or non-integer size with ENOSPC
        if not is_int_type(size) or size < 0:
            self.filesystem.raise_os_error(errno.ENOSPC, self.name)

    def is_large_file(self):
        """Return `True` if this file was initialized with size but no
        contents.
        """
        return self._byte_contents is None

    def _encode_contents(self, contents):
        # serialize unicode contents with the given or preferred encoding
        if is_unicode_string(contents):
            contents = bytes(
                contents,
                self.encoding or locale.getpreferredencoding(False),
                self.errors)
        return contents

    def _set_initial_contents(self, contents):
        """Set the file contents and size, adjusting the disk usage.

        Args:
            contents: (str or bytes) The new contents.

        Returns:
            True if the contents have been changed.

        Raises:
            OSError: if the size exceeds the available file system space.
        """
        contents = self._encode_contents(contents)
        changed = self._byte_contents != contents
        st_size = len(contents)
        if self._byte_contents:
            self.size = 0
        current_size = self.st_size or 0
        self.filesystem.change_disk_usage(
            st_size - current_size, self.name, self.st_dev)
        self._byte_contents = contents
        self.st_size = st_size
        self.epoch += 1
        return changed

    def set_contents(self, contents, encoding=None):
        """Set the file contents and size, and execute the side effect.

        Args:
            contents: (str or bytes) The new contents.
            encoding: (str) The encoding used to serialize unicode contents;
                if not given, the locale preferred encoding is used.

        Returns:
            True if the contents have been changed.

        Raises:
            OSError: if the size exceeds the available file system space.
        """
        self.encoding = encoding
        changed = self._set_initial_contents(contents)
        if self._side_effect is not None:
            self._side_effect(self)
        return changed

    @property
    def size(self):
        """Return the size in bytes of the file contents."""
        return self.st_size

    @property
    def path(self):
        """Return the full path of this object, walking up parent_dir."""
        names = []
        obj = self
        while obj:
            names.insert(0, obj.name)
            obj = obj.parent_dir
        sep = self.filesystem._path_separator(self.name)
        if names[0] == sep:
            names.pop(0)
            dir_path = sep.join(names)
            # do not prepend a separator in front of a drive root ('C:')
            is_drive = names and len(names[0]) == 2 and names[0][1] == ':'
            if not is_drive:
                dir_path = sep + dir_path
        else:
            dir_path = sep.join(names)
        dir_path = self.filesystem.absnormpath(dir_path)
        return dir_path

    @Deprecator('property path')
    def GetPath(self):
        return self.path

    @Deprecator('property size')
    def GetSize(self):
        return self.size

    @size.setter
    def size(self, st_size):
        """Resize the file contents, padding with null bytes or truncating.

        Args:
            st_size: (int) The new file size.

        Raises:
            OSError: if `st_size` is not a non-negative integer, or if it
                exceeds the available file system space.
        """
        self._check_positive_int(st_size)
        current_size = self.st_size or 0
        self.filesystem.change_disk_usage(
            st_size - current_size, self.name, self.st_dev)
        if self._byte_contents:
            if st_size < current_size:
                self._byte_contents = self._byte_contents[:st_size]
            else:
                self._byte_contents += b'\0' * (st_size - current_size)
        self.st_size = st_size
        self.epoch += 1

    @Deprecator('property size')
    def SetSize(self, value):
        self.size = value

    @Deprecator('property st_atime')
    def SetATime(self, st_atime):
        """Set the access time of the fake file (deprecated alias)."""
        self.st_atime = st_atime

    @Deprecator('property st_mtime')
    def SetMTime(self, st_mtime):
        """Set the modification time of the fake file (deprecated alias)."""
        self.st_mtime = st_mtime

    @Deprecator('property st_ctime')
    def SetCTime(self, st_ctime):
        """Set the creation time of the fake file (deprecated alias)."""
        self.st_ctime = st_ctime

    def __getattr__(self, item):
        """Forward the stat attributes to `stat_result`."""
        if item in self.stat_types:
            return getattr(self.stat_result, item)
        # NOTE(review): `object` defines no `__getattr__`; for unknown
        # attributes this ends in an AttributeError, as intended.
        return super(FakeFile, self).__getattr__(item)

    def __setattr__(self, key, value):
        """Forward the stat attributes to `stat_result`."""
        if key in self.stat_types:
            return setattr(self.stat_result, key, value)
        return super(FakeFile, self).__setattr__(key, value)

    def __str__(self):
        return '%s(%o)' % (self.name, self.st_mode)

    @Deprecator('st_ino')
    def SetIno(self, st_ino):
        """Set the inode number (deprecated alias).

        Note that a unique inode is assigned automatically to a new fake
        file; this function does not guarantee uniqueness.

        Args:
            st_ino: (int) The desired inode.
        """
        self.st_ino = st_ino
class FakeNullFile(FakeFile):
    """Fake implementation of the null device.

    Reads always yield empty contents, and written contents are discarded.
    """

    def __init__(self, filesystem):
        # Bug fix: both branches of the conditional used the same
        # (misspelled) path '/dev/nul'.  The null device is named 'nul'
        # on Windows and '/dev/null' on POSIX systems (see `os.devnull`).
        devnull = 'nul' if filesystem.is_windows_fs else '/dev/null'
        super(FakeNullFile, self).__init__(
            devnull, filesystem=filesystem, contents=b'')

    @property
    def byte_contents(self):
        """Always empty, regardless of what was written."""
        return b''

    def _set_initial_contents(self, contents):
        # discard all writes to the null device
        pass
# Register deprecated CamelCase aliases for the renamed snake_case methods.
Deprecator.add(FakeFile, FakeFile.set_large_file_size, 'SetLargeFileSize')
Deprecator.add(FakeFile, FakeFile.set_contents, 'SetContents')
Deprecator.add(FakeFile, FakeFile.is_large_file, 'IsLargeFile')
class FakeFileFromRealFile(FakeFile):
    """Represents a fake file copied from the real file system.

    The contents of the real file are read lazily, on first access only.
    """

    def __init__(self, file_path, filesystem, side_effect=None):
        """
        Args:
            file_path: Path to the existing real file.
            filesystem: The fake filesystem where the file is created.
            side_effect: function executed when the file is written to.
        """
        super(FakeFileFromRealFile, self).__init__(
            name=os.path.basename(file_path), filesystem=filesystem,
            side_effect=side_effect)
        self.contents_read = False

    @property
    def byte_contents(self):
        # lazily read the real file on first access
        # NOTE(review): relies on `self.file_path` being assigned by the
        # filesystem after construction - confirm against the caller.
        if not self.contents_read:
            self.contents_read = True
            with io.open(self.file_path, 'rb') as f:
                self._byte_contents = f.read()
            # keep atime in sync with the real file, which the read above
            # may have updated
            self.st_atime = os.stat(self.file_path).st_atime
        return self._byte_contents

    def set_contents(self, contents, encoding=None):
        # once contents are replaced, the real file need not be read
        self.contents_read = True
        super(FakeFileFromRealFile, self).set_contents(contents, encoding)

    def is_large_file(self):
        """A file from the real filesystem is never treated as large file."""
        return False
class FakeDirectory(FakeFile):
def __init__(self, name, perm_bits=PERM_DEF, filesystem=None):
FakeFile.__init__(
self, name, S_IFDIR | perm_bits, {}, filesystem=filesystem)
self.st_nlink += 1
def set_contents(self, contents, encoding=None):
raise self.filesystem.raise_os_error(errno.EISDIR, self.path)
@property
def contents(self):
return self.byte_contents
@property
def ordered_dirs(self):
return [item[0] for item in sorted(
self.byte_contents.items(), key=lambda entry: entry[1].st_ino)]
def add_entry(self, path_object):
if (not is_root() and not self.st_mode & PERM_WRITE and
not self.filesystem.is_windows_fs):
raise OSError(errno.EACCES, 'Permission Denied', self.path)
path_object_name = to_string(path_object.name)
if path_object_name in self.contents:
self.filesystem.raise_os_error(errno.EEXIST, self.path)
self.contents[path_object_name] = path_object
path_object.parent_dir = self
if path_object.st_ino is None:
self.filesystem.last_ino += 1
path_object.st_ino = self.filesystem.last_ino
self.st_nlink += 1
path_object.st_nlink += 1
path_object.st_dev = self.st_dev
if path_object.st_nlink == 1:
self.filesystem.change_disk_usage(
path_object.size, path_object.name, self.st_dev)
def get_entry(self, pathname_name):
pathname_name = self._normalized_entryname(pathname_name)
return self.contents[to_string(pathname_name)]
def _normalized_entryname(self, pathname_name):
if not self.filesystem.is_case_sensitive:
matching_names = [name for name in self.contents
if name.lower() == pathname_name.lower()]
if matching_names:
pathname_name = matching_names[0]
return pathname_name
def remove_entry(self, pathname_name, recursive=True):
pathname_name = self._normalized_entryname(pathname_name)
entry = self.get_entry(pathname_name)
if self.filesystem.is_windows_fs:
if entry.st_mode & PERM_WRITE == 0:
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
if self.filesystem.has_open_file(entry):
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
else:
if (not is_root() and (self.st_mode & (PERM_WRITE | PERM_EXE) !=
PERM_WRITE | PERM_EXE)):
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
if recursive and isinstance(entry, FakeDirectory):
while entry.contents:
entry.remove_entry(list(entry.contents)[0])
elif entry.st_nlink == 1:
self.filesystem.change_disk_usage(
-entry.size, pathname_name, entry.st_dev)
self.st_nlink -= 1
entry.st_nlink -= 1
assert entry.st_nlink >= 0
del self.contents[to_string(pathname_name)]
@property
def size(self):
    """Return the total size in bytes of all entries of this directory."""
    total = 0
    for entry in self.contents.values():
        total += entry.size
    return total
@Deprecator('property size')
def GetSize(self):
    # deprecated CamelCase alias for the ``size`` property
    return self.size
def has_parent_object(self, dir_object):
    """Return True if *dir_object* is this object or one of its
    ancestors in the parent-directory chain."""
    current = self
    while current:
        if current == dir_object:
            return True
        current = current.parent_dir
    return False
def __str__(self):
    """Render this directory and all of its children as an indented tree."""
    description = super(FakeDirectory, self).__str__() + ':\n'
    for item in self.contents:
        item_desc = self.contents[item].__str__()
        for line in item_desc.split('\n'):
            if line:
                # indent every child line one level below this directory
                description = description + ' ' + line + '\n'
    return description
# Register CamelCase aliases for the old public API of FakeDirectory;
# calls are forwarded to the snake_case methods via Deprecator.
Deprecator.add(FakeDirectory, FakeDirectory.add_entry, 'AddEntry')
Deprecator.add(FakeDirectory, FakeDirectory.get_entry, 'GetEntry')
Deprecator.add(FakeDirectory, FakeDirectory.set_contents, 'SetContents')
Deprecator.add(FakeDirectory, FakeDirectory.remove_entry, 'RemoveEntry')
class FakeDirectoryFromRealDirectory(FakeDirectory):
    """A fake directory that mirrors a directory of the real filesystem;
    its entries are added to the fake filesystem lazily, on first access
    of ``contents``.
    """

    def __init__(self, source_path, filesystem, read_only,
                 target_path=None):
        """
        Args:
            source_path: path of the real directory to mirror.
            filesystem: the fake filesystem the directory belongs to.
            read_only: if True, mirrored entries are added read-only.
            target_path: path in the fake filesystem
                (defaults to *source_path*).

        Raises:
            OSError: if the real directory cannot be stat'ed.
        """
        target_path = target_path or source_path
        real_stat = os.stat(source_path)
        super(FakeDirectoryFromRealDirectory, self).__init__(
            name=os.path.split(target_path)[1],
            perm_bits=real_stat.st_mode,
            filesystem=filesystem)

        # copy the timestamps and ownership of the real directory
        self.st_ctime = real_stat.st_ctime
        self.st_atime = real_stat.st_atime
        self.st_mtime = real_stat.st_mtime
        self.st_gid = real_stat.st_gid
        self.st_uid = real_stat.st_uid
        self.source_path = source_path
        self.read_only = read_only
        # entries are populated on the first access of ``contents``
        self.contents_read = False

    @property
    def contents(self):
        """Return the directory contents, mirroring the entries of the
        real directory into the fake filesystem on first access."""
        if not self.contents_read:
            self.contents_read = True
            base = self.path
            for entry in os.listdir(self.source_path):
                source_path = os.path.join(self.source_path, entry)
                target_path = os.path.join(base, entry)
                if os.path.islink(source_path):
                    self.filesystem.add_real_symlink(source_path, target_path)
                elif os.path.isdir(source_path):
                    self.filesystem.add_real_directory(
                        source_path, self.read_only, target_path=target_path)
                else:
                    self.filesystem.add_real_file(
                        source_path, self.read_only, target_path=target_path)
        return self.byte_contents

    @property
    def size(self):
        # the size of a not-yet-read directory is reported as 0
        if not self.contents_read:
            return 0
        return super(FakeDirectoryFromRealDirectory, self).size
class FakeFilesystem:
def __init__(self, path_separator=os.path.sep, total_size=None,
             patcher=None):
    """
    Args:
        path_separator: separator used in the fake filesystem
            (defaults to the separator of the real OS).
        total_size: total size in bytes of the root mount point,
            or None for unlimited size.
        patcher: the Patcher object controlling this filesystem,
            if any; required for pause()/resume().
    """
    self.path_separator = path_separator
    self.alternative_path_separator = os.path.altsep
    self.patcher = patcher
    # a custom separator disables the alternative separator
    if path_separator != os.sep:
        self.alternative_path_separator = None

    # OS emulation flags, initialized from the real platform
    self.is_windows_fs = sys.platform == 'win32'
    self.is_macos = sys.platform == 'darwin'
    self.is_case_sensitive = not (self.is_windows_fs or self.is_macos)

    self.root = FakeDirectory(self.path_separator, filesystem=self)
    self.cwd = self.root.name

    # read the real umask without changing it
    self.umask = os.umask(0o22)
    os.umask(self.umask)

    # A list of open file objects. Their position in the list is their
    # file descriptor number
    self.open_files = []
    # A heap containing all free positions in self.open_files list
    self._free_fd_heap = []
    # last used numbers for inodes (st_ino) and devices (st_dev)
    self.last_ino = 0
    self.last_dev = 0
    self.mount_points = {}
    self.add_mount_point(self.root.name, total_size)
    self._add_standard_streams()
    self.dev_null = FakeNullFile(self)
@property
def is_linux(self):
    """True if the fake filesystem emulates Linux, i.e. neither
    Windows nor macOS behavior is active."""
    return not (self.is_windows_fs or self.is_macos)
def reset(self, total_size=None):
    """Remove all filesystem contents and re-create the state right
    after construction; the OS emulation flags are kept.

    Args:
        total_size: new total size of the root mount point in bytes,
            or None for unlimited.
    """
    self.root = FakeDirectory(self.path_separator, filesystem=self)
    self.cwd = self.root.name

    self.open_files = []
    self._free_fd_heap = []
    self.last_ino = 0
    self.last_dev = 0
    self.mount_points = {}
    self.add_mount_point(self.root.name, total_size)
    self._add_standard_streams()
def pause(self):
    """Pause patching of the file system modules; only valid for a
    filesystem created by a Patcher object.

    Raises:
        RuntimeError: if no Patcher is associated with this filesystem.
    """
    if self.patcher is not None:
        self.patcher.pause()
    else:
        raise RuntimeError('pause() can only be called from a fake file '
                           'system object created by a Patcher object')
def resume(self):
    """Resume patching after a pause(); only valid for a filesystem
    created by a Patcher object.

    Raises:
        RuntimeError: if no Patcher is associated with this filesystem.
    """
    if self.patcher is not None:
        self.patcher.resume()
    else:
        raise RuntimeError('resume() can only be called from a fake file '
                           'system object created by a Patcher object')
def line_separator(self):
    """Return the line-ending convention of the emulated system."""
    if self.is_windows_fs:
        return '\r\n'
    return '\n'
def _error_message(self, errno):
return os.strerror(errno) + ' in the fake filesystem'
def raise_os_error(self, errno, filename=None, winerror=None):
    """Raise an OSError with a fake-filesystem message for *errno*;
    the winerror argument is only used on a real Windows host while
    emulating Windows."""
    message = self._error_message(errno)
    use_winerror = (winerror is not None and sys.platform == 'win32'
                    and self.is_windows_fs)
    if use_winerror:
        raise OSError(errno, message, filename, winerror)
    raise OSError(errno, message, filename)
@staticmethod
def _matching_string(matched, string):
if string is None:
return string
if isinstance(matched, bytes) and isinstance(string, str):
return string.encode(locale.getpreferredencoding(False))
return string
def _path_separator(self, path):
    """Return the path separator, matching the str/bytes type of *path*."""
    return self._matching_string(path, self.path_separator)
def _alternative_path_separator(self, path):
    """Return the alternative separator, matching the str/bytes type
    of *path* (may be None)."""
    return self._matching_string(path, self.alternative_path_separator)
def add_mount_point(self, path, total_size=None):
    """Add a new mount point with its own device number and its own
    disk-size accounting.

    Args:
        path: path of the mount point; must not already be mounted.
        total_size: total size in bytes, or None for unlimited.

    Returns:
        The new mount point dict with keys 'idev', 'total_size' and
        'used_size'.

    Raises:
        OSError(EEXIST): if a mount point already exists at *path*.
    """
    path = self.absnormpath(path)
    if path in self.mount_points:
        self.raise_os_error(errno.EEXIST, path)
    self.last_dev += 1
    self.mount_points[path] = {
        'idev': self.last_dev, 'total_size': total_size, 'used_size': 0
    }
    # special handling for root path: has been created before
    if path == self.root.name:
        root_dir = self.root
        self.last_ino += 1
        root_dir.st_ino = self.last_ino
    else:
        root_dir = self.create_dir(path)
    root_dir.st_dev = self.last_dev
    return self.mount_points[path]
def _auto_mount_drive_if_needed(self, path, force=False):
if (self.is_windows_fs and
(force or not self._mount_point_for_path(path))):
drive = self.splitdrive(path)[0]
if drive:
return self.add_mount_point(path=drive)
def _mount_point_for_path(self, path):
    """Return the mount point dict responsible for *path* - the one
    whose path is the longest matching prefix.  On Windows a missing
    drive is auto-mounted."""
    def to_str(string):
        """Convert a bytes mount-point key back to str for the lookup."""
        if string is None or isinstance(string, str):
            return string
        return string.decode(locale.getpreferredencoding(False))

    path = self.absnormpath(self._original_path(path))
    if path in self.mount_points:
        return self.mount_points[path]
    mount_path = self._matching_string(path, '')
    # drive is a 1-tuple so it can be passed directly to startswith()
    drive = self.splitdrive(path)[:1]
    for root_path in self.mount_points:
        root_path = self._matching_string(path, root_path)
        if drive and not root_path.startswith(drive):
            continue
        if path.startswith(root_path) and len(root_path) > len(mount_path):
            mount_path = root_path
    if mount_path:
        return self.mount_points[to_str(mount_path)]
    mount_point = self._auto_mount_drive_if_needed(path, force=True)
    assert mount_point
    return mount_point
def _mount_point_for_device(self, idev):
for mount_point in self.mount_points.values():
if mount_point['idev'] == idev:
return mount_point
def get_disk_usage(self, path=None):
    """Return a named tuple (total, used, free) for the mount point
    of *path* (the root mount point if *path* is None).  A mount
    point without a size limit reports a pseudo size of 1 TiB."""
    DiskUsage = namedtuple('usage', 'total, used, free')
    if path is None:
        mount_point = self.mount_points[self.root.name]
    else:
        mount_point = self._mount_point_for_path(path)
    if mount_point and mount_point['total_size'] is not None:
        total = mount_point['total_size']
        used = mount_point['used_size']
        return DiskUsage(total, used, total - used)
    unlimited = 1024 * 1024 * 1024 * 1024
    return DiskUsage(unlimited, 0, unlimited)
def set_disk_usage(self, total_size, path=None):
    """Set the total size of the mount point containing *path*
    (the root mount point if *path* is None).

    Raises:
        OSError(ENOSPC): if more than *total_size* bytes are
            already in use on that mount point.
    """
    target = self.root.name if path is None else path
    mount_point = self._mount_point_for_path(target)
    limited = mount_point['total_size'] is not None
    if limited and mount_point['used_size'] > total_size:
        self.raise_os_error(errno.ENOSPC, target)
    mount_point['total_size'] = total_size
def change_disk_usage(self, usage_change, file_path, st_dev):
    """Account for *usage_change* bytes (may be negative) on the
    mount point of device *st_dev*; unknown devices are ignored.

    Raises:
        OSError(ENOSPC): if the change does not fit into the
            mount point's size limit.
    """
    mount_point = self._mount_point_for_device(st_dev)
    if not mount_point:
        return
    total_size = mount_point['total_size']
    if total_size is not None:
        remaining = total_size - mount_point['used_size']
        if remaining < usage_change:
            self.raise_os_error(errno.ENOSPC, file_path)
    mount_point['used_size'] += usage_change
def stat(self, entry_path, follow_symlinks=True):
    """Return the os.stat-like result for the object at *entry_path*.

    Args:
        entry_path: path (or file descriptor) of the object.
        follow_symlinks: if False, a final symlink is stat'ed itself.

    Raises:
        OSError: if the object does not exist, a parent directory is
            not readable, or a trailing separator is invalid here.
    """
    # stat should return the tuple representing return value of os.stat
    file_object = self.resolve(
        entry_path, follow_symlinks,
        allow_fd=True, check_read_perm=False)
    if not is_root():
        # make sure stat raises if a parent dir is not readable
        parent_dir = file_object.parent_dir
        if parent_dir:
            self.get_object(parent_dir.path)
    self.raise_for_filepath_ending_with_separator(
        entry_path, file_object, follow_symlinks)

    return file_object.stat_result.copy()
def raise_for_filepath_ending_with_separator(self, entry_path,
                                             file_object,
                                             follow_symlinks=True,
                                             macos_handling=False):
    """Raise the OS-specific error if *entry_path* ends with a path
    separator although *file_object* does not allow that (e.g. it is
    a regular file); the exact rules differ per emulated OS."""
    if self.ends_with_path_separator(entry_path):
        if S_ISLNK(file_object.st_mode):
            try:
                link_object = self.resolve(entry_path)
            except OSError as exc:
                # broken link: macOS tolerates most errors here
                if self.is_macos and exc.errno != errno.ENOENT:
                    return
                if self.is_windows_fs:
                    self.raise_os_error(errno.EINVAL, entry_path)
                raise
            if not follow_symlinks or self.is_windows_fs or self.is_macos:
                file_object = link_object
        if self.is_windows_fs:
            is_error = S_ISREG(file_object.st_mode)
        elif self.is_macos and macos_handling:
            is_error = not S_ISLNK(file_object.st_mode)
        else:
            is_error = not S_ISDIR(file_object.st_mode)
        if is_error:
            error_nr = (errno.EINVAL if self.is_windows_fs
                        else errno.ENOTDIR)
            self.raise_os_error(error_nr, entry_path)
def chmod(self, path, mode, follow_symlinks=True):
    """Change the permission bits of the object at *path* and update
    its st_ctime.

    On Windows, only the write permission can be toggled; all other
    permission bits stay untouched.
    """
    file_object = self.resolve(path, follow_symlinks, allow_fd=True)
    if self.is_windows_fs:
        if mode & PERM_WRITE:
            file_object.st_mode = file_object.st_mode | 0o222
        else:
            file_object.st_mode = file_object.st_mode & 0o777555
    else:
        file_object.st_mode = ((file_object.st_mode & ~PERM_ALL) |
                               (mode & PERM_ALL))
    file_object.st_ctime = time.time()
def utime(self, path, times=None, *, ns=None, follow_symlinks=True):
    """Set the access and modification times of the object at *path*.

    Args:
        times: optional (atime, mtime) pair in seconds (int or float).
        ns: optional (atime, mtime) pair in nanoseconds (int);
            mutually exclusive with *times*.
        follow_symlinks: if False, a final symlink is changed itself.

    Raises:
        ValueError/TypeError: for invalid argument combinations.
        OSError: if *path* does not exist.
    """
    self._handle_utime_arg_errors(ns, times)

    file_object = self.resolve(path, follow_symlinks, allow_fd=True)
    if times is not None:
        # argument validation above guarantees exactly two elements
        atime, mtime = times
        if not isinstance(atime, (int, float)) or not isinstance(
                mtime, (int, float)):
            raise TypeError('atime and mtime must be numbers')
        file_object.st_atime = atime
        file_object.st_mtime = mtime
    elif ns is not None:
        atime_ns, mtime_ns = ns
        if not isinstance(atime_ns, int) or not isinstance(mtime_ns, int):
            raise TypeError('atime and mtime must be ints')
        file_object.st_atime_ns = atime_ns
        file_object.st_mtime_ns = mtime_ns
    else:
        # neither given: set both to the current time
        current_time = time.time()
        file_object.st_atime = current_time
        file_object.st_mtime = current_time
def _handle_utime_arg_errors(self, ns, times):
if times is not None and ns is not None:
raise ValueError(
"utime: you may specify either 'times' or 'ns' but not both")
if times is not None and len(times) != 2:
raise TypeError(
"utime: 'times' must be either a tuple of two ints or None")
if ns is not None and len(ns) != 2:
raise TypeError("utime: 'ns' must be a tuple of two ints")
@Deprecator
def SetIno(self, path, st_ino):
    # deprecated: directly set the inode number of the object at *path*;
    # inode numbers are normally assigned automatically on creation
    self.get_object(path).st_ino = st_ino
def _add_open_file(self, file_obj):
if self._free_fd_heap:
open_fd = heapq.heappop(self._free_fd_heap)
self.open_files[open_fd] = [file_obj]
return open_fd
self.open_files.append([file_obj])
return len(self.open_files) - 1
def _close_open_file(self, file_des):
    """Mark descriptor *file_des* as closed and free it for reuse."""
    self.open_files[file_des] = None
    heapq.heappush(self._free_fd_heap, file_des)
def get_open_file(self, file_des):
    """Return the open file wrapper registered under descriptor
    *file_des*.

    Raises:
        TypeError: if *file_des* is not an integer.
        OSError(EBADF): if the descriptor is unknown or closed.
    """
    if not is_int_type(file_des):
        raise TypeError('an integer is required')
    is_open = (file_des < len(self.open_files) and
               self.open_files[file_des] is not None)
    if not is_open:
        self.raise_os_error(errno.EBADF, str(file_des))
    return self.open_files[file_des][0]
def has_open_file(self, file_object):
    """Return True if *file_object* is the target of any currently
    open file descriptor."""
    for wrappers in self.open_files:
        if wrappers and wrappers[0].get_object() == file_object:
            return True
    return False
def _normalize_path_sep(self, path):
if self.alternative_path_separator is None or not path:
return path
return path.replace(self._alternative_path_separator(path),
self._path_separator(path))
def normcase(self, path):
    """Replace alternative path separators in *path* by the primary
    one.  Unlike os.path.normcase, no case folding is done here."""
    return self._normalize_path_sep(make_string_path(path))
def normpath(self, path):
    """Mimic os.path.normpath using the fake filesystem's separator:
    collapse empty and '.' components and resolve '..' components
    lexically, without consulting the filesystem contents."""
    path = self.normcase(path)
    drive, path = self.splitdrive(path)
    sep = self._path_separator(path)
    is_absolute_path = path.startswith(sep)
    path_components = path.split(sep)
    collapsed_path_components = []
    dot = self._matching_string(path, '.')
    dotdot = self._matching_string(path, '..')
    for component in path_components:
        if (not component) or (component == dot):
            continue
        if component == dotdot:
            if collapsed_path_components and (
                    collapsed_path_components[-1] != dotdot):
                # Remove an up-reference: directory/..
                collapsed_path_components.pop()
                continue
            elif is_absolute_path:
                # Ignore leading .. components if starting from the
                # root directory.
                continue
        collapsed_path_components.append(component)
    collapsed_path = sep.join(collapsed_path_components)
    if is_absolute_path:
        collapsed_path = sep + collapsed_path
    # an empty result (and no drive) collapses to '.'
    return drive + collapsed_path or dot
def _original_path(self, path):
    """Return *path* with each component replaced by the name as it is
    actually stored, for case-insensitive filesystems; on
    case-sensitive filesystems the path is returned unchanged."""
    def components_to_path():
        # re-assemble the (partially) case-corrected components,
        # appending any components that could not be matched as given
        if len(path_components) > len(normalized_components):
            normalized_components.extend(
                path_components[len(normalized_components):])
        sep = self._path_separator(path)
        normalized_path = sep.join(normalized_components)
        if path.startswith(sep) and not normalized_path.startswith(sep):
            normalized_path = sep + normalized_path
        return normalized_path

    if self.is_case_sensitive or not path:
        return path
    path_components = self._path_components(path)
    normalized_components = []
    current_dir = self.root
    for component in path_components:
        if not isinstance(current_dir, FakeDirectory):
            return components_to_path()
        dir_name, current_dir = self._directory_content(
            current_dir, component)
        if current_dir is None or (
                isinstance(current_dir, FakeDirectory) and
                current_dir._byte_contents is None and
                current_dir.st_size == 0):
            return components_to_path()
        normalized_components.append(dir_name)
    return components_to_path()
def absnormpath(self, path):
    """Return the absolute, normalized form of *path*, prefixing a
    relative path with the current working directory."""
    path = self.normcase(path)
    cwd = self._matching_string(path, self.cwd)
    if not path:
        path = self.path_separator
    if path == self._matching_string(path, '.'):
        path = cwd
    elif not self._starts_with_root_path(path):
        # Prefix relative paths with cwd, if cwd is not root.
        root_name = self._matching_string(path, self.root.name)
        empty = self._matching_string(path, '')
        path = self._path_separator(path).join(
            (cwd != root_name and cwd or empty, path))
    if path == self._matching_string(path, '.'):
        path = cwd
    return self.normpath(path)
def splitpath(self, path):
    """Mimic os.path.split: return a (head, tail) pair where tail is
    the last path component; drive letters and root paths receive
    special handling (Windows)."""
    path = self.normcase(path)
    sep = self._path_separator(path)
    path_components = path.split(sep)
    if not path_components:
        return ('', '')
    starts_with_drive = self._starts_with_drive_letter(path)
    basename = path_components.pop()
    colon = self._matching_string(path, ':')
    if not path_components:
        # one single component: split off a leading drive, if any
        if starts_with_drive:
            components = basename.split(colon)
            return (components[0] + colon, components[1])
        return ('', basename)
    for component in path_components:
        if component:
            # The path is not the root; it contains a non-separator
            # component. Strip all trailing separators.
            while not path_components[-1]:
                path_components.pop()
            if starts_with_drive:
                if not path_components:
                    components = basename.split(colon)
                    return (components[0] + colon, components[1])
                if (len(path_components) == 1 and
                        path_components[0].endswith(colon)):
                    return (path_components[0] + sep, basename)
            return (sep.join(path_components), basename)
    # Root path. Collapse all leading separators.
    return (sep, basename)
def splitdrive(self, path):
    """Mimic os.path.splitdrive: split *path* into a (drive, rest)
    pair.  Only under Windows emulation can the drive be non-empty -
    either a drive letter ('C:') or a UNC mount point."""
    path = make_string_path(path)
    if self.is_windows_fs:
        if len(path) >= 2:
            path = self.normcase(path)
            sep = self._path_separator(path)
            # UNC path handling
            if (path[0:2] == sep * 2) and (
                    path[2:3] != sep):
                # UNC path handling - splits off the mount point
                # instead of the drive
                sep_index = path.find(sep, 2)
                if sep_index == -1:
                    return path[:0], path
                sep_index2 = path.find(sep, sep_index + 1)
                if sep_index2 == sep_index + 1:
                    return path[:0], path
                if sep_index2 == -1:
                    sep_index2 = len(path)
                return path[:sep_index2], path[sep_index2:]
            if path[1:2] == self._matching_string(path, ':'):
                return path[:2], path[2:]
    # no drive: return an empty string of the matching type
    return path[:0], path
def _join_paths_with_drive_support(self, *all_paths):
    """Mimic the Windows os.path.join, tracking drive letters and UNC
    mount points while concatenating the given paths."""
    base_path = all_paths[0]
    paths_to_add = all_paths[1:]
    sep = self._path_separator(base_path)
    seps = [sep, self._alternative_path_separator(base_path)]
    result_drive, result_path = self.splitdrive(base_path)
    for path in paths_to_add:
        drive_part, path_part = self.splitdrive(path)
        if path_part and path_part[:1] in seps:
            # Second path is absolute
            if drive_part or not result_drive:
                result_drive = drive_part
            result_path = path_part
            continue
        elif drive_part and drive_part != result_drive:
            if (self.is_case_sensitive or
                    drive_part.lower() != result_drive.lower()):
                # Different drives => ignore the first path entirely
                result_drive = drive_part
                result_path = path_part
                continue
            # Same drive in different case
            result_drive = drive_part
        # Second path is relative to the first
        if result_path and result_path[-1:] not in seps:
            result_path = result_path + sep
        result_path = result_path + path_part
    # add separator between UNC and non-absolute path
    colon = self._matching_string(base_path, ':')
    if (result_path and result_path[:1] not in seps and
            result_drive and result_drive[-1:] != colon):
        return result_drive + sep + result_path
    return result_drive + result_path
def joinpaths(self, *paths):
    """Mimic os.path.join: concatenate *paths* with the fake
    separator, restarting at any absolute component; Windows drive
    handling is delegated to _join_paths_with_drive_support()."""
    if sys.version_info >= (3, 6):
        # accept os.PathLike objects
        paths = [os.fspath(path) for path in paths]
    if len(paths) == 1:
        return paths[0]
    if self.is_windows_fs:
        return self._join_paths_with_drive_support(*paths)
    joined_path_segments = []
    sep = self._path_separator(paths[0])
    for path_segment in paths:
        if self._starts_with_root_path(path_segment):
            # An absolute path
            joined_path_segments = [path_segment]
        else:
            if (joined_path_segments and
                    not joined_path_segments[-1].endswith(sep)):
                joined_path_segments.append(sep)
            if path_segment:
                joined_path_segments.append(path_segment)
    return self._matching_string(paths[0], '').join(joined_path_segments)
def _path_components(self, path):
    """Break *path* into a list of component names; a drive (if
    present) becomes the first component, the root itself yields an
    empty list."""
    if not path or path == self._path_separator(path):
        return []
    drive, path = self.splitdrive(path)
    path_components = path.split(self._path_separator(path))
    assert drive or path_components
    if not path_components[0]:
        if len(path_components) > 1 and not path_components[1]:
            path_components = []
        else:
            # This is an absolute path.
            path_components = path_components[1:]
    if drive:
        path_components.insert(0, drive)
    return path_components
def _starts_with_drive_letter(self, file_path):
colon = self._matching_string(file_path, ':')
return (self.is_windows_fs and len(file_path) >= 2 and
file_path[:1].isalpha and (file_path[1:2]) == colon)
def _starts_with_root_path(self, file_path):
    """Return True if *file_path* is absolute: it starts with the root
    name (ignoring case where applicable) or with a drive letter."""
    root_name = self._matching_string(file_path, self.root.name)
    file_path = self._normalize_path_sep(file_path)
    return (file_path.startswith(root_name) or
            not self.is_case_sensitive and file_path.lower().startswith(
                root_name.lower()) or
            self._starts_with_drive_letter(file_path))
def _is_root_path(self, file_path):
root_name = self._matching_string(file_path, self.root.name)
return (file_path == root_name or not self.is_case_sensitive and
file_path.lower() == root_name.lower() or
2 <= len(file_path) <= 3 and
self._starts_with_drive_letter(file_path))
def ends_with_path_separator(self, file_path):
    """Return whether *file_path* ends with a (primary or alternative)
    path separator without being a bare separator itself; file
    descriptors (ints) yield False.  Note: the result is a truthy /
    falsy value, not necessarily a bool."""
    if is_int_type(file_path):
        return False
    file_path = make_string_path(file_path)
    return (file_path and
            file_path not in (self.path_separator,
                              self.alternative_path_separator) and
            (file_path.endswith(self._path_separator(file_path)) or
             self.alternative_path_separator is not None and
             file_path.endswith(
                 self._alternative_path_separator(file_path))))
def is_filepath_ending_with_separator(self, path):
    """Return True if *path* ends with a path separator although it
    refers to a regular file (an invalid combination)."""
    if not self.ends_with_path_separator(path):
        return False
    stripped = self._path_without_trailing_separators(path)
    return self.isfile(stripped)
def _directory_content(self, directory, component):
    """Look up *component* inside *directory*.

    Returns:
        A (stored_name, object) pair, or (None, None) if *directory*
        is not a fake directory or has no matching entry; lookup is
        case-insensitive on case-insensitive filesystems.
    """
    if not isinstance(directory, FakeDirectory):
        return None, None
    if component in directory.contents:
        return component, directory.contents[component]
    if not self.is_case_sensitive:
        wanted = component.lower()
        for name, entry in directory.contents.items():
            if name.lower() == wanted:
                return name, entry
    return None, None
def exists(self, file_path, check_link=False):
    """Return True if *file_path* points to an existing object.

    Args:
        check_link: if True, a broken symlink also counts as existing.

    Raises:
        TypeError: if *file_path* is None.
    """
    if check_link and self.islink(file_path):
        return True
    file_path = make_string_path(file_path)
    if file_path is None:
        raise TypeError
    if not file_path:
        return False
    if file_path == self.dev_null.name:
        # the null device is only available under Windows from 3.8 on
        return not self.is_windows_fs or sys.version_info >= (3, 8)
    try:
        if self.is_filepath_ending_with_separator(file_path):
            return False
        file_path = self.resolve_path(file_path)
    except OSError:
        return False
    if file_path == self.root.name:
        return True

    path_components = self._path_components(file_path)
    current_dir = self.root
    for component in path_components:
        current_dir = self._directory_content(current_dir, component)[1]
        if not current_dir:
            return False
    return True
def resolve_path(self, file_path, allow_fd=False, raw_io=True):
    """Follow *file_path* from the root, resolving all symlinks on
    the way, and return the resulting absolute path string.

    Args:
        file_path: the path to resolve.
        allow_fd: if True, an int argument is treated as an open
            file descriptor and its path is returned.
        raw_io: passed through to _resolve_components().

    Raises:
        TypeError: if *file_path* is None.
        OSError(ENOENT): for an empty path or an invalid relative
            path.
    """
    if allow_fd and isinstance(file_path, int):
        return self.get_open_file(file_path).get_object().path
    file_path = make_string_path(file_path)
    if file_path is None:
        # file.open(None) raises TypeError, so mimic that.
        raise TypeError('Expected file system path string, received None')
    if not file_path or not self._valid_relative_path(file_path):
        # file.open('') raises OSError, so mimic that, and validate that
        # all parts of a relative path exist.
        self.raise_os_error(errno.ENOENT, file_path)
    file_path = self.absnormpath(self._original_path(file_path))
    if self._is_root_path(file_path):
        return file_path
    if file_path == self.dev_null.name:
        return file_path
    path_components = self._path_components(file_path)
    resolved_components = self._resolve_components(path_components, raw_io)
    return self._components_to_path(resolved_components)
def _components_to_path(self, component_folders):
sep = (self._path_separator(component_folders[0])
if component_folders else self.path_separator)
path = sep.join(component_folders)
if not self._starts_with_root_path(path):
path = sep + path
return path
def _resolve_components(self, path_components, raw_io):
    """Walk *path_components* from the root, restarting from the root
    whenever a symlink is encountered; if a component does not exist,
    the remaining components are returned unresolved.

    Raises:
        OSError(ELOOP): if more than _MAX_LINK_DEPTH links are
            followed (link loop protection).
    """
    current_dir = self.root
    link_depth = 0
    resolved_components = []
    while path_components:
        component = path_components.pop(0)
        resolved_components.append(component)
        current_dir = self._directory_content(current_dir, component)[1]
        if current_dir is None:
            # The component of the path at this point does not actually
            # exist in the folder. We can't resolve the path any more.
            resolved_components.extend(path_components)
            break

        if S_ISLNK(current_dir.st_mode):
            if link_depth > _MAX_LINK_DEPTH:
                self.raise_os_error(errno.ELOOP,
                                    self._components_to_path(
                                        resolved_components))
            link_path = self._follow_link(resolved_components, current_dir)

            # the link target is prepended and resolution restarts at root
            target_components = self._path_components(link_path)
            path_components = target_components + path_components
            resolved_components = []
            current_dir = self.root
            link_depth += 1
    return resolved_components
def _valid_relative_path(self, file_path):
if self.is_windows_fs:
return True
slash_dotdot = self._matching_string(
file_path, self.path_separator + '..')
while file_path and slash_dotdot in file_path:
file_path = file_path[:file_path.rfind(slash_dotdot)]
if not self.exists(self.absnormpath(file_path)):
return False
return True
def _follow_link(self, link_path_components, link):
link_path = link.contents
if self.is_windows_fs and link_path.startswith('\\\\?\\'):
link_path = link_path[4:]
sep = self._path_separator(link_path)
if not self._starts_with_root_path(link_path):
components = link_path_components[:-1]
components.append(link_path)
link_path = sep.join(components)
return self.normpath(link_path)
def get_object_from_normpath(self, file_path, check_read_perm=True):
    """Return the object at the already absolute, normalized
    *file_path*, resolving intermediate symlinks on the way.

    Raises:
        OSError(ENOENT/ENOTDIR): if the path cannot be walked.
        OSError(EACCES): if read permission is missing on a component
            (non-root user, check_read_perm set).
    """
    file_path = make_string_path(file_path)
    if file_path == self.root.name:
        return self.root
    if file_path == self.dev_null.name:
        return self.dev_null

    file_path = self._original_path(file_path)
    path_components = self._path_components(file_path)
    target_object = self.root
    try:
        for component in path_components:
            if S_ISLNK(target_object.st_mode):
                target_object = self.resolve(target_object.contents)
            if not S_ISDIR(target_object.st_mode):
                # Windows reports ENOENT, POSIX ENOTDIR here
                if not self.is_windows_fs:
                    self.raise_os_error(errno.ENOTDIR, file_path)
                self.raise_os_error(errno.ENOENT, file_path)
            target_object = target_object.get_entry(component)
            if (not is_root() and check_read_perm and target_object and
                    not target_object.st_mode & PERM_READ):
                self.raise_os_error(errno.EACCES, target_object.path)
    except KeyError:
        self.raise_os_error(errno.ENOENT, file_path)
    return target_object
def get_object(self, file_path, check_read_perm=True):
    """Return the object at *file_path*, after making the path
    absolute and normalized.

    Raises:
        OSError: if the object does not exist or is not accessible.
    """
    file_path = make_string_path(file_path)
    file_path = self.absnormpath(self._original_path(file_path))
    return self.get_object_from_normpath(file_path, check_read_perm)
def resolve(self, file_path, follow_symlinks=True, allow_fd=False,
            check_read_perm=True):
    """Return the object at *file_path*, following a final symlink
    only if *follow_symlinks* is set.

    Args:
        file_path: path (or descriptor, if allow_fd) of the object.
        follow_symlinks: if False, a final symlink object itself is
            returned (lresolve behavior).
        allow_fd: if True, an int argument is a file descriptor.
        check_read_perm: if True, read permissions are enforced.

    Raises:
        TypeError: if *file_path* is an int and allow_fd is False.
        OSError: if the object does not exist.
    """
    if isinstance(file_path, int):
        if allow_fd:
            return self.get_open_file(file_path).get_object()
        raise TypeError('path should be string, bytes or '
                        'os.PathLike (if supported), not int')

    if follow_symlinks:
        file_path = make_string_path(file_path)
        # NOTE(review): check_read_perm is passed positionally into
        # resolve_path's *allow_fd* slot; harmless here because
        # file_path is never an int at this point, but consider
        # passing the arguments by keyword
        return self.get_object_from_normpath(self.resolve_path(
            file_path, check_read_perm), check_read_perm)
    return self.lresolve(file_path)
def lresolve(self, path):
    """Return the object at *path* without following a final symlink
    (the symlink object itself is returned).

    Raises:
        OSError: if the path or its parent does not exist, the parent
            is not a readable directory, or the path is empty.
    """
    path = make_string_path(path)
    if not path:
        raise OSError(errno.ENOENT, path)
    if path == self.root.name:
        return self.root

    # strip any trailing separator before splitting off the last part
    path = self._path_without_trailing_separators(path)
    if path == self._matching_string(path, '.'):
        path = self.cwd
    path = self._original_path(path)

    parent_directory, child_name = self.splitpath(path)
    if not parent_directory:
        parent_directory = self.cwd
    try:
        parent_obj = self.resolve(parent_directory)
        assert parent_obj
        if not isinstance(parent_obj, FakeDirectory):
            # POSIX distinguishes a file in the parent chain (ENOTDIR)
            if not self.is_windows_fs and isinstance(parent_obj, FakeFile):
                self.raise_os_error(errno.ENOTDIR, path)
            self.raise_os_error(errno.ENOENT, path)
        if not parent_obj.st_mode & PERM_READ:
            self.raise_os_error(errno.EACCES, parent_directory)
        return (parent_obj.get_entry(child_name) if child_name
                else parent_obj)
    except KeyError:
        self.raise_os_error(errno.ENOENT, path)
def add_object(self, file_path, file_object):
    """Add *file_object* to the directory at *file_path*; an empty
    path means the root directory.

    Raises:
        OSError: if *file_path* does not resolve to a directory.
    """
    if not file_path:
        self.root.add_entry(file_object)
        return
    target_directory = self.resolve(file_path)
    if not S_ISDIR(target_directory.st_mode):
        error = errno.ENOENT if self.is_windows_fs else errno.ENOTDIR
        self.raise_os_error(error, file_path)
    target_directory.add_entry(file_object)
def rename(self, old_file_path, new_file_path, force_replace=False):
    """Rename *old_file_path* to *new_file_path*, mimicking the
    OS-specific behavior of os.rename / os.replace.

    Args:
        old_file_path: existing path of the object to rename.
        new_file_path: new path; an existing object there may be
            replaced, depending on the OS rules and *force_replace*.
        force_replace: if True, an existing target file is replaced
            even under Windows emulation (os.replace semantics).

    Raises:
        OSError: for the various OS-specific error conditions
            (missing source, cross-device move, directory conflicts,
            permission problems, ...).
    """
    ends_with_sep = self.ends_with_path_separator(old_file_path)
    old_file_path = self.absnormpath(old_file_path)
    new_file_path = self.absnormpath(new_file_path)
    if not self.exists(old_file_path, check_link=True):
        self.raise_os_error(errno.ENOENT, old_file_path, 2)
    if ends_with_sep:
        self._handle_broken_link_with_trailing_sep(old_file_path)

    old_object = self.lresolve(old_file_path)
    if not self.is_windows_fs:
        self._handle_posix_dir_link_errors(
            new_file_path, old_file_path, ends_with_sep)

    if self.exists(new_file_path, check_link=True):
        new_file_path = self._rename_to_existing_path(
            force_replace, new_file_path, old_file_path,
            old_object, ends_with_sep)
        if not new_file_path:
            # a falsy result means nothing is left to do
            return

    old_dir, old_name = self.splitpath(old_file_path)
    new_dir, new_name = self.splitpath(new_file_path)
    if not self.exists(new_dir):
        self.raise_os_error(errno.ENOENT, new_dir)
    old_dir_object = self.resolve(old_dir)
    new_dir_object = self.resolve(new_dir)
    if old_dir_object.st_dev != new_dir_object.st_dev:
        # renaming across mount points is not supported
        self.raise_os_error(errno.EXDEV, old_file_path)
    if not S_ISDIR(new_dir_object.st_mode):
        self.raise_os_error(
            errno.EACCES if self.is_windows_fs else errno.ENOTDIR,
            new_file_path)
    if new_dir_object.has_parent_object(old_object):
        # cannot move a directory into itself
        self.raise_os_error(errno.EINVAL, new_file_path)

    object_to_rename = old_dir_object.get_entry(old_name)
    old_dir_object.remove_entry(old_name, recursive=False)
    object_to_rename.name = new_name
    new_name = new_dir_object._normalized_entryname(new_name)
    if new_name in new_dir_object.contents:
        # an existing remaining target has been validated above
        new_dir_object.remove_entry(new_name)
    new_dir_object.add_entry(object_to_rename)
def _handle_broken_link_with_trailing_sep(self, path):
if self.islink(path):
if not self.exists(path):
error = (errno.ENOENT if self.is_macos else
errno.EINVAL if self.is_windows_fs else errno.ENOTDIR)
self.raise_os_error(error, path)
def _handle_posix_dir_link_errors(self, new_file_path, old_file_path,
                                  ends_with_sep):
    """Raise the POSIX-specific errors for renames that mix
    directories and symlinks (macOS tolerates some of these)."""
    if (self.isdir(old_file_path, follow_symlinks=False) and
            self.islink(new_file_path)):
        self.raise_os_error(errno.ENOTDIR, new_file_path)
    if (self.isdir(new_file_path, follow_symlinks=False) and
            self.islink(old_file_path)):
        if ends_with_sep and self.is_macos:
            return
        error = errno.ENOTDIR if ends_with_sep else errno.EISDIR
        self.raise_os_error(error, new_file_path)
    if (ends_with_sep and self.islink(old_file_path) and
            old_file_path == new_file_path and not self.is_windows_fs):
        self.raise_os_error(errno.ENOTDIR, new_file_path)
def _rename_to_existing_path(self, force_replace, new_file_path,
                             old_file_path, old_object, ends_with_sep):
    """Handle the rename cases where the target path already exists.

    Returns:
        The (possibly case-adjusted) target path if a rename still
        has to be performed, or None if nothing is left to do.

    Raises:
        OSError: for the OS-specific conflict conditions.
    """
    new_object = self.get_object(new_file_path)
    if old_file_path == new_file_path:
        if not S_ISLNK(new_object.st_mode) and ends_with_sep:
            error = errno.EINVAL if self.is_windows_fs else errno.ENOTDIR
            self.raise_os_error(error, old_file_path)
        return  # nothing to do for identical paths
    if old_object == new_object:
        new_file_path = self._rename_same_object(
            new_file_path, old_file_path)
    elif (S_ISDIR(new_object.st_mode) or S_ISLNK(new_object.st_mode)):
        self._handle_rename_error_for_dir_or_link(
            force_replace, new_file_path,
            new_object, old_object, ends_with_sep)
    elif S_ISDIR(old_object.st_mode):
        error = errno.EEXIST if self.is_windows_fs else errno.ENOTDIR
        self.raise_os_error(error, new_file_path)
    elif self.is_windows_fs and not force_replace:
        self.raise_os_error(errno.EEXIST, new_file_path)
    else:
        # the target is a regular file that may be replaced
        self.remove_object(new_file_path)
    return new_file_path
def _handle_rename_error_for_dir_or_link(self, force_replace,
                                         new_file_path, new_object,
                                         old_object, ends_with_sep):
    """Raise the OS-specific error for a rename whose target is an
    existing directory or symlink."""
    if self.is_windows_fs:
        if force_replace:
            self.raise_os_error(errno.EACCES, new_file_path)
        else:
            self.raise_os_error(errno.EEXIST, new_file_path)
    if not S_ISLNK(new_object.st_mode):
        if new_object.contents:
            # non-empty target directory (macOS tolerates a trailing
            # separator on a symlink source)
            if (not S_ISLNK(old_object.st_mode) or
                    not ends_with_sep or not self.is_macos):
                self.raise_os_error(errno.ENOTEMPTY, new_file_path)
        if S_ISREG(old_object.st_mode):
            self.raise_os_error(errno.EISDIR, new_file_path)
def _rename_same_object(self, new_file_path, old_file_path):
    """Decide whether renaming an object onto itself is a real
    case-only rename (case-insensitive filesystems).

    Returns:
        The case-adjusted target path if the rename has to be done,
        or None if nothing has to be renamed.
    """
    do_rename = old_file_path.lower() == new_file_path.lower()
    if not do_rename:
        try:
            real_old_path = self.resolve_path(old_file_path)
            original_old_path = self._original_path(real_old_path)
            real_new_path = self.resolve_path(new_file_path)
            if (real_new_path == original_old_path and
                    (new_file_path == real_old_path) ==
                    (new_file_path.lower() ==
                     real_old_path.lower())):
                real_object = self.resolve(old_file_path,
                                           follow_symlinks=False)
                do_rename = (os.path.basename(old_file_path) ==
                             real_object.name or not self.is_macos)
            else:
                do_rename = (real_new_path.lower() ==
                             real_old_path.lower())
            if do_rename:
                # only the case of the entry name changes; keep the
                # stored case of the parent path
                parent, file_name = self.splitpath(new_file_path)
                new_file_path = self.joinpaths(
                    self._original_path(parent), file_name)
        except OSError:
            # one of the paths does not exist - no case-only rename
            pass
    if not do_rename:
        new_file_path = None
    return new_file_path
def remove_object(self, file_path):
    """Remove an existing file or directory tree at *file_path*.

    Raises:
        OSError(EBUSY): if the path is a filesystem root.
        OSError(ENOENT/ENOTDIR): if the path does not exist or has a
            non-directory in its parent chain.
    """
    file_path = self.absnormpath(self._original_path(file_path))
    if self._is_root_path(file_path):
        self.raise_os_error(errno.EBUSY, file_path)
    try:
        dirname, basename = self.splitpath(file_path)
        target_directory = self.resolve(dirname, check_read_perm=False)
        target_directory.remove_entry(basename)
    except KeyError:
        self.raise_os_error(errno.ENOENT, file_path)
    except AttributeError:
        self.raise_os_error(errno.ENOTDIR, file_path)
def make_string_path(self, path):
    """Return *path* as a string, with the real OS separator replaced
    by the fake filesystem's separator."""
    path = make_string_path(path)
    real_sep = self._matching_string(path, os.sep)
    fake_sep = self._matching_string(path, self.path_separator)
    return path.replace(real_sep, fake_sep)
def create_dir(self, directory_path, perm_bits=PERM_DEF):
    """Create *directory_path* along with all missing parent
    directories.

    Args:
        directory_path: path of the directory to create.
        perm_bits: permission bits applied to the newly created
            directories.

    Returns:
        The FakeDirectory object of the innermost directory.

    Raises:
        OSError(EEXIST): if the path already exists.
        OSError(ENOTDIR): if a non-directory is in the path.
    """
    directory_path = self.make_string_path(directory_path)
    directory_path = self.absnormpath(directory_path)
    self._auto_mount_drive_if_needed(directory_path)
    if self.exists(directory_path, check_link=True):
        self.raise_os_error(errno.EEXIST, directory_path)
    path_components = self._path_components(directory_path)
    current_dir = self.root

    new_dirs = []
    for component in path_components:
        directory = self._directory_content(current_dir, component)[1]
        if not directory:
            new_dir = FakeDirectory(component, filesystem=self)
            new_dirs.append(new_dir)
            current_dir.add_entry(new_dir)
            current_dir = new_dir
        else:
            if S_ISLNK(directory.st_mode):
                directory = self.resolve(directory.contents)
            current_dir = directory
            if directory.st_mode & S_IFDIR != S_IFDIR:
                self.raise_os_error(errno.ENOTDIR, current_dir.path)

    # apply the permission bits only to the directories newly created here
    for new_dir in new_dirs:
        new_dir.st_mode = S_IFDIR | perm_bits

    return current_dir
def create_file(self, file_path, st_mode=S_IFREG | PERM_DEF_FILE,
                contents='', st_size=None, create_missing_dirs=True,
                apply_umask=False, encoding=None, errors=None,
                side_effect=None):
    """Create a file in the fake filesystem.

    Thin public wrapper that forwards all arguments to
    create_file_internally() with the internal-only flags left at
    their defaults.
    """
    return self.create_file_internally(
        file_path, st_mode=st_mode, contents=contents, st_size=st_size,
        create_missing_dirs=create_missing_dirs, apply_umask=apply_umask,
        encoding=encoding, errors=errors, side_effect=side_effect)
def add_real_file(self, source_path, read_only=True, target_path=None):
    """Map a file from the real filesystem into the fake one.

    The fake file mirrors the real file's stat values; contents are read
    from the real file lazily on first access.

    Args:
        source_path: path of the file in the real filesystem.
        read_only: if True, all write permission bits are cleared on the
            fake file, protecting the real file from accidental changes.
        target_path: path in the fake filesystem; defaults to source_path.

    Returns:
        The newly created FakeFileFromRealFile object.
    """
    target_path = target_path or source_path
    source_path = make_string_path(source_path)
    target_path = self.make_string_path(target_path)
    real_stat = os.stat(source_path)
    fake_file = self.create_file_internally(target_path,
                                            read_from_real_fs=True)

    # copy over the real stat values; content is read lazily
    fake_file.stat_result.set_from_stat_result(real_stat)
    if read_only:
        # clear all write permission bits (owner/group/other)
        fake_file.st_mode &= 0o777444
    fake_file.file_path = source_path
    self.change_disk_usage(fake_file.size, fake_file.name,
                           fake_file.st_dev)
    return fake_file
def add_real_symlink(self, source_path, target_path=None):
    """Mirror a symlink from the real filesystem into the fake one.

    The fake link is created at *target_path* (or *source_path* when not
    given) and points at whatever the real link points at.
    """
    source_path = self._path_without_trailing_separators(source_path)
    if not os.path.exists(source_path) and not os.path.islink(source_path):
        self.raise_os_error(errno.ENOENT, source_path)

    link_dest = os.readlink(source_path)
    link_location = target_path if target_path else source_path
    return self.create_symlink(link_location, link_dest)
def add_real_directory(self, source_path, read_only=True, lazy_read=True,
                       target_path=None):
    """Map a directory tree from the real filesystem into the fake one.

    Args:
        source_path: path of the directory in the real filesystem.
        read_only: passed through to add_real_file() for each file.
        lazy_read: if True, a FakeDirectoryFromRealDirectory placeholder
            is added and contents are materialized only when accessed;
            if False, the whole tree is walked and mapped eagerly.
        target_path: path in the fake filesystem; defaults to source_path.

    Returns:
        The fake directory object for the mapped root.

    Raises:
        OSError: ENOENT if source_path does not exist in the real fs.
    """
    source_path = self._path_without_trailing_separators(source_path)
    if not os.path.exists(source_path):
        self.raise_os_error(errno.ENOENT, source_path)
    target_path = target_path or source_path
    if lazy_read:
        # only create the parent chain now; contents load on demand
        parent_path = os.path.split(target_path)[0]
        if self.exists(parent_path):
            parent_dir = self.get_object(parent_path)
        else:
            parent_dir = self.create_dir(parent_path)
        new_dir = FakeDirectoryFromRealDirectory(
            source_path, self, read_only, target_path)
        parent_dir.add_entry(new_dir)
    else:
        # eager mapping: walk the real tree, mirroring symlinks and files
        new_dir = self.create_dir(target_path)
        for base, _, files in os.walk(source_path):
            new_base = os.path.join(new_dir.path,
                                    os.path.relpath(base, source_path))
            for fileEntry in os.listdir(base):
                abs_fileEntry = os.path.join(base, fileEntry)

                if not os.path.islink(abs_fileEntry):
                    continue

                self.add_real_symlink(
                    abs_fileEntry, os.path.join(new_base, fileEntry))
            for fileEntry in files:
                path = os.path.join(base, fileEntry)
                if os.path.islink(path):
                    # symlinks were already handled above
                    continue
                self.add_real_file(path,
                                   read_only,
                                   os.path.join(new_base, fileEntry))
    return new_dir
def add_real_paths(self, path_list, read_only=True, lazy_dir_read=True):
    """Map each real path in *path_list* (file or directory tree) into
    the fake filesystem."""
    for real_path in path_list:
        if os.path.isdir(real_path):
            self.add_real_directory(real_path, read_only, lazy_dir_read)
            continue
        self.add_real_file(real_path, read_only)
def create_file_internally(self, file_path,
                           st_mode=S_IFREG | PERM_DEF_FILE,
                           contents='', st_size=None,
                           create_missing_dirs=True,
                           apply_umask=False, encoding=None, errors=None,
                           read_from_real_fs=False, raw_io=False,
                           side_effect=None):
    """Internal worker that creates a fake file object.

    Args:
        file_path: path of the file to create.
        st_mode: mode bits; must be an int.
        contents: initial file contents (ignored if st_size is set).
        st_size: if set, creates a sparse "large file" of this size.
        create_missing_dirs: create parent directories if missing,
            otherwise raise ENOENT for a missing parent.
        apply_umask: mask st_mode with the filesystem umask.
        encoding/errors: text encoding settings for string contents.
        read_from_real_fs: create a FakeFileFromRealFile proxy instead.
        raw_io: accepted for interface symmetry; unused here.
        side_effect: callable invoked on file modification.

    Returns:
        The newly created file object.

    Raises:
        TypeError: if st_mode is not an int.
        OSError: EEXIST if the path exists, ENOENT for a missing parent.
    """
    file_path = self.make_string_path(file_path)
    file_path = self.absnormpath(file_path)
    if not is_int_type(st_mode):
        raise TypeError(
            'st_mode must be of int type - did you mean to set contents?')

    if self.exists(file_path, check_link=True):
        self.raise_os_error(errno.EEXIST, file_path)
    parent_directory, new_file = self.splitpath(file_path)
    if not parent_directory:
        parent_directory = self.cwd
    self._auto_mount_drive_if_needed(parent_directory)
    if not self.exists(parent_directory):
        if not create_missing_dirs:
            self.raise_os_error(errno.ENOENT, parent_directory)
        self.create_dir(parent_directory)
    else:
        parent_directory = self._original_path(parent_directory)
    if apply_umask:
        st_mode &= ~self.umask
    if read_from_real_fs:
        file_object = FakeFileFromRealFile(file_path, filesystem=self,
                                           side_effect=side_effect)
    else:
        file_object = FakeFile(new_file, st_mode, filesystem=self,
                               encoding=encoding, errors=errors,
                               side_effect=side_effect)

    self.add_object(parent_directory, file_object)

    if st_size is None and contents is None:
        contents = ''
    if (not read_from_real_fs and
            (contents is not None or st_size is not None)):
        try:
            if st_size is not None:
                file_object.set_large_file_size(st_size)
            else:
                file_object._set_initial_contents(contents)
        except OSError:
            # roll back the entry if setting contents failed
            # (e.g. disk-full simulation)
            self.remove_object(file_path)
            raise

    return file_object
def create_symlink(self, file_path, link_target, create_missing_dirs=True):
    """Create a symlink at `file_path` pointing to `link_target`.

    Args:
        file_path: path of the symlink to create.
        link_target: the target the link will point to (stored as the
            link file's contents, not resolved here).
        create_missing_dirs: create missing parent directories.

    Returns:
        The newly created FakeFile object representing the link.

    Raises:
        OSError: platform-dependent EEXIST/ENOENT/EINVAL for paths
            ending with a separator; behavior differs between Windows,
            macOS and Linux, mirroring the real OS quirks.
    """
    file_path = self.make_string_path(file_path)
    link_target = self.make_string_path(link_target)
    file_path = self.normcase(file_path)
    if self.ends_with_path_separator(file_path):
        if self.exists(file_path):
            self.raise_os_error(errno.EEXIST, file_path)
        if self.exists(link_target):
            if not self.is_windows_fs:
                self.raise_os_error(errno.ENOENT, file_path)
        else:
            if self.is_windows_fs:
                self.raise_os_error(errno.EINVAL, link_target)
            if not self.exists(
                    self._path_without_trailing_separators(file_path),
                    check_link=True):
                self.raise_os_error(errno.ENOENT, link_target)
            if self.is_macos:
                # macOS replaces an existing broken link in this case
                if self.exists(file_path, check_link=True):
                    self.remove_object(file_path)
            else:
                self.raise_os_error(errno.EEXIST, link_target)

    # resolve the link path only if it is not a link itself
    if not self.islink(file_path):
        file_path = self.resolve_path(file_path)
    link_target = make_string_path(link_target)
    return self.create_file_internally(
        file_path, st_mode=S_IFLNK | PERM_DEF,
        contents=link_target,
        create_missing_dirs=create_missing_dirs,
        raw_io=True)
def link(self, old_path, new_path, follow_symlinks=True):
    """Create a hard link at `new_path` to the object at `old_path`.

    Args:
        old_path: path of the existing file.
        new_path: path of the new hard link (must not exist).
        follow_symlinks: if False, link to the symlink itself.

    Returns:
        The linked FakeFile object.

    Raises:
        OSError: EEXIST if new_path exists, ENOENT for missing paths,
            EPERM/EACCES when trying to hard-link a directory.
    """
    new_path_normalized = self.absnormpath(new_path)
    if self.exists(new_path_normalized, check_link=True):
        self.raise_os_error(errno.EEXIST, new_path)

    new_parent_directory, new_basename = self.splitpath(
        new_path_normalized)
    if not new_parent_directory:
        new_parent_directory = self.cwd

    if not self.exists(new_parent_directory):
        self.raise_os_error(errno.ENOENT, new_parent_directory)

    if self.ends_with_path_separator(old_path):
        error = errno.EINVAL if self.is_windows_fs else errno.ENOTDIR
        self.raise_os_error(error, old_path)

    if not self.is_windows_fs and self.ends_with_path_separator(new_path):
        self.raise_os_error(errno.ENOENT, old_path)

    # resolve the old object, remapping a failure to ENOENT
    try:
        old_file = self.resolve(old_path, follow_symlinks=follow_symlinks)
    except OSError:
        self.raise_os_error(errno.ENOENT, old_path)

    if old_file.st_mode & S_IFDIR:
        # hard links to directories are not allowed
        self.raise_os_error(
            errno.EACCES if self.is_windows_fs else errno.EPERM, old_path)

    # abuse the name field to control the filename of the
    # newly created link
    old_file.name = new_basename
    self.add_object(new_parent_directory, old_file)
    return old_file
def _is_circular_link(self, link_obj):
    """Return True if resolving *link_obj* loops back on itself
    (resolution raises ELOOP)."""
    try:
        self.resolve_path(link_obj.contents)
    except OSError as err:
        return err.errno == errno.ELOOP
    else:
        return False
def readlink(self, path):
    """Return the target of the symlink at `path`.

    Args:
        path: path of the symlink.

    Returns:
        The link target (the link file's contents).

    Raises:
        TypeError: if path is None.
        OSError: EINVAL if path is not a symlink, ENOENT/ELOOP for
            broken or circular links (platform-dependent).
    """
    if path is None:
        raise TypeError
    link_obj = self.lresolve(path)
    if S_IFMT(link_obj.st_mode) != S_IFLNK:
        self.raise_os_error(errno.EINVAL, path)

    if self.ends_with_path_separator(path):
        if not self.is_windows_fs and self.exists(path):
            self.raise_os_error(errno.EINVAL, path)
        if not self.exists(link_obj.path):
            if self.is_windows_fs:
                error = errno.EINVAL
            elif self._is_circular_link(link_obj):
                if self.is_macos:
                    # macOS resolves a circular link to its own path
                    return link_obj.path
                error = errno.ELOOP
            else:
                error = errno.ENOENT
            self.raise_os_error(error, link_obj.path)

    return link_obj.contents
def makedir(self, dir_name, mode=PERM_DEF):
    """Create a single directory; the parent must already exist.

    Args:
        dir_name: path of the directory to create.
        mode: permission bits, masked with the filesystem umask.

    Raises:
        OSError: ENOENT for an empty name or missing parent,
            EEXIST/EACCES if the directory already exists.
    """
    dir_name = make_string_path(dir_name)
    ends_with_sep = self.ends_with_path_separator(dir_name)
    dir_name = self._path_without_trailing_separators(dir_name)
    if not dir_name:
        self.raise_os_error(errno.ENOENT, '')

    if self.is_windows_fs:
        dir_name = self.absnormpath(dir_name)
    parent_dir, _ = self.splitpath(dir_name)
    if parent_dir:
        base_dir = self.normpath(parent_dir)
        # a parent ending in '..' must be resolved to its base on POSIX
        ellipsis = self._matching_string(
            parent_dir, self.path_separator + '..')
        if parent_dir.endswith(ellipsis) and not self.is_windows_fs:
            base_dir, dummy_dotdot, _ = parent_dir.partition(ellipsis)
        if not self.exists(base_dir):
            self.raise_os_error(errno.ENOENT, base_dir)

    dir_name = self.absnormpath(dir_name)
    if self.exists(dir_name, check_link=True):
        if self.is_windows_fs and dir_name == self.path_separator:
            error_nr = errno.EACCES
        else:
            error_nr = errno.EEXIST
        if ends_with_sep and self.is_macos and not self.exists(dir_name):
            # macOS allows overwriting a broken link with trailing sep
            self.remove_object(dir_name)
        else:
            self.raise_os_error(error_nr, dir_name)
    head, tail = self.splitpath(dir_name)

    self.add_object(
        head, FakeDirectory(tail, mode & ~self.umask, filesystem=self))
def _path_without_trailing_separators(self, path):
    """Return *path* with every trailing path separator stripped."""
    stripped = path
    while self.ends_with_path_separator(stripped):
        stripped = stripped[:-1]
    return stripped
def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=False):
    """Create a directory and all missing parent directories.

    Args:
        dir_name: path of the directory to create.
        mode: permission bits, masked with the filesystem umask.
        exist_ok: if True, do not raise if the directory exists.

    Raises:
        OSError: ENOENT for an empty name; EEXIST/ENOENT if creation
            fails and exist_ok does not apply; EACCES is always
            propagated.
    """
    if not dir_name:
        self.raise_os_error(errno.ENOENT, '')
    ends_with_sep = self.ends_with_path_separator(dir_name)
    dir_name = self.absnormpath(dir_name)
    if (ends_with_sep and self.is_macos and
            self.exists(dir_name, check_link=True) and
            not self.exists(dir_name)):
        # to avoid EEXIST exception, remove the link
        self.remove_object(dir_name)

    # walk down existing components to validate the existing prefix
    path_components = self._path_components(dir_name)

    # Raise a permission denied error if the first existing directory
    # is not writeable.
    current_dir = self.root
    for component in path_components:
        if (component not in current_dir.contents
                or not isinstance(current_dir.contents, dict)):
            break
        else:
            current_dir = current_dir.contents[component]
    try:
        self.create_dir(dir_name, mode & ~self.umask)
    except OSError as e:
        if e.errno == errno.EACCES:
            # permission denied - propagate it
            raise
        if (not exist_ok or
                not isinstance(self.resolve(dir_name), FakeDirectory)):
            if self.is_windows_fs and e.errno == errno.ENOTDIR:
                e.errno = errno.ENOENT
            self.raise_os_error(e.errno, e.filename)
def _is_of_type(self, path, st_flag, follow_symlinks=True):
    """Return True if `path` resolves to an object of type `st_flag`.

    Args:
        path: path to check.
        st_flag: the stat file-type flag (S_IFDIR, S_IFREG, S_IFLNK).
        follow_symlinks: if False, check the link object itself.

    Raises:
        TypeError: if path is None.
    """
    path = make_string_path(path)
    if path is None:
        raise TypeError
    try:
        obj = self.resolve(path, follow_symlinks)
        if obj:
            self.raise_for_filepath_ending_with_separator(
                path, obj, macos_handling=not follow_symlinks)
            return S_IFMT(obj.st_mode) == st_flag
    except OSError:
        # unresolvable paths are simply "not of this type"
        return False
    # NOTE(review): unreachable - both branches above return
    return False
def isdir(self, path, follow_symlinks=True):
    """Return True if *path* resolves to a directory."""
    return self._is_of_type(path, S_IFDIR, follow_symlinks=follow_symlinks)
def isfile(self, path, follow_symlinks=True):
    """Return True if *path* resolves to a regular file."""
    return self._is_of_type(path, S_IFREG, follow_symlinks=follow_symlinks)
def islink(self, path):
    """Return True if *path* itself is a symbolic link (not followed)."""
    return self._is_of_type(path, S_IFLNK, follow_symlinks=False)
def confirmdir(self, target_directory):
    """Resolve *target_directory* and return the directory object;
    raise ENOTDIR if it is not a directory."""
    dir_object = self.resolve(target_directory)
    if dir_object.st_mode & S_IFDIR == 0:
        # 267 is the matching Windows winerror code
        self.raise_os_error(errno.ENOTDIR, target_directory, 267)
    return dir_object
def remove(self, path):
    """Remove the file at `path`; directories are rejected.

    Args:
        path: path of the file to remove.

    Raises:
        OSError: EISDIR/EPERM/EACCES (platform-dependent) when `path`
            is a directory, ENOTDIR/EACCES/EPERM for directory-like
            trailing separators, ENOENT if missing.
    """
    norm_path = self.absnormpath(path)
    if self.ends_with_path_separator(path):
        self._handle_broken_link_with_trailing_sep(norm_path)
    if self.exists(norm_path):
        obj = self.resolve(norm_path, check_read_perm=False)
        if S_IFMT(obj.st_mode) == S_IFDIR:
            # removing a directory is only legal if it is reached
            # through a symlink (the link is removed, not the dir)
            link_obj = self.lresolve(norm_path)
            if S_IFMT(link_obj.st_mode) != S_IFLNK:
                if self.is_windows_fs:
                    error = errno.EACCES
                elif self.is_macos:
                    error = errno.EPERM
                else:
                    error = errno.EISDIR
                self.raise_os_error(error, norm_path)

            norm_path = make_string_path(norm_path)
            if path.endswith(self.path_separator):
                if self.is_windows_fs:
                    error = errno.EACCES
                elif self.is_macos:
                    error = errno.EPERM
                else:
                    error = errno.ENOTDIR
                self.raise_os_error(error, norm_path)
        else:
            self.raise_for_filepath_ending_with_separator(path, obj)

    self.remove_object(norm_path)
def rmdir(self, target_directory, allow_symlink=False):
    """Remove an empty directory.

    Args:
        target_directory: path of the directory to remove.
        allow_symlink: if True (POSIX), silently return when the target
            is a symlink instead of raising.

    Raises:
        OSError: EINVAL/EACCES for '.', ENOTDIR for symlinked dirs,
            ENOTEMPTY for non-empty directories.
    """
    if target_directory in (b'.', u'.'):
        error_nr = errno.EACCES if self.is_windows_fs else errno.EINVAL
        self.raise_os_error(error_nr, target_directory)
    ends_with_sep = self.ends_with_path_separator(target_directory)
    target_directory = self.absnormpath(target_directory)
    if self.confirmdir(target_directory):
        if not self.is_windows_fs and self.islink(target_directory):
            if allow_symlink:
                return
            if not ends_with_sep or not self.is_macos:
                self.raise_os_error(errno.ENOTDIR, target_directory)

        dir_object = self.resolve(target_directory)
        if dir_object.contents:
            self.raise_os_error(errno.ENOTEMPTY, target_directory)
        self.remove_object(target_directory)
def listdir(self, target_directory):
    """Return the names of all entries in *target_directory*
    (a file descriptor is also accepted)."""
    resolved = self.resolve_path(target_directory, allow_fd=True)
    directory = self.confirmdir(resolved)
    return list(directory.contents)
def __str__(self):
    """Render the whole fake filesystem tree starting at the root."""
    return str(self.root)
def _add_standard_streams(self):
    """Register stdin/stdout/stderr so they occupy descriptors 0-2
    in the open-file table (registration order matters)."""
    for stream in (sys.stdin, sys.stdout, sys.stderr):
        self._add_open_file(StandardStreamWrapper(stream))
# Register CamelCase aliases for the old public API; calling an alias
# emits a deprecation warning and forwards to the snake_case method.
for _method, _old_name in [
    (FakeFilesystem.get_disk_usage, 'GetDiskUsage'),
    (FakeFilesystem.set_disk_usage, 'SetDiskUsage'),
    (FakeFilesystem.change_disk_usage, 'ChangeDiskUsage'),
    (FakeFilesystem.add_mount_point, 'AddMountPoint'),
    (FakeFilesystem.stat, 'GetStat'),
    (FakeFilesystem.chmod, 'ChangeMode'),
    (FakeFilesystem.utime, 'UpdateTime'),
    (FakeFilesystem._add_open_file, 'AddOpenFile'),
    (FakeFilesystem._close_open_file, 'CloseOpenFile'),
    (FakeFilesystem.has_open_file, 'HasOpenFile'),
    (FakeFilesystem.get_open_file, 'GetOpenFile'),
    (FakeFilesystem.normcase, 'NormalizePathSeparator'),
    (FakeFilesystem.normpath, 'CollapsePath'),
    (FakeFilesystem._original_path, 'NormalizeCase'),
    (FakeFilesystem.absnormpath, 'NormalizePath'),
    (FakeFilesystem.splitpath, 'SplitPath'),
    (FakeFilesystem.splitdrive, 'SplitDrive'),
    (FakeFilesystem.joinpaths, 'JoinPaths'),
    (FakeFilesystem._path_components, 'GetPathComponents'),
    (FakeFilesystem._starts_with_drive_letter, 'StartsWithDriveLetter'),
    (FakeFilesystem.exists, 'Exists'),
    (FakeFilesystem.resolve_path, 'ResolvePath'),
    (FakeFilesystem.get_object_from_normpath, 'GetObjectFromNormalizedPath'),
    (FakeFilesystem.get_object, 'GetObject'),
    (FakeFilesystem.resolve, 'ResolveObject'),
    (FakeFilesystem.lresolve, 'LResolveObject'),
    (FakeFilesystem.add_object, 'AddObject'),
    (FakeFilesystem.remove_object, 'RemoveObject'),
    (FakeFilesystem.rename, 'RenameObject'),
    (FakeFilesystem.create_dir, 'CreateDirectory'),
    (FakeFilesystem.create_file, 'CreateFile'),
    (FakeFilesystem.create_symlink, 'CreateLink'),
    (FakeFilesystem.link, 'CreateHardLink'),
    (FakeFilesystem.readlink, 'ReadLink'),
    (FakeFilesystem.makedir, 'MakeDirectory'),
    (FakeFilesystem.makedirs, 'MakeDirectories'),
    (FakeFilesystem.isdir, 'IsDir'),
    (FakeFilesystem.isfile, 'IsFile'),
    (FakeFilesystem.islink, 'IsLink'),
    (FakeFilesystem.confirmdir, 'ConfirmDir'),
    (FakeFilesystem.remove, 'RemoveFile'),
    (FakeFilesystem.rmdir, 'RemoveDirectory'),
    (FakeFilesystem.listdir, 'ListDir'),
]:
    Deprecator.add(FakeFilesystem, _method, _old_name)
del _method, _old_name
class FakePathModule:
    """Faked os.path module replacement.

    FakePathModule should *only* be instantiated by FakeOsModule.  See the
    FakeOsModule docstring for details.  All path semantics are delegated
    to the associated FakeFilesystem instance.
    """
    # cached copy of the real os.path module, used for operations that do
    # not depend on filesystem contents (e.g. expanduser, relpath math)
    _OS_PATH_COPY = _copy_module(os.path)

    @staticmethod
    def dir():
        """Return the list of patched function names in os.path."""
        return [
            'abspath', 'dirname', 'exists', 'expanduser', 'getatime',
            'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile',
            'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath',
            'realpath', 'relpath', 'split', 'splitdrive', 'samefile'
        ]

    def __init__(self, filesystem, os_module):
        """Init.

        Args:
            filesystem: FakeFilesystem used to provide file system info.
            os_module: the fake os module this path module belongs to.
        """
        self.filesystem = filesystem
        self._os_path = self._OS_PATH_COPY
        self._os_path.os = self.os = os_module
        self.sep = self.filesystem.path_separator
        self.altsep = self.filesystem.alternative_path_separator

    def exists(self, path):
        """Return True if `path` exists (symlinks are followed)."""
        return self.filesystem.exists(path)

    def lexists(self, path):
        """Return True if `path` exists, counting broken symlinks."""
        return self.filesystem.exists(path, check_link=True)

    def getsize(self, path):
        """Return the file object size in bytes.

        Raises:
            OSError: EINVAL/ENOTDIR if `path` ends with a separator but
                is not a directory.
        """
        file_obj = self.filesystem.resolve(path)
        if (self.filesystem.ends_with_path_separator(path) and
                S_IFMT(file_obj.st_mode) != S_IFDIR):
            error_nr = (errno.EINVAL if self.filesystem.is_windows_fs
                        else errno.ENOTDIR)
            self.filesystem.raise_os_error(error_nr, path)
        return file_obj.st_size

    def isabs(self, path):
        """Return True if `path` is an absolute path in the fake fs."""
        if self.filesystem.is_windows_fs:
            path = self.splitdrive(path)[1]
        path = make_string_path(path)
        sep = self.filesystem._path_separator(path)
        altsep = self.filesystem._alternative_path_separator(path)
        if self.filesystem.is_windows_fs:
            return len(path) > 0 and path[:1] in (sep, altsep)
        else:
            return (path.startswith(sep) or
                    altsep is not None and path.startswith(altsep))

    def isdir(self, path):
        """Return True if `path` resolves to a directory."""
        return self.filesystem.isdir(path)

    def isfile(self, path):
        """Return True if `path` resolves to a regular file."""
        return self.filesystem.isfile(path)

    def islink(self, path):
        """Return True if `path` is a symlink (not followed)."""
        return self.filesystem.islink(path)

    def getmtime(self, path):
        """Return the modification time of the fake file at `path`.

        Raises:
            OSError: ENOENT (winerror 3) if the file does not exist.
        """
        try:
            file_obj = self.filesystem.resolve(path)
            return file_obj.st_mtime
        except OSError:
            self.filesystem.raise_os_error(errno.ENOENT, winerror=3)

    def getatime(self, path):
        """Return the last access time of the fake file at `path`.

        Raises:
            OSError: ENOENT if the file does not exist.
        """
        try:
            file_obj = self.filesystem.resolve(path)
        except OSError:
            self.filesystem.raise_os_error(errno.ENOENT)
        return file_obj.st_atime

    def getctime(self, path):
        """Return the creation (change) time of the fake file at `path`.

        Raises:
            OSError: ENOENT if the file does not exist.
        """
        try:
            file_obj = self.filesystem.resolve(path)
        except OSError:
            self.filesystem.raise_os_error(errno.ENOENT)
        return file_obj.st_ctime

    def abspath(self, path):
        """Return the absolute version of `path`, relative to the fake cwd."""

        def getcwd():
            """Return the fake cwd as str or bytes to match `path`."""
            if isinstance(path, bytes):
                return self.os.getcwdb()
            else:
                return self.os.getcwd()

        path = make_string_path(path)
        sep = self.filesystem._path_separator(path)
        altsep = self.filesystem._alternative_path_separator(path)
        if not self.isabs(path):
            path = self.join(getcwd(), path)
        elif (self.filesystem.is_windows_fs and
              path.startswith(sep) or altsep is not None and
              path.startswith(altsep)):
            # Windows: a root-relative path inherits the cwd's drive
            cwd = getcwd()
            if self.filesystem._starts_with_drive_letter(cwd):
                path = self.join(cwd[:2], path)
        return self.normpath(path)

    def join(self, *p):
        """Return the completed path with a separator between each part."""
        return self.filesystem.joinpaths(*p)

    def split(self, path):
        """Split `path` into (directory, basename)."""
        return self.filesystem.splitpath(path)

    def splitdrive(self, path):
        """Split `path` into (drive, rest)."""
        return self.filesystem.splitdrive(path)

    def normpath(self, path):
        """Normalize `path`, collapsing '.', '..' and double separators."""
        return self.filesystem.normpath(path)

    def normcase(self, path):
        """Convert to lower case under Windows, replace
        alternative path separator."""
        path = self.filesystem.normcase(path)
        if self.filesystem.is_windows_fs:
            path = path.lower()
        return path

    def relpath(self, path, start=None):
        """Return the relative version of `path` with respect to `start`.

        Both paths are mapped to real-OS separators so the real
        os.path.relpath implementation can be reused, then mapped back.
        """
        if not path:
            raise ValueError("no path specified")
        path = make_string_path(path)
        if start is not None:
            start = make_string_path(start)
        else:
            start = self.filesystem.cwd
        if self.filesystem.alternative_path_separator is not None:
            path = path.replace(self.filesystem.alternative_path_separator,
                                self._os_path.sep)
            start = start.replace(self.filesystem.alternative_path_separator,
                                  self._os_path.sep)
        path = path.replace(self.filesystem.path_separator, self._os_path.sep)
        start = start.replace(
            self.filesystem.path_separator, self._os_path.sep)
        path = self._os_path.relpath(path, start)
        return path.replace(self._os_path.sep, self.filesystem.path_separator)

    def realpath(self, filename):
        """Return the canonical path of `filename`, eliminating any
        symbolic links encountered in the path.
        """
        if self.filesystem.is_windows_fs:
            # Windows has no symlink resolution here; abspath suffices
            return self.abspath(filename)
        filename = make_string_path(filename)
        # filename[:0] yields an empty str or bytes matching filename's type
        path, ok = self._joinrealpath(filename[:0], filename, {})
        return self.abspath(path)

    def samefile(self, path1, path2):
        """Return whether path1 and path2 point to the same file object.

        Raises:
            OSError: if either path does not exist.
        """
        stat1 = self.filesystem.stat(path1)
        stat2 = self.filesystem.stat(path2)
        return (stat1.st_ino == stat2.st_ino and
                stat1.st_dev == stat2.st_dev)

    def _joinrealpath(self, path, rest, seen):
        """Join two paths, normalizing and eliminating any symbolic links
        encountered in the second path.

        Taken from Python source and adapted.  `seen` maps visited link
        paths to their resolved targets (None while in progress) to
        detect symlink loops.
        """
        curdir = self.filesystem._matching_string(path, '.')
        pardir = self.filesystem._matching_string(path, '..')

        sep = self.filesystem._path_separator(path)
        if self.isabs(rest):
            rest = rest[1:]
            path = sep

        while rest:
            name, _, rest = rest.partition(sep)
            if not name or name == curdir:
                # current dir component - ignore
                continue
            if name == pardir:
                # parent dir component - pop one level if possible
                if path:
                    path, name = self.filesystem.splitpath(path)
                    if name == pardir:
                        path = self.filesystem.joinpaths(path, pardir, pardir)
                else:
                    path = pardir
                continue
            newpath = self.filesystem.joinpaths(path, name)
            if not self.filesystem.islink(newpath):
                path = newpath
                continue
            # resolve the symbolic link
            if newpath in seen:
                # already seen this path
                path = seen[newpath]
                if path is not None:
                    # use cached value
                    continue
                # the symlink is not resolved, so we must have a
                # symlink loop - return the partially resolved path
                return self.filesystem.joinpaths(newpath, rest), False
            seen[newpath] = None  # not resolved symlink
            path, ok = self._joinrealpath(
                path, self.filesystem.readlink(newpath), seen)
            if not ok:
                return self.filesystem.joinpaths(path, rest), False
            seen[newpath] = path  # resolved symlink
        return path, True

    def dirname(self, path):
        """Return the directory component of `path`."""
        return self.split(path)[0]

    def expanduser(self, path):
        """Return the argument with an initial component of ~ or ~user
        replaced by that user's home directory (uses the real os.path).
        """
        return self._os_path.expanduser(path).replace(
            self._os_path.sep, self.sep)

    def ismount(self, path):
        """Return True if `path` is a mount point in the fake filesystem."""
        path = make_string_path(path)
        if not path:
            return False
        normed_path = self.filesystem.absnormpath(path)
        sep = self.filesystem._path_separator(path)
        if self.filesystem.is_windows_fs:
            if self.filesystem.alternative_path_separator is not None:
                path_seps = (
                    sep, self.filesystem._alternative_path_separator(path)
                )
            else:
                path_seps = (sep,)
            drive, rest = self.filesystem.splitdrive(normed_path)
            if drive and drive[:1] in path_seps:
                return (not rest) or (rest in path_seps)
            if rest in path_seps:
                return True
        for mount_point in self.filesystem.mount_points:
            if normed_path.rstrip(sep) == mount_point.rstrip(sep):
                return True
        return False

    def __getattr__(self, name):
        """Forward any non-faked calls to the real os.path module copy."""
        return getattr(self._os_path, name)
class FakeOsModule:
    """Uses FakeFilesystem to provide a fake os module replacement;
    delegates all filesystem operations to the fake filesystem."""
    # path of the fake null device; set per instantiation in __init__
    # depending on the fake filesystem's OS flavor
    devnull = None

    @staticmethod
    def dir():
        """Return the list of patched os function names; platform and
        feature dependent entries are appended conditionally."""
        dir = [
            'access', 'chdir', 'chmod', 'chown', 'close', 'fstat', 'fsync',
            'getcwd', 'lchmod', 'link', 'listdir', 'lstat', 'makedirs',
            'mkdir', 'mknod', 'open', 'read', 'readlink', 'remove',
            'removedirs', 'rename', 'rmdir', 'stat', 'symlink', 'umask',
            'unlink', 'utime', 'walk', 'write', 'getcwdb', 'replace'
        ]
        if sys.platform.startswith('linux'):
            # extended attribute functions only exist on Linux
            dir += [
                'fdatasync', 'getxattr', 'listxattr',
                'removexattr', 'setxattr'
            ]
        if use_scandir:
            dir += ['scandir']
        return dir
def __init__(self, filesystem):
    """Create the fake os module bound to the given fake filesystem.

    Args:
        filesystem: FakeFilesystem instance used for all os operations.
    """
    self.filesystem = filesystem
    self.sep = filesystem.path_separator
    self.altsep = filesystem.alternative_path_separator
    self.linesep = filesystem.line_separator()
    self._os_module = os
    self.path = FakePathModule(self.filesystem, self)
    # BUGFIX: both branches previously used the misspelled '/dev/nul';
    # the null device is 'nul' on Windows and '/dev/null' elsewhere.
    self.__class__.devnull = ('nul' if filesystem.is_windows_fs
                              else '/dev/null')
def fdopen(self, fd, *args, **kwargs):
    """Redirect os.fdopen to the fake file-open machinery.

    Raises:
        TypeError: if fd is not an integer.
    """
    if not is_int_type(fd):
        raise TypeError('an integer is required')
    opener = FakeFileOpen(self.filesystem)
    return opener(fd, *args, **kwargs)
def _umask(self):
    """Return the effective umask without permanently changing it."""
    if self.filesystem.is_windows_fs:
        # the fake Windows filesystem has no umask concept
        return 0
    if sys.platform == 'win32':
        # real OS is Windows but the fake fs is POSIX: use a default
        return 0o002
    # probe the real umask by setting and immediately restoring it
    current = os.umask(0)
    os.umask(current)
    return current
def open(self, path, flags, mode=None, *, dir_fd=None):
    """Return the file descriptor for a FakeFile, decoding os.O_* flags.

    Args:
        path: the path to the file.
        flags: low-level bits to indicate io operation (os.O_* flags).
        mode: permission bits for a newly created file; defaults to
            0o666 on Windows, 0o777 masked by umask otherwise.
        dir_fd: directory descriptor that `path` is relative to.

    Returns:
        A file descriptor (int).

    Raises:
        NotImplementedError: for O_EXCL without O_CREAT.
        OSError: EISDIR when opening a directory for writing.
    """
    path = self._path_with_dir_fd(path, self.open, dir_fd)
    if mode is None:
        if self.filesystem.is_windows_fs:
            mode = 0o666
        else:
            mode = 0o777 & ~self._umask()

    # O_TMPFILE creates an unnamed temporary file (Linux only)
    has_tmpfile_flag = (hasattr(os, 'O_TMPFILE') and
                        flags & getattr(os, 'O_TMPFILE'))
    open_modes = _OpenModes(
        must_exist=not flags & os.O_CREAT and not has_tmpfile_flag,
        can_read=not flags & os.O_WRONLY,
        can_write=flags & (os.O_RDWR | os.O_WRONLY) != 0,
        truncate=flags & os.O_TRUNC != 0,
        append=flags & os.O_APPEND != 0,
        must_not_exist=flags & os.O_EXCL != 0
    )
    if open_modes.must_not_exist and open_modes.must_exist:
        raise NotImplementedError(
            'O_EXCL without O_CREAT mode is not supported')
    if has_tmpfile_flag:
        # simulate an unnamed temp file under a random unique name
        path = self.filesystem.joinpaths(
            path, str(uuid.uuid4()))

    if (not self.filesystem.is_windows_fs and
            self.filesystem.exists(path)):
        # handle opening a directory: allowed read-only on POSIX
        obj = self.filesystem.resolve(path)
        if isinstance(obj, FakeDirectory):
            if ((not open_modes.must_exist and
                 not self.filesystem.is_macos)
                    or open_modes.can_write):
                self.filesystem.raise_os_error(errno.EISDIR, path)
            dir_wrapper = FakeDirWrapper(obj, path, self.filesystem)
            file_des = self.filesystem._add_open_file(dir_wrapper)
            dir_wrapper.filedes = file_des
            return file_des

    # low-level open() is always binary
    str_flags = 'b'
    delete_on_close = has_tmpfile_flag
    if hasattr(os, 'O_TEMPORARY'):
        delete_on_close = flags & os.O_TEMPORARY == os.O_TEMPORARY
    fake_file = FakeFileOpen(
        self.filesystem, delete_on_close=delete_on_close, raw_io=True)(
        path, str_flags, open_modes=open_modes)
    if fake_file.file_object != self.filesystem.dev_null:
        self.chmod(path, mode)
    return fake_file.fileno()
def close(self, fd):
    """Close the file corresponding to the given descriptor."""
    self.filesystem.get_open_file(fd).close()
def read(self, fd, n):
    """Read at most *n* bytes from the open file with descriptor *fd*."""
    handle = self.filesystem.get_open_file(fd)
    handle.raw_io = True
    return handle.read(n)
def write(self, fd, contents):
    """Write `contents` to the open file with descriptor `fd`.

    Returns:
        The number of bytes written.

    Raises:
        OSError: EBADF if `fd` refers to an open directory.
    """
    file_handle = self.filesystem.get_open_file(fd)
    if isinstance(file_handle, FakeDirWrapper):
        self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path)

    if isinstance(file_handle, FakePipeWrapper):
        return file_handle.write(contents)

    file_handle.raw_io = True
    # sync any pending buffered state before appending at the end
    file_handle._sync_io()
    file_handle.update_flush_pos()
    file_handle.write(contents)
    file_handle.flush()
    return len(contents)
def pipe(self):
    """Create a real OS pipe and register both ends as fake descriptors;
    returns (read_fd, write_fd)."""
    real_read_fd, real_write_fd = os.pipe()
    fake_descriptors = []
    for real_fd in (real_read_fd, real_write_fd):
        wrapper = FakePipeWrapper(self.filesystem, real_fd)
        wrapper.filedes = self.filesystem._add_open_file(wrapper)
        fake_descriptors.append(wrapper.filedes)
    return tuple(fake_descriptors)
@staticmethod
def stat_float_times(newvalue=None):
    """Get or set whether stat times are reported as floats;
    delegates to FakeStatResult."""
    return FakeStatResult.stat_float_times(newvalue)
def fstat(self, fd):
    """Return a copy of the stat result for the object behind *fd*."""
    wrapped = self.filesystem.get_open_file(fd)
    return wrapped.get_object().stat_result.copy()
def umask(self, mask):
    """Set the fake filesystem's umask and return the previous value.

    Raises:
        TypeError: if mask is not an integer.
    """
    if not is_int_type(mask):
        raise TypeError('an integer is required')
    previous = self.filesystem.umask
    self.filesystem.umask = mask
    return previous
def chdir(self, path):
    """Change the current working directory to the target directory.

    Args:
        path: the path to the new working directory (fd allowed).

    Raises:
        OSError: ENOTDIR if the target is not a directory, EACCES if
            it lacks the execute (search) permission bit.
    """
    path = self.filesystem.resolve_path(
        path, allow_fd=True)
    self.filesystem.confirmdir(path)
    directory = self.filesystem.resolve(path)
    # BUGFIX: the original tested `st_mode | PERM_EXE`, which is always
    # non-zero, so the permission check could never fire; a directory
    # needs the execute bit to be entered (&, not |).
    if not is_root() and not directory.st_mode & PERM_EXE:
        self.filesystem.raise_os_error(errno.EACCES, directory)
    self.filesystem.cwd = path
def getcwd(self):
    """Return the current working directory of the fake filesystem."""
    return self.filesystem.cwd
def getcwdb(self):
    """Return the current working directory as a bytes object."""
    encoding = locale.getpreferredencoding(False)
    return bytes(self.filesystem.cwd, encoding)
def listdir(self, path):
    """Return a list of file names in the directory `path`."""
    return self.filesystem.listdir(path)
# flag values for setxattr(), mirroring os.XATTR_CREATE / os.XATTR_REPLACE
XATTR_CREATE = 1
XATTR_REPLACE = 2

def getxattr(self, path, attribute, *, follow_symlinks=True):
    """Return the value of extended attribute `attribute`, or None if
    not set (Linux only).

    Raises:
        AttributeError: if the fake filesystem is not Linux.
    """
    if not self.filesystem.is_linux:
        raise AttributeError(
            "module 'os' has no attribute 'getxattr'")

    if isinstance(attribute, bytes):
        attribute = attribute.decode(sys.getfilesystemencoding())
    file_obj = self.filesystem.resolve(path, follow_symlinks,
                                       allow_fd=True)
    return file_obj.xattr.get(attribute)
def listxattr(self, path=None, *, follow_symlinks=True):
    """Return the names of all extended attributes of *path*
    (defaults to the cwd; Linux only)."""
    if not self.filesystem.is_linux:
        raise AttributeError(
            "module 'os' has no attribute 'listxattr'")

    target = self.getcwd() if path is None else path
    file_obj = self.filesystem.resolve(target, follow_symlinks,
                                       allow_fd=True)
    return [name for name in file_obj.xattr]
def removexattr(self, path, attribute, *, follow_symlinks=True):
    """Remove extended attribute *attribute* from *path* if present
    (Linux only)."""
    if not self.filesystem.is_linux:
        raise AttributeError(
            "module 'os' has no attribute 'removexattr'")

    if isinstance(attribute, bytes):
        attribute = attribute.decode(sys.getfilesystemencoding())
    file_obj = self.filesystem.resolve(path, follow_symlinks,
                                       allow_fd=True)
    # pop() with a default silently ignores a missing attribute,
    # matching the original membership-checked delete
    file_obj.xattr.pop(attribute, None)
def setxattr(self, path, attribute, value,
             flags=0, *, follow_symlinks=True):
    """Set the value of an extended filesystem attribute (Linux only).

    Args:
        path: target file path.
        attribute: attribute name (str or bytes).
        value: attribute value; must be a bytes-like object.
        flags: 0, XATTR_CREATE or XATTR_REPLACE.
        follow_symlinks: resolve symlinks before applying.

    Raises:
        TypeError: if value is not bytes-like.
        OSError: EEXIST if XATTR_CREATE is given and the attribute
            exists; ENODATA if XATTR_REPLACE is given and it does not
            (matching setxattr(2) semantics).
    """
    if not self.filesystem.is_linux:
        raise AttributeError(
            "module 'os' has no attribute 'setxattr'")

    if isinstance(attribute, bytes):
        attribute = attribute.decode(sys.getfilesystemencoding())
    if not is_byte_string(value):
        raise TypeError('a bytes-like object is required')
    file_obj = self.filesystem.resolve(path, follow_symlinks,
                                       allow_fd=True)
    exists = attribute in file_obj.xattr
    # BUGFIX: the two error codes were swapped; per setxattr(2),
    # XATTR_CREATE on an existing attribute raises EEXIST and
    # XATTR_REPLACE on a missing attribute raises ENODATA.
    if exists and flags == self.XATTR_CREATE:
        self.filesystem.raise_os_error(errno.EEXIST, file_obj.path)
    if not exists and flags == self.XATTR_REPLACE:
        self.filesystem.raise_os_error(errno.ENODATA, file_obj.path)
    file_obj.xattr[attribute] = value
if use_scandir:
    def scandir(self, path='.'):
        """Return an iterator of DirEntry-like objects for `path`;
        only defined when scandir support is available."""
        # calls the module-level scandir helper, not this method
        return scandir(self.filesystem, path)
def walk(self, top, topdown=True, onerror=None, followlinks=False):
    """Walk the fake directory tree rooted at `top`, mirroring os.walk;
    delegates to the module-level walk helper."""
    return walk(self.filesystem, top, topdown, onerror, followlinks)
def readlink(self, path, dir_fd=None):
    """Return the target of the symlink at *path* (dir_fd supported)."""
    resolved = self._path_with_dir_fd(path, self.readlink, dir_fd)
    return self.filesystem.readlink(resolved)
def stat(self, path, *, dir_fd=None, follow_symlinks=True):
    """Return the stat result for *path*, following links by default."""
    resolved = self._path_with_dir_fd(path, self.stat, dir_fd)
    return self.filesystem.stat(resolved, follow_symlinks)
def lstat(self, path, *, dir_fd=None):
    """Return the stat result for *path* without following symlinks."""
    resolved = self._path_with_dir_fd(path, self.lstat, dir_fd)
    return self.filesystem.stat(resolved, follow_symlinks=False)
def remove(self, path, dir_fd=None):
    """Remove the file at *path*; directories are rejected."""
    resolved = self._path_with_dir_fd(path, self.remove, dir_fd)
    self.filesystem.remove(resolved)
def unlink(self, path, *, dir_fd=None):
    """Remove the file at *path*; alias semantics of os.remove."""
    resolved = self._path_with_dir_fd(path, self.unlink, dir_fd)
    self.filesystem.remove(resolved)
def rename(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
    """Rename *src* to *dst* without forcing replacement of *dst*."""
    source = self._path_with_dir_fd(src, self.rename, src_dir_fd)
    destination = self._path_with_dir_fd(dst, self.rename, dst_dir_fd)
    self.filesystem.rename(source, destination)
def replace(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
    """Rename *src* to *dst*, overwriting *dst* if it exists."""
    # dir_fd support is keyed off os.rename, same as rename()
    source = self._path_with_dir_fd(src, self.rename, src_dir_fd)
    destination = self._path_with_dir_fd(dst, self.rename, dst_dir_fd)
    self.filesystem.rename(source, destination, force_replace=True)
def rmdir(self, path, *, dir_fd=None):
    """Remove the empty directory at *path*."""
    resolved = self._path_with_dir_fd(path, self.rmdir, dir_fd)
    self.filesystem.rmdir(resolved)
def removedirs(self, name):
    """Remove the leaf directory, then prune now-empty parents upward.

    Args:
        name: the directory to remove.

    Raises:
        OSError: ENOTEMPTY if the leaf directory is not empty.
    """
    name = self.filesystem.absnormpath(name)
    directory = self.filesystem.confirmdir(name)
    if directory.contents:
        self.filesystem.raise_os_error(
            errno.ENOTEMPTY, self.path.basename(name))
    else:
        self.rmdir(name)
    head, tail = self.path.split(name)
    if not tail:
        # name ended with a separator - split once more
        head, tail = self.path.split(head)
    while head and tail:
        head_dir = self.filesystem.confirmdir(head)
        if head_dir.contents:
            # stop at the first non-empty ancestor
            break
        # only deleted as a second result of the removedirs call
        self.filesystem.rmdir(head, allow_symlink=True)
        head, tail = self.path.split(head)
def mkdir(self, path, mode=PERM_DEF, *, dir_fd=None):
    """Create the directory *path*; its parent must already exist."""
    resolved = self._path_with_dir_fd(path, self.mkdir, dir_fd)
    try:
        self.filesystem.makedir(resolved, mode)
    except OSError as err:
        # re-raise EACCES with the resolved path for a clearer message
        if err.errno == errno.EACCES:
            self.filesystem.raise_os_error(err.errno, resolved)
        raise
def makedirs(self, name, mode=PERM_DEF, exist_ok=None):
    """Create the directory *name* and any missing parents."""
    effective_exist_ok = False if exist_ok is None else exist_ok
    self.filesystem.makedirs(name, mode, effective_exist_ok)
def _path_with_dir_fd(self, path, fct, dir_fd):
    """Return the path considering the `dir_fd` argument.

    A relative `path` is joined onto the directory open at `dir_fd`;
    an absolute path is returned unchanged.

    Args:
        path: the (possibly relative) path.
        fct: the calling bound method; its name is used to look up the
            real os function for dir_fd support checking.
        dir_fd: directory file descriptor, or None.

    Raises:
        NotImplementedError: if the platform's os function does not
            support dir_fd.
        ValueError: if path is itself a file descriptor.
    """
    path = to_string(path)
    if dir_fd is not None:
        # check if fd is supported for the built-in real function
        real_fct = getattr(os, fct.__name__)
        if real_fct not in self.supports_dir_fd:
            raise NotImplementedError(
                'dir_fd unavailable on this platform')
        if isinstance(path, int):
            raise ValueError("%s: Can't specify dir_fd without "
                             "matching path" % fct.__name__)
        if not self.path.isabs(path):
            return self.path.join(
                self.filesystem.get_open_file(
                    dir_fd).get_object().path, path)
    return path
def access(self, path, mode, *, dir_fd=None, follow_symlinks=True):
    """Check if a file exists and grants access for the current user.

    Args:
        path: path to the file.
        mode: the requested access bits (combination of R_OK/W_OK/X_OK).
        dir_fd: directory descriptor that `path` is relative to.
        follow_symlinks: resolve symlinks before checking.

    Returns:
        True if the file exists and the requested bits are granted.
    """
    path = self._path_with_dir_fd(path, self.access, dir_fd)
    try:
        stat_result = self.stat(path, follow_symlinks=follow_symlinks)
    except OSError as os_error:
        if os_error.errno == errno.ENOENT:
            return False
        raise
    if is_root():
        # root is only denied write access if explicitly unset
        mode &= ~os.W_OK
    # only the user permission bits (bits 6-8) are checked here
    return (mode & ((stat_result.st_mode >> 6) & 7)) == mode
def chmod(self, path, mode, *, dir_fd=None, follow_symlinks=True):
    """Change the permission bits of the file at *path*."""
    resolved = self._path_with_dir_fd(path, self.chmod, dir_fd)
    self.filesystem.chmod(resolved, mode, follow_symlinks)
def lchmod(self, path, mode):
    """Change the permissions of a file without following symlinks.

    Raises:
        NameError: on a Windows fake filesystem, where os.lchmod does
            not exist.
    """
    if self.filesystem.is_windows_fs:
        # BUGFIX: `raise (NameError, "...")` raises a tuple, which is a
        # TypeError in Python 3; raise the NameError instance properly.
        raise NameError("name 'lchmod' is not defined")
    self.filesystem.chmod(path, mode, follow_symlinks=False)
def utime(self, path, times=None, ns=None,
          dir_fd=None, follow_symlinks=True):
    """Set the access and modification times of the file at *path*."""
    resolved = self._path_with_dir_fd(path, self.utime, dir_fd)
    self.filesystem.utime(
        resolved, times=times, ns=ns, follow_symlinks=follow_symlinks)
def chown(self, path, uid, gid, *, dir_fd=None, follow_symlinks=True):
    """Set the owner and group id of the file at `path`.

    A value of -1 leaves the respective id unchanged.

    Raises:
        TypeError: if uid or gid is neither an int nor None.
    """
    path = self._path_with_dir_fd(path, self.chown, dir_fd)
    file_object = self.filesystem.resolve(
        path, follow_symlinks, allow_fd=True)
    if not ((is_int_type(uid) or uid is None) and
            (is_int_type(gid) or gid is None)):
        raise TypeError("An integer is required")
    if uid != -1:
        # NOTE(review): a None uid/gid passes validation and is stored
        # as-is; verify this matches intended behavior
        file_object.st_uid = uid
    if gid != -1:
        file_object.st_gid = gid
def mknod(self, path, mode=None, device=0, *, dir_fd=None):
if self.filesystem.is_windows_fs:
raise (AttributeError, "module 'os' has no attribute 'mknode'")
if mode is None:
# note that a default value of 0o600 without a device type is
# documented - this is not how it seems to work
mode = S_IFREG | 0o600
if device or not mode & S_IFREG and not is_root():
self.filesystem.raise_os_error(errno.EPERM)
path = self._path_with_dir_fd(path, self.mknod, dir_fd)
head, tail = self.path.split(path)
if not tail:
if self.filesystem.exists(head, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, path)
self.filesystem.raise_os_error(errno.ENOENT, path)
if tail in (b'.', u'.', b'..', u'..'):
self.filesystem.raise_os_error(errno.ENOENT, path)
if self.filesystem.exists(path, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, path)
self.filesystem.add_object(head, FakeFile(
tail, mode & ~self.filesystem.umask,
filesystem=self.filesystem))
def symlink(self, src, dst, *, dir_fd=None):
src = self._path_with_dir_fd(src, self.symlink, dir_fd)
self.filesystem.create_symlink(
dst, src, create_missing_dirs=False)
def link(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
src = self._path_with_dir_fd(src, self.link, src_dir_fd)
dst = self._path_with_dir_fd(dst, self.link, dst_dir_fd)
self.filesystem.link(src, dst)
def fsync(self, fd):
# Throw an error if file_des isn't valid
if 0 <= fd < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
file_object = self.filesystem.get_open_file(fd)
if self.filesystem.is_windows_fs:
if (not hasattr(file_object, 'allow_update') or
not file_object.allow_update):
self.filesystem.raise_os_error(
errno.EBADF, file_object.file_path)
def fdatasync(self, fd):
if self.filesystem.is_windows_fs or self.filesystem.is_macos:
raise AttributeError("module 'os' has no attribute 'fdatasync'")
if 0 <= fd < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
self.filesystem.get_open_file(fd)
def sendfile(self, fd_out, fd_in, offset, count):
if self.filesystem.is_windows_fs:
raise AttributeError("module 'os' has no attribute 'sendfile'")
if 0 <= fd_in < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
if 0 <= fd_out < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
source = self.filesystem.get_open_file(fd_in)
dest = self.filesystem.get_open_file(fd_out)
if self.filesystem.is_macos:
if dest.get_object().stat_result.st_mode & 0o777000 != S_IFSOCK:
raise OSError('Socket operation on non-socket')
if offset is None:
if self.filesystem.is_macos:
raise TypeError('None is not a valid offset')
contents = source.read(count)
else:
position = source.tell()
source.seek(offset)
if count == 0 and self.filesystem.is_macos:
contents = source.read()
else:
contents = source.read(count)
source.seek(position)
if contents:
written = dest.write(contents)
dest.flush()
return written
return 0
def __getattr__(self, name):
return getattr(self._os_module, name)
class FakeIoModule:
@staticmethod
def dir():
return 'open',
def __init__(self, filesystem):
self.filesystem = filesystem
self._io_module = io
def open(self, file, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None):
fake_open = FakeFileOpen(self.filesystem)
return fake_open(file, mode, buffering, encoding, errors,
newline, closefd, opener)
def __getattr__(self, name):
return getattr(self._io_module, name)
class FakeFileWrapper:
def __init__(self, file_object, file_path, update=False, read=False,
append=False, delete_on_close=False, filesystem=None,
newline=None, binary=True, closefd=True, encoding=None,
errors=None, raw_io=False, is_stream=False):
self.file_object = file_object
self.file_path = file_path
self._append = append
self._read = read
self.allow_update = update
self._closefd = closefd
self._file_epoch = file_object.epoch
self.raw_io = raw_io
self._binary = binary
self.is_stream = is_stream
self._changed = False
contents = file_object.byte_contents
self._encoding = encoding or locale.getpreferredencoding(False)
errors = errors or 'strict'
buffer_class = (NullFileBufferIO if file_object == filesystem.dev_null
else FileBufferIO)
self._io = buffer_class(contents, linesep=filesystem.line_separator(),
binary=binary, encoding=encoding,
newline=newline, errors=errors)
self._read_whence = 0
self._read_seek = 0
self._flush_pos = 0
if contents:
self._flush_pos = len(contents)
if update:
if not append:
self._io.seek(0)
else:
self._io.seek(self._flush_pos)
self._read_seek = self._io.tell()
if delete_on_close:
assert filesystem, 'delete_on_close=True requires filesystem'
self._filesystem = filesystem
self.delete_on_close = delete_on_close
# override, don't modify FakeFile.name, as FakeFilesystem expects
self.name = file_object.opened_as
self.filedes = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _raise(self, message):
if self.raw_io:
self._filesystem.raise_os_error(errno.EBADF, self.file_path)
raise io.UnsupportedOperation(message)
def get_object(self):
return self.file_object
def fileno(self):
return self.filedes
def close(self):
if not self._is_open():
return
if self.allow_update and not self.raw_io:
self.flush()
if self._filesystem.is_windows_fs and self._changed:
self.file_object.st_mtime = time.time()
if self._closefd:
self._filesystem._close_open_file(self.filedes)
else:
self._filesystem.open_files[self.filedes].remove(self)
if self.delete_on_close:
self._filesystem.remove_object(self.get_object().path)
@property
def closed(self):
return not self._is_open()
def flush(self):
self._check_open_file()
if self.allow_update and not self.is_stream:
contents = self._io.getvalue()
if self._append:
self._sync_io()
old_contents = (self.file_object.byte_contents
if is_byte_string(contents) else
self.file_object.contents)
contents = old_contents + contents[self._flush_pos:]
self._set_stream_contents(contents)
self.update_flush_pos()
else:
self._io.flush()
if self.file_object.set_contents(contents, self._encoding):
if self._filesystem.is_windows_fs:
self._changed = True
else:
current_time = time.time()
self.file_object.st_ctime = current_time
self.file_object.st_mtime = current_time
self._file_epoch = self.file_object.epoch
if not self.is_stream:
self._flush_related_files()
def update_flush_pos(self):
self._flush_pos = self._io.tell()
def _flush_related_files(self):
for open_files in self._filesystem.open_files[3:]:
if open_files is not None:
for open_file in open_files:
if (open_file is not self and
self.file_object == open_file.file_object and
not open_file._append):
open_file._sync_io()
def seek(self, offset, whence=0):
self._check_open_file()
if not self._append:
self._io.seek(offset, whence)
else:
self._read_seek = offset
self._read_whence = whence
if not self.is_stream:
self.flush()
def tell(self):
self._check_open_file()
if not self.is_stream:
self.flush()
if not self._append:
return self._io.tell()
if self._read_whence:
write_seek = self._io.tell()
self._io.seek(self._read_seek, self._read_whence)
self._read_seek = self._io.tell()
self._read_whence = 0
self._io.seek(write_seek)
return self._read_seek
def _sync_io(self):
if self._file_epoch == self.file_object.epoch:
return
if self._io.binary:
contents = self.file_object.byte_contents
else:
contents = self.file_object.contents
self._set_stream_contents(contents)
self._file_epoch = self.file_object.epoch
def _set_stream_contents(self, contents):
whence = self._io.tell()
self._io.seek(0)
self._io.truncate()
if not self._io.binary and is_byte_string(contents):
contents = contents.decode(self._encoding)
self._io.putvalue(contents)
if not self._append:
self._io.seek(whence)
def _read_wrappers(self, name):
io_attr = getattr(self._io, name)
def read_wrapper(*args, **kwargs):
self._io.seek(self._read_seek, self._read_whence)
ret_value = io_attr(*args, **kwargs)
self._read_seek = self._io.tell()
self._read_whence = 0
self._io.seek(0, 2)
return ret_value
return read_wrapper
def _other_wrapper(self, name, writing):
io_attr = getattr(self._io, name)
def other_wrapper(*args, **kwargs):
write_seek = self._io.tell()
ret_value = io_attr(*args, **kwargs)
if write_seek != self._io.tell():
self._read_seek = self._io.tell()
self._read_whence = 0
return ret_value
return other_wrapper
def _adapt_size_for_related_files(self, size):
for open_files in self._filesystem.open_files[3:]:
if open_files is not None:
for open_file in open_files:
if (open_file is not self and
self.file_object == open_file.file_object and
open_file._append):
open_file._read_seek += size
def _truncate_wrapper(self):
io_attr = getattr(self._io, 'truncate')
def truncate_wrapper(*args, **kwargs):
if self._append:
self._io.seek(self._read_seek, self._read_whence)
size = io_attr(*args, **kwargs)
self.flush()
if not self.is_stream:
self.file_object.size = size
buffer_size = len(self._io.getvalue())
if buffer_size < size:
self._io.seek(buffer_size)
self._io.write('\0' * (size - buffer_size))
self.file_object.set_contents(
self._io.getvalue(), self._encoding)
self._flush_pos = size
self._adapt_size_for_related_files(size - buffer_size)
self.flush()
return size
return truncate_wrapper
def size(self):
return self.file_object.st_size
def __getattr__(self, name):
if self.file_object.is_large_file():
raise FakeLargeFileIoException(self.file_path)
reading = name.startswith('read') or name == 'next'
truncate = name == 'truncate'
writing = name.startswith('write') or truncate
if reading or writing:
self._check_open_file()
if not self._read and reading:
return self._read_error()
if not self.allow_update and writing:
return self._write_error()
if reading:
self._sync_io()
if not self.is_stream:
self.flush()
if not self._filesystem.is_windows_fs:
self.file_object.st_atime = time.time()
if truncate:
return self._truncate_wrapper()
if self._append:
if reading:
return self._read_wrappers(name)
else:
return self._other_wrapper(name, writing)
return getattr(self._io, name)
def _read_error(self):
def read_error(*args, **kwargs):
if args and args[0] == 0:
if self._filesystem.is_windows_fs and self.raw_io:
return b'' if self._binary else u''
self._raise('File is not open for reading.')
return read_error
def _write_error(self):
def write_error(*args, **kwargs):
if self.raw_io:
if (self._filesystem.is_windows_fs and args
and len(args[0]) == 0):
return 0
self._raise('File is not open for writing.')
return write_error
def _is_open(self):
return (self.filedes < len(self._filesystem.open_files) and
self._filesystem.open_files[self.filedes] is not None and
self in self._filesystem.open_files[self.filedes])
def _check_open_file(self):
if not self.is_stream and not self._is_open():
raise ValueError('I/O operation on closed file')
def __iter__(self):
if not self._read:
self._raise('File is not open for reading')
return self._io.__iter__()
def __next__(self):
if not self._read:
self._raise('File is not open for reading')
return next(self._io)
class StandardStreamWrapper:
def __init__(self, stream_object):
self._stream_object = stream_object
self.filedes = None
def get_object(self):
return self._stream_object
def fileno(self):
return self.filedes
def close(self):
pass
def is_stream(self):
return True
class FakeDirWrapper:
def __init__(self, file_object, file_path, filesystem):
self.file_object = file_object
self.file_path = file_path
self._filesystem = filesystem
self.filedes = None
def get_object(self):
return self.file_object
def fileno(self):
return self.filedes
def close(self):
self._filesystem._close_open_file(self.filedes)
class FakePipeWrapper:
def __init__(self, filesystem, fd):
self._filesystem = filesystem
self.fd = fd
self.file_object = None
self.filedes = None
def get_object(self):
return self.file_object
def fileno(self):
return self.filedes
def read(self, numBytes):
return os.read(self.fd, numBytes)
def write(self, contents):
return os.write(self.fd, contents)
def close(self):
self._filesystem.open_files[self.filedes].remove(self)
os.close(self.fd)
Deprecator.add(FakeFileWrapper, FakeFileWrapper.get_object, 'GetObject')
Deprecator.add(FakeFileWrapper, FakeFileWrapper.size, 'Size')
class FakeFileOpen:
__name__ = 'FakeFileOpen'
def __init__(self, filesystem, delete_on_close=False, raw_io=False):
self.filesystem = filesystem
self._delete_on_close = delete_on_close
self.raw_io = raw_io
def __call__(self, *args, **kwargs):
return self.call(*args, **kwargs)
def call(self, file_, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None,
open_modes=None):
binary = 'b' in mode
newline, open_modes = self._handle_file_mode(mode, newline, open_modes)
file_object, file_path, filedes, real_path = self._handle_file_arg(
file_)
if not filedes:
closefd = True
if (open_modes.must_not_exist and
(file_object or self.filesystem.islink(file_path) and
not self.filesystem.is_windows_fs)):
self.filesystem.raise_os_error(errno.EEXIST, file_path)
file_object = self._init_file_object(file_object,
file_path, open_modes,
real_path)
if S_ISDIR(file_object.st_mode):
if self.filesystem.is_windows_fs:
self.filesystem.raise_os_error(errno.EACCES, file_path)
else:
self.filesystem.raise_os_error(errno.EISDIR, file_path)
file_object.opened_as = file_path
if open_modes.truncate:
current_time = time.time()
file_object.st_mtime = current_time
if not self.filesystem.is_windows_fs:
file_object.st_ctime = current_time
fakefile = FakeFileWrapper(file_object,
file_path,
update=open_modes.can_write,
read=open_modes.can_read,
append=open_modes.append,
delete_on_close=self._delete_on_close,
filesystem=self.filesystem,
newline=newline,
binary=binary,
closefd=closefd,
encoding=encoding,
errors=errors,
raw_io=self.raw_io)
if filedes is not None:
fakefile.filedes = filedes
self.filesystem.open_files[filedes].append(fakefile)
else:
fakefile.filedes = self.filesystem._add_open_file(fakefile)
return fakefile
def _init_file_object(self, file_object, file_path,
open_modes, real_path):
if file_object:
if (not is_root() and
((open_modes.can_read and
not file_object.st_mode & PERM_READ)
or (open_modes.can_write and
not file_object.st_mode & PERM_WRITE))):
self.filesystem.raise_os_error(errno.EACCES, file_path)
if open_modes.can_write:
if open_modes.truncate:
file_object.set_contents('')
else:
if open_modes.must_exist:
self.filesystem.raise_os_error(errno.ENOENT, file_path)
if self.filesystem.islink(file_path):
link_object = self.filesystem.resolve(file_path,
follow_symlinks=False)
target_path = link_object.contents
else:
target_path = file_path
if self.filesystem.ends_with_path_separator(target_path):
error = (errno.EINVAL if self.filesystem.is_windows_fs
else errno.ENOENT if self.filesystem.is_macos
else errno.EISDIR)
self.filesystem.raise_os_error(error, file_path)
file_object = self.filesystem.create_file_internally(
real_path, create_missing_dirs=False,
apply_umask=True, raw_io=self.raw_io)
return file_object
def _handle_file_arg(self, file_):
file_object = None
if isinstance(file_, int):
filedes = file_
wrapper = self.filesystem.get_open_file(filedes)
self._delete_on_close = wrapper.delete_on_close
file_object = self.filesystem.get_open_file(filedes).get_object()
file_path = file_object.name
real_path = file_path
else:
filedes = None
file_path = file_
if file_path == self.filesystem.dev_null.name:
file_object = self.filesystem.dev_null
real_path = file_path
else:
real_path = self.filesystem.resolve_path(
file_path, raw_io=self.raw_io)
if self.filesystem.exists(file_path):
file_object = self.filesystem.get_object_from_normpath(
real_path, check_read_perm=False)
return file_object, file_path, filedes, real_path
def _handle_file_mode(self, mode, newline, open_modes):
orig_modes = mode
if 'b' in mode and 't' in mode:
raise ValueError('Invalid mode: ' + mode)
mode = mode.replace('t', '').replace('b', '')
mode = mode.replace('rU', 'r').replace('U', 'r')
if not self.raw_io:
if mode not in _OPEN_MODE_MAP:
raise ValueError('Invalid mode: %r' % orig_modes)
open_modes = _OpenModes(*_OPEN_MODE_MAP[mode])
return newline, open_modes
def _run_doctest():
import doctest
import pyfakefs
return doctest.testmod(pyfakefs.fake_filesystem)
if __name__ == '__main__':
_run_doctest()
| true | true |
f71563a5ae0890e106de48a27661337660c13119 | 5,139 | py | Python | Lib/wsgiref/simple_server.py | livioso/cpython | 077061a7b24917aaf31057885c69919c5a553c88 | [
"PSF-2.0"
] | 11,058 | 2018-05-29T07:40:06.000Z | 2022-03-31T11:38:42.000Z | Lib/wsgiref/simple_server.py | livioso/cpython | 077061a7b24917aaf31057885c69919c5a553c88 | [
"PSF-2.0"
] | 2,105 | 2018-06-01T10:07:16.000Z | 2022-03-31T14:56:42.000Z | Lib/wsgiref/simple_server.py | livioso/cpython | 077061a7b24917aaf31057885c69919c5a553c88 | [
"PSF-2.0"
] | 914 | 2018-07-27T09:36:14.000Z | 2022-03-31T19:56:34.000Z | """BaseHTTPServer that implements the Python WSGI protocol (PEP 3333)
This is both an example of how WSGI can be implemented, and a basis for running
simple web applications on a local machine, such as might be done when testing
or debugging an application. It has not been reviewed for security issues,
however, and we strongly recommend that you use a "real" web server for
production use.
For example usage, see the 'if __name__=="__main__"' block at the end of the
module. See also the BaseHTTPServer module docs for other API information.
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import sys
import urllib.parse
from wsgiref.handlers import SimpleHandler
from platform import python_implementation
__version__ = "0.2"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
server_version = "WSGIServer/" + __version__
sys_version = python_implementation() + "/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
server_software = software_version
def close(self):
try:
self.request_handler.log_request(
self.status.split(' ',1)[0], self.bytes_sent
)
finally:
SimpleHandler.close(self)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
HTTPServer.server_bind(self)
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['SERVER_SOFTWARE'] = self.server_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.parse.unquote(path, 'iso-8859-1')
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.get('content-type') is None:
env['CONTENT_TYPE'] = self.headers.get_content_type()
else:
env['CONTENT_TYPE'] = self.headers['content-type']
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
for k, v in self.headers.items():
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def demo_app(environ,start_response):
from io import StringIO
stdout = StringIO()
print("Hello world!", file=stdout)
print(file=stdout)
h = sorted(environ.items())
for k,v in h:
print(k,'=',repr(v), file=stdout)
start_response("200 OK", [('Content-Type','text/plain; charset=utf-8')])
return [stdout.getvalue().encode("utf-8")]
def make_server(
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
"""Create a new WSGI server listening on `host` and `port` for `app`"""
server = server_class((host, port), handler_class)
server.set_app(app)
return server
if __name__ == '__main__':
with make_server('', 8000, demo_app) as httpd:
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
import webbrowser
webbrowser.open('http://localhost:8000/xyz?abc')
httpd.handle_request() # serve one request, then exit
| 31.145455 | 79 | 0.627359 |
from http.server import BaseHTTPRequestHandler, HTTPServer
import sys
import urllib.parse
from wsgiref.handlers import SimpleHandler
from platform import python_implementation
__version__ = "0.2"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
server_version = "WSGIServer/" + __version__
sys_version = python_implementation() + "/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
server_software = software_version
def close(self):
try:
self.request_handler.log_request(
self.status.split(' ',1)[0], self.bytes_sent
)
finally:
SimpleHandler.close(self)
class WSGIServer(HTTPServer):
application = None
def server_bind(self):
HTTPServer.server_bind(self)
self.setup_environ()
def setup_environ(self):
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['SERVER_SOFTWARE'] = self.server_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.parse.unquote(path, 'iso-8859-1')
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.get('content-type') is None:
env['CONTENT_TYPE'] = self.headers.get_content_type()
else:
env['CONTENT_TYPE'] = self.headers['content-type']
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
for k, v in self.headers.items():
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.parse_request():
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self
handler.run(self.server.get_app())
def demo_app(environ,start_response):
from io import StringIO
stdout = StringIO()
print("Hello world!", file=stdout)
print(file=stdout)
h = sorted(environ.items())
for k,v in h:
print(k,'=',repr(v), file=stdout)
start_response("200 OK", [('Content-Type','text/plain; charset=utf-8')])
return [stdout.getvalue().encode("utf-8")]
def make_server(
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
server = server_class((host, port), handler_class)
server.set_app(app)
return server
if __name__ == '__main__':
with make_server('', 8000, demo_app) as httpd:
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
import webbrowser
webbrowser.open('http://localhost:8000/xyz?abc')
httpd.handle_request()
| true | true |
f71563a970e54d91f082ae73af7abad4a8b23fdf | 205 | py | Python | exe.curso em video/def 20.py | Lorenzo-Lopes/Python-Estudo | 7ee623ce29b6a0e9fac48189fbd9c641be84d418 | [
"MIT"
] | null | null | null | exe.curso em video/def 20.py | Lorenzo-Lopes/Python-Estudo | 7ee623ce29b6a0e9fac48189fbd9c641be84d418 | [
"MIT"
] | null | null | null | exe.curso em video/def 20.py | Lorenzo-Lopes/Python-Estudo | 7ee623ce29b6a0e9fac48189fbd9c641be84d418 | [
"MIT"
] | null | null | null | import random
n1 = str(input('nome 1='))
n2 = str(input('nome 2='))
n3 = str(input('nome 3='))
n4 = str(input('nome 4='))
lista = [n1, n2, n3, n4]
random.shuffle(lista)
print('nova ordem{}'.format(lista))
| 22.777778 | 35 | 0.62439 | import random
n1 = str(input('nome 1='))
n2 = str(input('nome 2='))
n3 = str(input('nome 3='))
n4 = str(input('nome 4='))
lista = [n1, n2, n3, n4]
random.shuffle(lista)
print('nova ordem{}'.format(lista))
| true | true |
f715643f43f6a40626ee3423dc028f67fe0c8522 | 113 | py | Python | mass/views.py | lsapan/channels-mass-broadcast | 4f60e059ea9e3a861cc47250347900a4d0b8bd7f | [
"MIT"
] | null | null | null | mass/views.py | lsapan/channels-mass-broadcast | 4f60e059ea9e3a861cc47250347900a4d0b8bd7f | [
"MIT"
] | null | null | null | mass/views.py | lsapan/channels-mass-broadcast | 4f60e059ea9e3a861cc47250347900a4d0b8bd7f | [
"MIT"
] | null | null | null | from django.views.generic.base import TemplateView
class AppView(TemplateView):
template_name = 'app.html'
| 18.833333 | 50 | 0.778761 | from django.views.generic.base import TemplateView
class AppView(TemplateView):
template_name = 'app.html'
| true | true |
f71565b7be10bec97c68c9837cc952d9b34744fe | 886 | py | Python | s5/local/extract_text.py | cadia-lvl/althingi-asr | 8830f40b8302834fc8176727a36ca8136cd9eedc | [
"Apache-2.0"
] | 4 | 2017-11-10T19:43:43.000Z | 2019-07-03T12:12:09.000Z | egs/althingi/s5/local/extract_text.py | ingarun/kaldi | 362ad3235c9740c0dfb0481cfbff81ca4604222d | [
"Apache-2.0"
] | null | null | null | egs/althingi/s5/local/extract_text.py | ingarun/kaldi | 362ad3235c9740c0dfb0481cfbff81ca4604222d | [
"Apache-2.0"
] | 1 | 2017-11-06T18:28:53.000Z | 2017-11-06T18:28:53.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import glob
import os
import codecs
import re
#from bs4 import BeautifulSoup
with codecs.open(sys.argv[2],'w',encoding='utf-8') as fout:
xmlpaths = glob.glob(os.path.join(sys.argv[1],'*.xml'))
for file in xmlpaths:
file_base = os.path.splitext(os.path.basename(file))[0]
with codecs.open(file,'r',encoding='utf-8') as fin:
#soup = BeautifulSoup(fin, 'lxml-xml')
#speech=soup.find('ræðutexti')
data=fin.read().replace('\n', ' ')
if re.search('<ræðutexti>(.*)</ræðutexti>',data) == None:
print(file_base, file=fout)
else:
body_txt = re.search('<ræðutexti>(.*)</ræðutexti>',data).group()
text = ' '.join([file_base, body_txt]).strip().replace('\n', ' ')
print(text, file=fout)
| 35.44 | 81 | 0.565463 |
import sys
import glob
import os
import codecs
import re
with codecs.open(sys.argv[2],'w',encoding='utf-8') as fout:
xmlpaths = glob.glob(os.path.join(sys.argv[1],'*.xml'))
for file in xmlpaths:
file_base = os.path.splitext(os.path.basename(file))[0]
with codecs.open(file,'r',encoding='utf-8') as fin:
data=fin.read().replace('\n', ' ')
if re.search('<ræðutexti>(.*)</ræðutexti>',data) == None:
print(file_base, file=fout)
else:
body_txt = re.search('<ræðutexti>(.*)</ræðutexti>',data).group()
text = ' '.join([file_base, body_txt]).strip().replace('\n', ' ')
print(text, file=fout)
| true | true |
f715670072d1d78f8599fbbeeae887a4a8b445cc | 2,970 | py | Python | news/views.py | serg1ua/today-ua | 6a3ad99c924884db81ecbdb5d3dc2255dd927b4e | [
"MIT"
] | null | null | null | news/views.py | serg1ua/today-ua | 6a3ad99c924884db81ecbdb5d3dc2255dd927b4e | [
"MIT"
] | null | null | null | news/views.py | serg1ua/today-ua | 6a3ad99c924884db81ecbdb5d3dc2255dd927b4e | [
"MIT"
] | null | null | null | import json
from django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from .models import Article
get_articles = 10
# Create your views here.
def index(request):
# print(dir(request))
return HttpResponseRedirect(reverse("main"))
def main(request):
try:
global get_articles
db_len = Article.objects.all().count()
counter = db_get_len(db_len, get_articles)
articles = Article.objects.all().order_by('-id')[:counter]
except Article.DoesNotExist:
raise Http404("Articles does not exist")
context = {
"title": "Головна",
"articles_3": articles[:3],
"articles_7": articles[3:10]
}
return render(request, "news/articles.html", context)
def section(request, selector):
try:
global get_articles
db_len = Article.objects.filter(tag__iexact=selector).count()
counter = db_get_len(db_len, get_articles)
articles = Article.objects.filter(tag__iexact=selector).order_by('-id')[:counter]
except Article.DoesNotExist:
raise Http404("Articles does not exist")
context = {
"title": selector,
"articles_3": articles[:3],
"articles_7": articles[4:10]
}
return render(request, "news/articles.html", context)
def article(request, selector, article):
# print(f"{selector} & {article}")
try:
article = Article.objects.get(header__iexact=article)
except Article.DoesNotExist:
raise Http404("Articles not found")
context = {
"article": article
}
return render(request, "news/article.html", context)
def api_articles(request, params):
params = params.split('&')
tag = params[0]
count = int(params[1])
try:
if tag == 'Головна':
db_length = db_get_len(Article.objects.all().count(), count)
get_ten = ten_getter(db_length, count)
articles = Article.objects.all().values()[count-10:get_ten]
else:
db_length = db_get_len(Article.objects.filter(tag__iexact=tag).count(), count)
get_ten = ten_getter(db_length, count)
articles = Article.objects.filter(tag__iexact=tag).values()[count-10:get_ten]
except Article.DoesNotExist:
raise Http404("Articles not found")
return JsonResponse(list(articles), content_type='application/json', safe=False)
def api_article(request, tag, param):
try:
article = Article.objects.values().get(header__iexact=param)
except Article.DoesNotExist:
raise Http404("Articles not found")
return JsonResponse(article, content_type='application/json', safe=False)
def db_get_len(db, artcls):
if db < artcls:
return artcls - (artcls - db)
else:
return artcls
def ten_getter(length, count):
if (length - count) > 10:
return 10
else:
return length
| 27.757009 | 90 | 0.653535 | import json
from django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from .models import Article
get_articles = 10
def index(request):
return HttpResponseRedirect(reverse("main"))
def main(request):
try:
global get_articles
db_len = Article.objects.all().count()
counter = db_get_len(db_len, get_articles)
articles = Article.objects.all().order_by('-id')[:counter]
except Article.DoesNotExist:
raise Http404("Articles does not exist")
context = {
"title": "Головна",
"articles_3": articles[:3],
"articles_7": articles[3:10]
}
return render(request, "news/articles.html", context)
def section(request, selector):
try:
global get_articles
db_len = Article.objects.filter(tag__iexact=selector).count()
counter = db_get_len(db_len, get_articles)
articles = Article.objects.filter(tag__iexact=selector).order_by('-id')[:counter]
except Article.DoesNotExist:
raise Http404("Articles does not exist")
context = {
"title": selector,
"articles_3": articles[:3],
"articles_7": articles[4:10]
}
return render(request, "news/articles.html", context)
def article(request, selector, article):
    """Render the detail page for the article whose header matches *article*.

    *selector* (the section tag) is part of the URL but not used in the lookup.
    """
    try:
        # Shadows the `article` parameter with the fetched model instance.
        article = Article.objects.get(header__iexact=article)
    except Article.DoesNotExist:
        raise Http404("Articles not found")
    context = {
        "article": article
    }
    return render(request, "news/article.html", context)
def api_articles(request, params):
    """JSON API: return a page of articles for a ``<tag>&<count>`` parameter string.

    The tag 'Головна' (front page) selects all articles; any other tag filters
    case-insensitively. The slice ``[count-10:get_ten]`` presumably pages in
    ten-article steps driven by the client-side counter — TODO confirm against
    the caller, since ``get_ten`` comes from ten_getter and is not obviously an
    absolute index.
    """
    params = params.split('&')
    tag = params[0]
    count = int(params[1])
    try:
        if tag == 'Головна':
            db_length = db_get_len(Article.objects.all().count(), count)
            get_ten = ten_getter(db_length, count)
            articles = Article.objects.all().values()[count-10:get_ten]
        else:
            db_length = db_get_len(Article.objects.filter(tag__iexact=tag).count(), count)
            get_ten = ten_getter(db_length, count)
            articles = Article.objects.filter(tag__iexact=tag).values()[count-10:get_ten]
    except Article.DoesNotExist:
        raise Http404("Articles not found")
    # safe=False: the payload is a JSON list, not an object.
    return JsonResponse(list(articles), content_type='application/json', safe=False)
def api_article(request, tag, param):
    """JSON API: return the single article whose header matches *param* (case-insensitive).

    *tag* is accepted for URL-routing symmetry but is not used in the lookup.
    """
    try:
        article = Article.objects.values().get(header__iexact=param)
    except Article.DoesNotExist:
        raise Http404("Articles not found")
    # safe=False because .values().get() yields a dict, not a top-level list.
    return JsonResponse(article, content_type='application/json', safe=False)
def db_get_len(db, artcls):
    """Clamp the requested article count *artcls* to the table size *db*.

    Returns the smaller of the two values. (The original branch computed
    ``artcls - (artcls - db)`` when ``db < artcls``, which is just ``db`` —
    the whole function is ``min``.)
    """
    return min(db, artcls)
def ten_getter(length, count):
    """Return 10 when more than ten items remain past *count*, else *length*."""
    remaining = length - count
    if remaining > 10:
        return 10
    return length
| true | true |
f715672884f4a836d1e1572dda38ea12b14cfb27 | 8,758 | py | Python | lib/rucio/tests/conftest.py | chrisburr/rucio | 735f628231cd9fae64adc31c9f548b14d5ca01d3 | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/conftest.py | chrisburr/rucio | 735f628231cd9fae64adc31c9f548b14d5ca01d3 | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/conftest.py | chrisburr/rucio | 735f628231cd9fae64adc31c9f548b14d5ca01d3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
# - Radu Carpa <radu.carpa@cern.ch>, 2021
# - Mayank Sharma <mayank.sharma@cern.ch>, 2021
from __future__ import print_function
import traceback
import pytest
# local imports in the fixtures to make this file loadable in e.g. client tests
@pytest.fixture(scope='session')
def vo():
    """Session-wide VO name: the configured client VO in multi-VO mode, else 'def'."""
    from rucio.common.config import config_get, config_get_bool
    multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
    if not multi_vo:
        return 'def'
    return config_get('client', 'vo', raise_exception=False, default='tst')
@pytest.fixture(scope='module')
def replica_client():
    """Module-scoped Rucio ReplicaClient (import deferred so client-only tests load)."""
    from rucio.client.replicaclient import ReplicaClient
    return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
    """Module-scoped general-purpose Rucio Client."""
    from rucio.client import Client
    return Client()
@pytest.fixture(scope='module')
def did_client():
    """Module-scoped Rucio DIDClient."""
    from rucio.client.didclient import DIDClient
    return DIDClient()
@pytest.fixture
def rest_client():
    """Flask test client for the Rucio REST API that logs every request/response.

    Temporarily switches the application into testing mode and restores the
    previous testing flag and test_client_class on teardown.
    """
    from rucio.tests.common import print_response
    from flask.testing import FlaskClient
    from rucio.web.rest.flaskapi.v1.main import application
    class WrappedFlaskClient(FlaskClient):
        # Thin wrapper that prints each request line and the response for debugging.
        def __init__(self, *args, **kwargs):
            super(WrappedFlaskClient, self).__init__(*args, **kwargs)
        def open(self, path='/', *args, **kwargs):
            print(kwargs.get('method', 'GET'), path)
            response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
            try:
                # Logging must never break the test itself.
                print_response(response)
            except Exception:
                traceback.print_exc()
            return response
    # Save and restore the global application flags around the fixture's lifetime.
    _testing = application.testing
    application.testing = True
    application.test_client_class = WrappedFlaskClient
    with application.test_client() as client:
        yield client
    application.test_client_class = None
    application.testing = _testing
@pytest.fixture
def auth_token(rest_client, vo):
    """Authenticate as root via userpass against the REST API and return the token."""
    from rucio.tests.common import vohdr, headers, loginhdr
    auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(vo)))
    assert auth_response.status_code == 200
    token = auth_response.headers.get('X-Rucio-Auth-Token')
    assert token
    return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
    """InternalScope 'mock' bound to the test VO."""
    from rucio.common.types import InternalScope
    return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def test_scope(vo):
    """InternalScope 'test' bound to the test VO."""
    from rucio.common.types import InternalScope
    return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
    """InternalAccount 'root' bound to the test VO."""
    from rucio.common.types import InternalAccount
    return InternalAccount('root', vo=vo)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
    """
    Detects if containerized rses for xrootd are available in the testing environment.
    :return: A list of (rse_name, rse_id) tuples.
    """
    from rucio.common.exception import InvalidRSEExpression
    rses = []
    try:
        xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
        xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
        xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if "xrd" in rse_obj['rse'].lower()]
        # Sort for deterministic ordering across test runs.
        xrd_containerized_rses.sort()
        rses.extend(xrd_containerized_rses)
    except InvalidRSEExpression as invalid_rse_expression:
        # Best effort: missing containerized RSEs is expected outside containers.
        print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
              .format(ex=invalid_rse_expression))
        traceback.print_exc()
    return rses
@pytest.fixture
def rse_factory(vo):
    """Per-test factory that creates temporary RSEs and cleans them up on exit."""
    from rucio.tests.temp_factories import TemporaryRSEFactory
    with TemporaryRSEFactory(vo=vo) as factory:
        yield factory
@pytest.fixture
def did_factory(vo, mock_scope):
    """Per-test factory that creates temporary DIDs in the mock scope."""
    from rucio.tests.temp_factories import TemporaryDidFactory
    with TemporaryDidFactory(vo=vo, default_scope=mock_scope) as factory:
        yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
    """Per-test factory for temporary files backed by pytest's tmp_path_factory."""
    from rucio.tests.temp_factories import TemporaryFileFactory
    with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
        yield factory
def __get_fixture_param(request):
    """Return the fixture's parametrization, falling back to a class-level mark.

    Parametrize support is incomplete for legacy unittest test cases, so when
    ``request.param`` is absent the first parameter set is read directly from
    the class's ``@pytest.mark.parametrize`` mark.
    """
    param = getattr(request, "param", None)
    if param:
        return param
    for mark in request.instance.pytestmark:
        if mark.name == 'parametrize':
            return mark.args[1][0]
    return param
@pytest.fixture
def core_config_mock(request):
    """
    Fixture to allow having per-test core.config tables without affecting the other parallel tests.
    This override works only in tests which use core function calls directly, not in the ones working
    via the API, because the normal config table is not touched and the rucio instance answering API
    calls is not aware of this mock.
    This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
    Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
    """
    from unittest import mock
    from rucio.common.utils import generate_uuid
    from sqlalchemy.pool import StaticPool
    from rucio.db.sqla.models import ModelBase, BASE, Column, String, PrimaryKeyConstraint
    from rucio.db.sqla.session import get_session, get_maker, get_engine, create_engine, declarative_base
    # Get the fixture parameters
    table_content = []
    params = __get_fixture_param(request)
    if params:
        table_content = params.get("table_content", table_content)
    # Create an in-memory dropdown replacement table for the "models.Config" table
    engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
    InMemoryBase = declarative_base(bind=engine)
    class InMemoryConfig(InMemoryBase, ModelBase):
        # Unique table name so parallel tests never collide.
        __tablename__ = 'configs_' + generate_uuid()
        section = Column(String(128))
        opt = Column(String(128))
        value = Column(String(4000))
        # NOTE(review): the SQLAlchemy declarative attribute is `__table_args__`;
        # `_table_args` looks like a typo and would leave the constraint unused — confirm.
        _table_args = (PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'), )
    InMemoryBase.metadata.create_all()
    # Register the new table with the associated engine into the sqlalchemy sessionmaker
    # In theory, this code must be protected by rucio.db.scla.session._LOCK, but this code will be executed
    # during test case initialization, so there is no risk here to have concurrent calls from within the
    # same process
    current_engine = get_engine()
    get_maker().configure(binds={BASE: current_engine, InMemoryBase: engine})
    # Fill the table with the requested mock data
    session = get_session()()
    for section, option, value in (table_content or []):
        InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
    session.commit()
    with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
        yield
@pytest.fixture
def caches_mock(request):
    """
    Fixture which overrides the different internal caches with in-memory ones for the duration
    of a particular test.
    This override works only in tests which use core function calls directly, not in the ones
    working via API.
    The fixture acts by mock.patching the REGION object in the provided list of modules.
    """
    from unittest import mock
    from contextlib import ExitStack
    from dogpile.cache import make_region
    caches_to_mock = []
    params = __get_fixture_param(request)
    if params:
        caches_to_mock = params.get("caches_to_mock", caches_to_mock)
    # ExitStack lets us patch an arbitrary number of modules and undo all patches at once.
    with ExitStack() as stack:
        for module in caches_to_mock:
            region = make_region().configure('dogpile.cache.memory', expiration_time=600)
            stack.enter_context(mock.patch('{}.{}'.format(module, 'REGION'), new=region))
        yield
| 33.945736 | 125 | 0.716716 |
from __future__ import print_function
import traceback
import pytest
@pytest.fixture(scope='session')
def vo():
    """Session-wide VO name: the configured client VO in multi-VO mode, else 'def'."""
    from rucio.common.config import config_get_bool, config_get
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        return config_get('client', 'vo', raise_exception=False, default='tst')
    else:
        return 'def'
@pytest.fixture(scope='module')
def replica_client():
    """Module-scoped Rucio ReplicaClient."""
    from rucio.client.replicaclient import ReplicaClient
    return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
    """Module-scoped general-purpose Rucio Client."""
    from rucio.client import Client
    return Client()
@pytest.fixture(scope='module')
def did_client():
    """Module-scoped Rucio DIDClient."""
    from rucio.client.didclient import DIDClient
    return DIDClient()
@pytest.fixture
def rest_client():
    """Flask test client for the Rucio REST API that logs every request/response.

    Temporarily switches the application into testing mode and restores the
    previous flags on teardown.
    """
    from rucio.tests.common import print_response
    from flask.testing import FlaskClient
    from rucio.web.rest.flaskapi.v1.main import application
    class WrappedFlaskClient(FlaskClient):
        # Wrapper that prints each request line and the response for debugging.
        def __init__(self, *args, **kwargs):
            super(WrappedFlaskClient, self).__init__(*args, **kwargs)
        def open(self, path='/', *args, **kwargs):
            print(kwargs.get('method', 'GET'), path)
            response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
            try:
                # Logging must never break the test itself.
                print_response(response)
            except Exception:
                traceback.print_exc()
            return response
    # Save and restore the global application flags around the fixture's lifetime.
    _testing = application.testing
    application.testing = True
    application.test_client_class = WrappedFlaskClient
    with application.test_client() as client:
        yield client
    application.test_client_class = None
    application.testing = _testing
@pytest.fixture
def auth_token(rest_client, vo):
    """Authenticate as root via userpass against the REST API and return the token."""
    from rucio.tests.common import vohdr, headers, loginhdr
    auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(vo)))
    assert auth_response.status_code == 200
    token = auth_response.headers.get('X-Rucio-Auth-Token')
    assert token
    return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
    """InternalScope 'mock' bound to the test VO."""
    from rucio.common.types import InternalScope
    return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def test_scope(vo):
    """InternalScope 'test' bound to the test VO."""
    from rucio.common.types import InternalScope
    return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
    """InternalAccount 'root' bound to the test VO."""
    from rucio.common.types import InternalAccount
    return InternalAccount('root', vo=vo)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
    """Detect containerized xrootd RSEs; returns a list of (rse_name, rse_id) tuples."""
    from rucio.common.exception import InvalidRSEExpression
    rses = []
    try:
        xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
        xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
        xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if "xrd" in rse_obj['rse'].lower()]
        # Sort for deterministic ordering across test runs.
        xrd_containerized_rses.sort()
        rses.extend(xrd_containerized_rses)
    except InvalidRSEExpression as invalid_rse_expression:
        # Best effort: missing containerized RSEs is expected outside containers.
        print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
              .format(ex=invalid_rse_expression))
        traceback.print_exc()
    return rses
@pytest.fixture
def rse_factory(vo):
    """Per-test factory that creates temporary RSEs and cleans them up on exit."""
    from rucio.tests.temp_factories import TemporaryRSEFactory
    with TemporaryRSEFactory(vo=vo) as factory:
        yield factory
@pytest.fixture
def did_factory(vo, mock_scope):
    """Per-test factory that creates temporary DIDs in the mock scope."""
    from rucio.tests.temp_factories import TemporaryDidFactory
    with TemporaryDidFactory(vo=vo, default_scope=mock_scope) as factory:
        yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
    """Per-test factory for temporary files backed by pytest's tmp_path_factory."""
    from rucio.tests.temp_factories import TemporaryFileFactory
    with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
        yield factory
def __get_fixture_param(request):
    """Return the fixture's parametrization, falling back to a class-level mark.

    Parametrize support is incomplete for legacy unittest test cases, so when
    ``request.param`` is absent the first parameter set is read from the class's
    ``@pytest.mark.parametrize`` mark.
    """
    fixture_param = getattr(request, "param", None)
    if not fixture_param:
        mark = next(iter(filter(lambda m: m.name == 'parametrize', request.instance.pytestmark)), None)
        if mark:
            fixture_param = mark.args[1][0]
    return fixture_param
@pytest.fixture
def core_config_mock(request):
    """Per-test replacement of the core.config table with an in-memory sqlite copy.

    Only effective for tests calling core functions directly (not via the API).
    Accesses to models.Config are redirected to a uniquely-named temporary table
    via mock.patch for the duration of the test.
    """
    from unittest import mock
    from rucio.common.utils import generate_uuid
    from sqlalchemy.pool import StaticPool
    from rucio.db.sqla.models import ModelBase, BASE, Column, String, PrimaryKeyConstraint
    from rucio.db.sqla.session import get_session, get_maker, get_engine, create_engine, declarative_base
    table_content = []
    params = __get_fixture_param(request)
    if params:
        table_content = params.get("table_content", table_content)
    engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
    InMemoryBase = declarative_base(bind=engine)
    class InMemoryConfig(InMemoryBase, ModelBase):
        # Unique table name so parallel tests never collide.
        __tablename__ = 'configs_' + generate_uuid()
        section = Column(String(128))
        opt = Column(String(128))
        value = Column(String(4000))
        # NOTE(review): SQLAlchemy expects `__table_args__`; `_table_args` looks
        # like a typo and would leave the constraint unused — confirm.
        _table_args = (PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'), )
    InMemoryBase.metadata.create_all()
    # Bind the temporary table's engine alongside the normal one in the sessionmaker.
    current_engine = get_engine()
    get_maker().configure(binds={BASE: current_engine, InMemoryBase: engine})
    # Pre-populate the table with the requested mock rows.
    session = get_session()()
    for section, option, value in (table_content or []):
        InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
    session.commit()
    with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
        yield
@pytest.fixture
def caches_mock(request):
    """Override the internal dogpile caches with in-memory regions for one test.

    Only effective for tests calling core functions directly (not via the API).
    The REGION object in each listed module is mock.patched for the duration.
    """
    from unittest import mock
    from contextlib import ExitStack
    from dogpile.cache import make_region
    caches_to_mock = []
    params = __get_fixture_param(request)
    if params:
        caches_to_mock = params.get("caches_to_mock", caches_to_mock)
    # ExitStack lets us patch an arbitrary number of modules and undo all patches at once.
    with ExitStack() as stack:
        for module in caches_to_mock:
            region = make_region().configure('dogpile.cache.memory', expiration_time=600)
            stack.enter_context(mock.patch('{}.{}'.format(module, 'REGION'), new=region))
        yield
| true | true |
f71567dd29ec05db09822d985693d9e42b0f36ec | 394 | py | Python | src/param.py | xiajing10/akec | 239fdda923c8a0743f56dbf0a009fa2235b85451 | [
"MIT"
] | 14 | 2021-01-28T07:13:25.000Z | 2022-02-10T06:41:32.000Z | src/param.py | xiajing10/akec | 239fdda923c8a0743f56dbf0a009fa2235b85451 | [
"MIT"
] | 2 | 2021-04-14T15:24:30.000Z | 2021-05-06T07:02:08.000Z | src/param.py | xiajing10/akec | 239fdda923c8a0743f56dbf0a009fa2235b85451 | [
"MIT"
] | 1 | 2021-07-09T02:52:59.000Z | 2021-07-09T02:52:59.000Z | # -*- coding: utf-8 -*-
"""
@author: eilxaix
"""
param = {
'data_path': '../dataset/ieee_xai.csv',
'terms_path': '../dataset/domain_terms.txt',
'conceptnet_emb': './embed_data/numberbatch-en-19.08.txt',
'elmo_options':'./embed_data/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json',
'elmo_weight':'./embed_data/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5'
} | 32.833333 | 87 | 0.690355 |
param = {
'data_path': '../dataset/ieee_xai.csv',
'terms_path': '../dataset/domain_terms.txt',
'conceptnet_emb': './embed_data/numberbatch-en-19.08.txt',
'elmo_options':'./embed_data/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json',
'elmo_weight':'./embed_data/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5'
} | true | true |
f71568ebb68f9faa9d28c57f4a9204630dfb7a43 | 18,797 | py | Python | transfer_subnet/xiaoketransfer2.py | LenKerr/Colorization-1 | bcfcdb24fc8ab107d34644d5a63b018f86784e21 | [
"MIT"
] | 30 | 2020-06-21T09:29:51.000Z | 2022-03-26T07:32:52.000Z | transfer_subnet/xiaoketransfer2.py | LenKerr/Colorization-1 | bcfcdb24fc8ab107d34644d5a63b018f86784e21 | [
"MIT"
] | 5 | 2020-09-27T09:45:44.000Z | 2021-10-20T11:45:04.000Z | transfer_subnet/xiaoketransfer2.py | xuzhongyou/Colorization | bcfcdb24fc8ab107d34644d5a63b018f86784e21 | [
"MIT"
] | 7 | 2020-07-03T02:55:25.000Z | 2021-12-18T10:38:41.000Z | """
Copyright (c) 2019 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
os.environ['CUDA_VISIBLE_DEVICES']='1'
import sys
sys.path.append('./segmentation')
import os
import tqdm
import argparse
import torch
from torchvision.utils import save_image
import torch.nn as nn
# from model import WaveEncoder, WaveDecoder
from utils.core import feature_wct
from utils.core import feature_adin
from utils.core import feature_adin_without_segment
from utils.core import feature_wct_without_segment
from utils.io import Timer, open_image, load_segment, compute_label_info
from xiaokemodel import XiaoKeEncoder, XiaoKeDecoder
import numpy as np
import torchvision.transforms as transforms
from scipy.io import loadmat
from PIL import Image
from scipy.misc import imread, imresize
import cv2
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy, mark_volatile
import datetime
# File suffixes accepted as images. Matching is case-sensitive, so both case
# variants are listed explicitly (e.g. '.Jpg' is rejected).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG',
]


def is_image_file(filename):
    """Return True if *filename* ends with one of the known image suffixes."""
    # str.endswith accepts a tuple of suffixes — one C-level call instead of a
    # Python-level any() loop; behavior is identical.
    return filename.endswith(tuple(IMG_EXTENSIONS))
class WCT2:
    """Photorealistic style transfer built on the XiaoKe encoder/decoder pair.

    Loads pretrained weights at construction and exposes :meth:`transfer`, which
    injects style statistics into the content features at the configured network
    locations ('encoder', 'decoder', 'skip').
    """
    def __init__(self, model_path='./model_checkpoints', transfer_at=['encoder', 'skip', 'decoder'], option_unpool='cat5', device='cuda:0', verbose=False):
        # NOTE(review): mutable default for `transfer_at` and the `model_path`
        # parameter being overwritten below look accidental — confirm.
        self.transfer_at = set(transfer_at)
        assert not(self.transfer_at - set(['encoder', 'decoder', 'skip'])), 'invalid transfer_at: {}'.format(transfer_at)
        assert self.transfer_at, 'empty transfer_at'
        # Checkpoint locations are hard-coded; the video-model paths below are
        # immediately shadowed by the image-model paths that follow.
        model_path = './xiaoke_video_checkpoints/'
        encoder_path = 'xiaoke_encoder.pth'
        decoder_path = 'xiaoke_decoder_0.0001_4.pth'
        model_path = './xiaoke_checkpoints/'
        encoder_path = 'xiaoke_encoder.pth'
        decoder_path = 'xiaoke_decoder_87.pth'
        self.device = torch.device(device)
        self.verbose = verbose
        self.encoder = XiaoKeEncoder(option_unpool).to(self.device)
        self.decoder = XiaoKeDecoder(option_unpool).to(self.device)
        # map_location keeps CPU loading possible when CUDA is absent.
        self.encoder.load_state_dict(torch.load(os.path.join(model_path,encoder_path),map_location=lambda storage, loc: storage))
        self.decoder.load_state_dict(torch.load(os.path.join(model_path,decoder_path),map_location=lambda storage, loc: storage))
    def print_(self, msg):
        """Print *msg* only when verbose mode is enabled."""
        if self.verbose:
            print(msg)
    def encode(self, x, skips, level):
        """Run one encoder level; `skips` collects the skip-connection features."""
        return self.encoder.encode(x, skips, level)
    def decode(self, x, skips, level):
        """Run one decoder level, consuming the matching skip features."""
        return self.decoder.decode(x, skips, level)
    def get_all_feature(self, x):
        """Encode *x* through levels 1-4 (and partially decode 4→2), returning
        the per-level features and the skip-connection dict used by transfer()."""
        skips = {}
        feats = {'encoder': {}, 'decoder': {}}
        for level in [1, 2, 3, 4]:
            x = self.encode(x, skips, level)
            if 'encoder' in self.transfer_at:
                feats['encoder'][level] = x
        if 'encoder' not in self.transfer_at:
            feats['decoder'][4] = x
        for level in [4, 3, 2]:
            x = self.decode(x, skips, level)
            if 'decoder' in self.transfer_at:
                feats['decoder'][level - 1] = x
        return feats, skips
    def transfer(self, content, style, content_segment, style_segment, alpha=1,is_wct=False):
        """Stylize *content* with *style* and return the decoded image tensor.

        alpha blends content and style statistics; is_wct selects whitening/
        coloring (feature_wct) versus AdaIN (feature_adin_without_segment).
        The segment arguments are forwarded but unused by the *_without_segment
        path (callers currently pass None).
        """
        content_feat, content_skips = content, {}
        style_feats, style_skips = self.get_all_feature(style)
        wct2_enc_level = [1, 2, 3, 4]
        wct2_dec_level = [1, 2, 3, 4]
        wct2_skip_level = ['pool1', 'pool2', 'pool3']
        label_set,label_indicator = None, None
        for level in [1, 2, 3, 4]:
            content_feat = self.encode(content_feat, content_skips, level)
            if 'encoder' in self.transfer_at and level in wct2_enc_level:
                if is_wct:
                    content_feat = feature_wct(content_feat, style_feats['encoder'][level],
                                               content_segment, style_segment,
                                               label_set, label_indicator,
                                               alpha=alpha, device=self.device)
                else:
                    content_feat = feature_adin_without_segment(content_feat, style_feats['encoder'][level],
                                               content_segment, style_segment,
                                               label_set, label_indicator,
                                               alpha=alpha, device=self.device)
                self.print_('transfer at encoder {}'.format(level))
        if 'skip' in self.transfer_at:
            for skip_level in wct2_skip_level:
                if is_wct:
                    content_skips[skip_level] = feature_wct(content_skips[skip_level], style_skips[skip_level],
                                                        content_segment, style_segment,
                                                        label_set, label_indicator,
                                                        alpha=alpha, device=self.device)
                else :
                    content_skips[skip_level] = feature_adin_without_segment(content_skips[skip_level], style_skips[skip_level],
                                                        content_segment, style_segment,
                                                        label_set, label_indicator,
                                                        alpha=alpha, device=self.device)
                self.print_('transfer at skip {}'.format(skip_level))
        for level in [4, 3, 2, 1]:
            if 'decoder' in self.transfer_at and level in style_feats['decoder'] and level in wct2_dec_level:
                if is_wct:
                    content_feat = feature_wct(content_feat, style_feats['decoder'][level],
                                            content_segment, style_segment,
                                            label_set, label_indicator,
                                            alpha=alpha, device=self.device)
                else :
                    content_feat = feature_adin_without_segment(content_feat, style_feats['decoder'][level],
                                            content_segment, style_segment,
                                            label_set, label_indicator,
                                            alpha=alpha, device=self.device)
                self.print_('transfer at decoder {}'.format(level))
            content_feat = self.decode(content_feat, content_skips, level)
        return content_feat
def get_all_transfer():
    """Enumerate the transfer-location combinations to try.

    With the current candidate lists this yields exactly one combination:
    {'encoder', 'decoder', 'skip'}.
    """
    valid = {'encoder', 'decoder', 'skip'}
    combos = []
    for enc in ('encoder',):
        for dec in ('decoder',):
            for skip in ('skip',):
                selection = {enc, dec, skip} & valid
                if selection:
                    combos.append(selection)
    return combos
# def get_single_transfer():
# return ['encoder', 'decoder', 'skip']
def run_bulk():
    """Stylize the first 50 frames in config.content with config.style.

    Reads all options from the module-level `config` namespace, resizes each
    frame to 640x400 (cropped to a multiple of 16), and writes stylized PNGs
    to config.output. With --transfer_all, every combination returned by
    get_all_transfer() is applied per frame.
    """
    accurate_segment = True
    device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'
    device = torch.device(device)
    # Collect the requested transfer locations from the CLI flags.
    transfer_at = set()
    if config.transfer_at_encoder:
        transfer_at.add('encoder')
    if config.transfer_at_decoder:
        transfer_at.add('decoder')
    if config.transfer_at_skip:
        transfer_at.add('skip')
    # cw, ch = 640,360
    cw, ch = 640,400
    # The filenames of the content and style pair should match
    c_transforms = transforms.Compose([transforms.Resize((ch,cw), interpolation=Image.NEAREST),transforms.CenterCrop((ch // 16 * 16, cw // 16 * 16)),transforms.ToTensor()])
    fnames = os.listdir(config.content)
    fnames.sort()
    print('transfer at ~~~~',transfer_at)
    # The style image is loaded and preprocessed once, outside the frame loop.
    style = Image.open(config.style).convert('RGB')
    style = c_transforms(style).unsqueeze(0).to(device)
    # Only the first 50 frames are processed.
    sample_fnames = fnames[:50]
    for fname in tqdm.tqdm(sample_fnames):
        if not is_image_file(fname):
            print('invalid file (is not image), ', fname)
            continue
        print('config.wct is ',config.is_wct)
        # Load and preprocess the content frame.
        _content = os.path.join(config.content, fname)
        content = Image.open(_content).convert('RGB')  # don't forget the .to(device) below
        content = c_transforms(content).unsqueeze(0).to(device)
        print('current frame {} and shape is {}'.format(fname,content.shape))
        _output = os.path.join(config.output, fname)
        # Segmentation is disabled in this script; the transfer ignores the segments.
        content_segment,style_segment = None,None
        if not config.transfer_all:
            with Timer('Elapsed time in whole WCT: {}', config.verbose):
                postfix = '_'.join(sorted(list(transfer_at)))
                fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
                print('------ transfer:', _output)
                # NOTE(review): a new WCT2 (and checkpoint load) per frame is wasteful;
                # the model could be constructed once before the loop.
                wct2 = WCT2(transfer_at=transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
                with torch.no_grad():
                    img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)
                    save_image(img.clamp_(0, 1), fname_output, padding=0)
        else:
            for _transfer_at in get_all_transfer():
                print('location for transfer at~~~~',_transfer_at)
                with Timer('Elapsed time in whole WCT: {}', config.verbose):
                    postfix = '_'.join(sorted(list(_transfer_at)))
                    fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
                    print('------ transfer:', fname,'-',_transfer_at)
                    wct2 = WCT2(transfer_at=_transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
                    with torch.no_grad():
                        # Time a single transfer for reporting.
                        starttime = datetime.datetime.now()
                        img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)
                        endtime = datetime.datetime.now()
                        print('xiaoke with adin 运行时间为----',(endtime - starttime))
                        save_image(img.clamp_(0, 1), fname_output, padding=0)
if __name__ == '__main__':
    # CLI entry point: parse options into the module-level `config` namespace
    # (read by run_bulk and WCT2), then process the content directory.
    parser = argparse.ArgumentParser()
    parser.add_argument('--content', type=str, default='./examples/content')
    parser.add_argument('--content_segment', type=str, default='./examples/content_segment')
    parser.add_argument('--style', type=str, default='./examples/style')
    parser.add_argument('--style_segment', type=str, default='./examples/style_segment')
    parser.add_argument('--output', type=str, default='./outputs')
    parser.add_argument('--image_size', type=int, default=512)
    parser.add_argument('--alpha', type=float, default=1)
    parser.add_argument('--option_unpool', type=str, default='cat5', choices=['sum', 'cat5'])
    parser.add_argument('-e', '--transfer_at_encoder', action='store_true')
    parser.add_argument('-d', '--transfer_at_decoder', action='store_true')
    parser.add_argument('-s', '--transfer_at_skip', action='store_true')
    parser.add_argument('-a', '--transfer_all', action='store_true')
    parser.add_argument('--cpu', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--is_wct',action='store_true')
    # Options below belong to the segmentation subnet and are unused in this script.
    parser.add_argument('--label_mapping', type=str, default='ade20k_semantic_rel.npy')
    parser.add_argument('--model_path', help='folder to model path', default='baseline-resnet50_dilated8-ppm_bilinear_deepsup')
    parser.add_argument('--arch_encoder', default='resnet50_dilated8', help="architecture of net_encoder")
    parser.add_argument('--arch_decoder', default='ppm_bilinear_deepsup', help="architecture of net_decoder")
    parser.add_argument('--suffix', default='_epoch_20.pth', help="which snapshot to load")
    parser.add_argument('--fc_dim', default=2048, type=int, help='number of features between encoder and decoder')
    parser.add_argument('--num_class', default=150, type=int, help='number of classes')
    parser.add_argument('--padding_constant', default=8, type=int, help='maxmimum downsampling rate of the network')
    parser.add_argument('--gpu_id', default=0, type=int, help='gpu_id for evaluation')
    parser.add_argument('--imgSize', default=[300, 400, 500, 600], nargs='+', type=int, help='list of input image sizes.' 'for multiscale testing, e.g. 300 400 500')
    # argparse forbids defining the same argument twice
    config = parser.parse_args()
    # NOTE(review): `transform` is built but never used in this script — confirm.
    transform = transforms.Compose([transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.])])
    print(config)
    # Make sure the output directory exists before writing stylized frames.
    if not os.path.exists(os.path.join(config.output)):
        os.makedirs(os.path.join(config.output))
    run_bulk()
# 树林的图片
# 171124_D1_HD_01
# 170216A_122_ForestTrail_1080
# 170216A_070_LookingUpThroughForest_1080
# 180705_01_0
# 190416_10_Drone1_0
# Forest_15_1_Videv
# Forest_15_4_Videv
# on
# WalkingThroughTreesatSunsetVidev
# 树叶
# Autumn_leaves_in_motion_0
# autumn_leaves
# autumn-leaves-blowing-in-the-wind-H264
# 180705_01_0
# 海浪
# 46234354
# walking_on_the_beac
# 雪山
# 180607_A_00
# 开车
# 180607_A_10
# 飞机
# Airbus_A380_Landing_2__Videv
# Evening_landin
# PlaneLand
# 海边瑜伽
# Ao_Nang_Beach_Yoga_MP4_HDV_1080p25__TanuriX_Stock_Footage_N
# MVI_126
# 水稻
# Barley_3_Videv
# HandStrokin
# wild_gras
# windygrassnoaudi-
# 船
# beach1
# sailing_boa
# 天空
# Becco_di_Filadonna_su_Vall
# Blue_Sky_and_Clouds_Timelapse_0892__Videv
# 老鼠
# CotswoldSequence
# 奶牛
# cow
# Cow_Mother_and_cal
# Cows_
# Limousin_Cows_1__VIdev
# Limousin_Cows_2__Videv
# 日落
# Lonely_tree_at_sunset_CCBY_NatureCli
# MilkyWaywithTreeVidev
# SilhouetteJogge
# Sun_to_Sea_Model__Pan_Down_MP4_HDV_1080p25__TanuriX_Stock_Footage_W
# Sunris
# TimelapseSunse
# Wakeboarding_on_the_Lak
# 马
# Dirty_Hors
# 黑白鸟
# Pigeon-Stock-Vide
# Red_fod
# Weave
# 海鸥
# seagul-H264
# seagulls_on_the_beac
# 建筑
# Run_5_wo_metadata_h264420_720p_UH
# 鸭子
# SeaBirdsSwimming_
# Swans__1287_
# 羊
# Shee
'''
CUDA_VISIBLE_DEVICES=6 python transfer.py --content ./examples/content --style ./examples/style --content_segment ./examples/content_segment --style_segment ./examples/style_segment/ --output ./outputs/ --verbose --image_size 512 -a
'''
'''
python xiaoketransfer.py --content ./examples/demo_content/ --style ./examples/demo_style/ -a --output ./examples/demo_stylization --is_wct --image_size 400
CUDA_VISIBLE_DEVICES=1 python xiaoketransfer.py --content ./examples/dataset/alley_2/ --style ./examples/dataset/fangao.png -a --output ./examples/stylization
CUDA_VISIBLE_DEVICES=1 python xiaoketransfer2.py --content ./examples/data/MPI-Sintel-complete/training/clean/temple_2 --style ./examples/data/fangao.png -a --output ./examples/stylization
CUDA_VISIBLE_DEVICES=1 python xiaoketransfer2.py --content ./examples/data/MPI-Sintel-complete/training/clean/mountain_1 --style ./examples/data/fangao.png -a --output ./examples/stylization
CUDA_VISIBLE_DEVICES=1 python xiaoketransfer2.py --content ./examples/data/MPI-Sintel-complete/training/clean/temple_2 --style ./examples/data/fangao.png -a --output ./examples/stylization --is_wct
'''
'''
'../data/video-picture/160825_26_WindTurbines4_1080'
python xiaoketransfer2.py --content ../data/video-picture/160825_26_WindTurbines4_1080 --style ./examples/data/fangao.png -a --output ./examples/160825_26_WindTurbines4_1080_adain
'''
'''
'../data/video-picture/xxx'
python xiaoketransfer2.py --content ../data/video-picture/180705_01_0 --style ../data/reference/tar0056_orange_forest.png -a --output ./examples/Forest_15_4_Videv
python xiaoketransfer.py --content ../data/video-picture/Red_fod --style ../data/video-picture/Weave/frame_0001.png -a --output ./examples/Red_fod_seg
python xiaoketransfer.py --content ../data/video-picture/seagulls_on_the_beac --style ../data/video-picture/seagul-H264/frame_0001.png -a --output ./examples/seagulls_on_the_beac_seg
python xiaoketransfer2.py --content ../data/video-picture/HandStrokin --style ../data/video-picture/Barley_3_Videv/frame_0001.png -a --output ./examples/HandStrokin
python xiaoketransfer.py --content ../data/video-picture/Swans__1287_ --style ../data/video-picture/SeaBirdsSwimming_/frame_0001.png -a --output ./examples/Swans__1287_
python xiaoketransfer.py --content ../data/video-picture/Becco_di_Filadonna_su_Vall --style ../data/video-picture/Blue_Sky_and_Clouds_Timelapse_0892__Videv/frame_0001.png -a --output ./examples/Becco_di_Filadonna_su_Vall
python xiaoketransfer.py --content ../data/video-picture/Sun_to_Sea_Model__Pan_Down_MP4_HDV_1080p25__TanuriX_Stock_Footage_W --style ../data/video-picture/Lonely_tree_at_sunset_CCBY_NatureCli/frame_0004.png -a --output ./examples/Sun_to_Sea_Model__Pan_Down_MP4_HDV_1080p25__TanuriX_Stock_Footage_W
python xiaoketransfer2.py --content ../data/video-picture/Wakeboarding_on_the_Lak --style ../data/video-picture/Sunris/frame_0004.png -a --output ./examples/Wakeboarding_on_the_Lak
# Barley_3_Videv
# HandStrokin
# Pigeon-Stock-Vide
# Red_fod
''' | 42.623583 | 297 | 0.665638 | import os
os.environ['CUDA_VISIBLE_DEVICES']='1'
import sys
sys.path.append('./segmentation')
import os
import tqdm
import argparse
import torch
from torchvision.utils import save_image
import torch.nn as nn
from utils.core import feature_wct
from utils.core import feature_adin
from utils.core import feature_adin_without_segment
from utils.core import feature_wct_without_segment
from utils.io import Timer, open_image, load_segment, compute_label_info
from xiaokemodel import XiaoKeEncoder, XiaoKeDecoder
import numpy as np
import torchvision.transforms as transforms
from scipy.io import loadmat
from PIL import Image
from scipy.misc import imread, imresize
import cv2
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy, mark_volatile
import datetime
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG',
]


def is_image_file(filename):
    """Return True when *filename* ends with a recognised image extension."""
    for suffix in IMG_EXTENSIONS:
        if filename.endswith(suffix):
            return True
    return False
class WCT2:
    """Photorealistic style-transfer runner over the XiaoKe encoder/decoder.

    Performs multi-level feature transfer between a content and a style
    image, matching statistics either by whitening/colouring
    (``feature_wct``) or by AdaIN (``feature_adin_without_segment``) at the
    encoder, skip and/or decoder stages named in ``transfer_at``.
    """

    def __init__(self, model_path='./model_checkpoints', transfer_at=['encoder', 'skip', 'decoder'], option_unpool='cat5', device='cuda:0', verbose=False):
        # NOTE(review): ``transfer_at`` is a mutable (list) default; harmless
        # here because it is only read, but a tuple would be safer.
        self.transfer_at = set(transfer_at)
        assert not(self.transfer_at - set(['encoder', 'decoder', 'skip'])), 'invalid transfer_at: {}'.format(transfer_at)
        assert self.transfer_at, 'empty transfer_at'
        # NOTE(review): the ``model_path`` argument and the first checkpoint
        # triple below are immediately overwritten — the "video" checkpoint
        # paths are dead code from experimentation; only ./xiaoke_checkpoints/
        # with xiaoke_decoder_87.pth is actually loaded.
        model_path = './xiaoke_video_checkpoints/'
        encoder_path = 'xiaoke_encoder.pth'
        decoder_path = 'xiaoke_decoder_0.0001_4.pth'
        model_path = './xiaoke_checkpoints/'
        encoder_path = 'xiaoke_encoder.pth'
        decoder_path = 'xiaoke_decoder_87.pth'
        self.device = torch.device(device)
        self.verbose = verbose
        self.encoder = XiaoKeEncoder(option_unpool).to(self.device)
        self.decoder = XiaoKeDecoder(option_unpool).to(self.device)
        # map_location keeps CPU-saved weights loadable on any device.
        self.encoder.load_state_dict(torch.load(os.path.join(model_path,encoder_path),map_location=lambda storage, loc: storage))
        self.decoder.load_state_dict(torch.load(os.path.join(model_path,decoder_path),map_location=lambda storage, loc: storage))

    def print_(self, msg):
        # Logging helper gated on the ``verbose`` flag.
        if self.verbose:
            print(msg)

    def encode(self, x, skips, level):
        # Run one encoder stage; the encoder records skip features into
        # ``skips`` as a side effect (presumably keyed 'pool1'..'pool3' —
        # see wct2_skip_level below; confirm against XiaoKeEncoder).
        return self.encoder.encode(x, skips, level)

    def decode(self, x, skips, level):
        # Run one decoder stage, consuming the recorded skip features.
        return self.decoder.decode(x, skips, level)

    def get_all_feature(self, x):
        """Run ``x`` through encoder and decoder once, collecting features.

        Returns ``(feats, skips)`` where ``feats['encoder'|'decoder']`` maps
        level -> feature tensor for the stages named in ``self.transfer_at``.
        """
        skips = {}
        feats = {'encoder': {}, 'decoder': {}}
        for level in [1, 2, 3, 4]:
            x = self.encode(x, skips, level)
            if 'encoder' in self.transfer_at:
                feats['encoder'][level] = x
        if 'encoder' not in self.transfer_at:
            # Without encoder-side transfer, still expose the bottleneck.
            feats['decoder'][4] = x
        for level in [4, 3, 2]:
            x = self.decode(x, skips, level)
            if 'decoder' in self.transfer_at:
                feats['decoder'][level - 1] = x
        return feats, skips

    def transfer(self, content, style, content_segment, style_segment, alpha=1,is_wct=False):
        """Stylise ``content`` with ``style`` and return the output tensor.

        ``alpha`` is the blend strength forwarded to the feature-matching
        helpers; ``is_wct`` selects whitening/colouring over AdaIN.
        ``content_segment``/``style_segment`` are passed through unchanged
        (callers in this script pass None, i.e. no segmentation guidance).
        """
        content_feat, content_skips = content, {}
        # Style features are extracted once, up front.
        style_feats, style_skips = self.get_all_feature(style)
        wct2_enc_level = [1, 2, 3, 4]
        wct2_dec_level = [1, 2, 3, 4]
        wct2_skip_level = ['pool1', 'pool2', 'pool3']
        # No segmentation labels in this configuration.
        label_set,label_indicator = None, None
        # Encoder pass: stylise after each encoding stage.
        for level in [1, 2, 3, 4]:
            content_feat = self.encode(content_feat, content_skips, level)
            if 'encoder' in self.transfer_at and level in wct2_enc_level:
                if is_wct:
                    content_feat = feature_wct(content_feat, style_feats['encoder'][level],
                                               content_segment, style_segment,
                                               label_set, label_indicator,
                                               alpha=alpha, device=self.device)
                else:
                    content_feat = feature_adin_without_segment(content_feat, style_feats['encoder'][level],
                                               content_segment, style_segment,
                                               label_set, label_indicator,
                                               alpha=alpha, device=self.device)
                self.print_('transfer at encoder {}'.format(level))
        # Skip-connection pass: stylise the recorded pooling features.
        if 'skip' in self.transfer_at:
            for skip_level in wct2_skip_level:
                if is_wct:
                    content_skips[skip_level] = feature_wct(content_skips[skip_level], style_skips[skip_level],
                                                        content_segment, style_segment,
                                                        label_set, label_indicator,
                                                        alpha=alpha, device=self.device)
                else :
                    content_skips[skip_level] = feature_adin_without_segment(content_skips[skip_level], style_skips[skip_level],
                                                        content_segment, style_segment,
                                                        label_set, label_indicator,
                                                        alpha=alpha, device=self.device)
                self.print_('transfer at skip {}'.format(skip_level))
        # Decoder pass: stylise before each decoding stage, then decode.
        for level in [4, 3, 2, 1]:
            if 'decoder' in self.transfer_at and level in style_feats['decoder'] and level in wct2_dec_level:
                if is_wct:
                    content_feat = feature_wct(content_feat, style_feats['decoder'][level],
                                           content_segment, style_segment,
                                           label_set, label_indicator,
                                           alpha=alpha, device=self.device)
                else :
                    content_feat = feature_adin_without_segment(content_feat, style_feats['decoder'][level],
                                           content_segment, style_segment,
                                           label_set, label_indicator,
                                           alpha=alpha, device=self.device)
                self.print_('transfer at decoder {}'.format(level))
            content_feat = self.decode(content_feat, content_skips, level)
        return content_feat
def get_all_transfer():
    """Return the list of transfer-location combinations to evaluate.

    With one candidate per slot this yields a single combination,
    {'encoder', 'decoder', 'skip'}; extend the candidate lists to sweep
    more configurations.
    """
    combos = []
    valid_locations = set(['encoder', 'decoder', 'skip'])
    for enc in ['encoder']:
        for dec in ['decoder']:
            for skip in ['skip']:
                candidate = set([enc, dec, skip]) & valid_locations
                if candidate:
                    combos.append(candidate)
    return combos
def run_bulk():
    """Stylise up to the first 50 images in ``config.content`` with the
    single image ``config.style`` and write results under ``config.output``.

    Reads all options from the module-level ``config`` namespace created by
    argparse in the ``__main__`` block.
    """
    accurate_segment = True  # NOTE(review): unused; leftover from the segmentation path.
    device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'
    device = torch.device(device)
    # Collect the requested transfer locations from the CLI flags.
    transfer_at = set()
    if config.transfer_at_encoder:
        transfer_at.add('encoder')
    if config.transfer_at_decoder:
        transfer_at.add('decoder')
    if config.transfer_at_skip:
        transfer_at.add('skip')
    # Resize to (h=400, w=640), then centre-crop to multiples of 16 so the
    # four pooling levels divide evenly.
    cw, ch = 640,400
    c_transforms = transforms.Compose([transforms.Resize((ch,cw), interpolation=Image.NEAREST),transforms.CenterCrop((ch // 16 * 16, cw // 16 * 16)),transforms.ToTensor()])
    fnames = os.listdir(config.content)
    fnames.sort()
    print('transfer at ~~~~',transfer_at)
    # The style image is loaded and preprocessed once, reused per frame.
    style = Image.open(config.style).convert('RGB')
    style = c_transforms(style).unsqueeze(0).to(device)
    sample_fnames = fnames[:50]
    for fname in tqdm.tqdm(sample_fnames):
        if not is_image_file(fname):
            print('invalid file (is not image), ', fname)
            continue
        print('config.wct is ',config.is_wct)
        _content = os.path.join(config.content, fname)
        content = Image.open(_content).convert('RGB')
        content = c_transforms(content).unsqueeze(0).to(device)
        print('current frame {} and shape is {}'.format(fname,content.shape))
        _output = os.path.join(config.output, fname)
        # No segmentation guidance in this script.
        content_segment,style_segment = None,None
        if not config.transfer_all:
            # Single configuration: exactly the locations from the CLI flags.
            with Timer('Elapsed time in whole WCT: {}', config.verbose):
                postfix = '_'.join(sorted(list(transfer_at)))
                fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
                print('------ transfer:', _output)
                # NOTE(review): a fresh WCT2 (and checkpoint load) is created
                # per frame; hoisting it out of the loop would be much faster.
                wct2 = WCT2(transfer_at=transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
                with torch.no_grad():
                    img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)
                    save_image(img.clamp_(0, 1), fname_output, padding=0)
        else:
            # Sweep every combination returned by get_all_transfer().
            for _transfer_at in get_all_transfer():
                print('location for transfer at~~~~',_transfer_at)
                with Timer('Elapsed time in whole WCT: {}', config.verbose):
                    postfix = '_'.join(sorted(list(_transfer_at)))
                    fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
                    print('------ transfer:', fname,'-',_transfer_at)
                    wct2 = WCT2(transfer_at=_transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
                    with torch.no_grad():
                        starttime = datetime.datetime.now()
                        img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)
                        endtime = datetime.datetime.now()
                        print('xiaoke with adin 运行时间为----',(endtime - starttime))
                        save_image(img.clamp_(0, 1), fname_output, padding=0)
if __name__ == '__main__':
    # CLI for the bulk style-transfer script; the parsed namespace is stored
    # in the module-level ``config`` that run_bulk() reads.
    parser = argparse.ArgumentParser()
    # Input/output paths.
    parser.add_argument('--content', type=str, default='./examples/content')
    parser.add_argument('--content_segment', type=str, default='./examples/content_segment')
    parser.add_argument('--style', type=str, default='./examples/style')
    parser.add_argument('--style_segment', type=str, default='./examples/style_segment')
    parser.add_argument('--output', type=str, default='./outputs')
    # Transfer options.
    parser.add_argument('--image_size', type=int, default=512)
    parser.add_argument('--alpha', type=float, default=1)
    parser.add_argument('--option_unpool', type=str, default='cat5', choices=['sum', 'cat5'])
    parser.add_argument('-e', '--transfer_at_encoder', action='store_true')
    parser.add_argument('-d', '--transfer_at_decoder', action='store_true')
    parser.add_argument('-s', '--transfer_at_skip', action='store_true')
    parser.add_argument('-a', '--transfer_all', action='store_true')
    parser.add_argument('--cpu', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--is_wct', action='store_true')
    # Segmentation-network options (accepted for compatibility; not used by run_bulk).
    parser.add_argument('--label_mapping', type=str, default='ade20k_semantic_rel.npy')
    parser.add_argument('--model_path', help='folder to model path', default='baseline-resnet50_dilated8-ppm_bilinear_deepsup')
    parser.add_argument('--arch_encoder', default='resnet50_dilated8', help="architecture of net_encoder")
    parser.add_argument('--arch_decoder', default='ppm_bilinear_deepsup', help="architecture of net_decoder")
    parser.add_argument('--suffix', default='_epoch_20.pth', help="which snapshot to load")
    parser.add_argument('--fc_dim', default=2048, type=int, help='number of features between encoder and decoder')
    parser.add_argument('--num_class', default=150, type=int, help='number of classes')
    # Help-text typo fixed: 'maxmimum' -> 'maximum'.
    parser.add_argument('--padding_constant', default=8, type=int, help='maximum downsampling rate of the network')
    parser.add_argument('--gpu_id', default=0, type=int, help='gpu_id for evaluation')
    # Help-text fix: the two adjacent literals previously concatenated to
    # "...sizes.for multiscale..." with no separating space.
    parser.add_argument('--imgSize', default=[300, 400, 500, 600], nargs='+', type=int, help='list of input image sizes. for multiscale testing, e.g. 300 400 500')
    config = parser.parse_args()
    # NOTE(review): ``transform`` is built but never used below.
    transform = transforms.Compose([transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.])])
    print(config)
    if not os.path.exists(os.path.join(config.output)):
        os.makedirs(os.path.join(config.output))
    run_bulk()
| true | true |
f7156985db791f120b85f1fb241fe315d40a1d08 | 16,415 | py | Python | retrain_yolo.py | mukulbhave/YAD2K | a6174285e036f95df83783b7b4d951094cbb08c8 | [
"MIT"
] | null | null | null | retrain_yolo.py | mukulbhave/YAD2K | a6174285e036f95df83783b7b4d951094cbb08c8 | [
"MIT"
] | null | null | null | retrain_yolo.py | mukulbhave/YAD2K | a6174285e036f95df83783b7b4d951094cbb08c8 | [
"MIT"
] | null | null | null | """
This is a script that can be used to retrain the YOLOv2 model for your own dataset.
"""
import argparse
import os
from PIL import ImageOps
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from keras import regularizers
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from yad2k.models.keras_yolo import (preprocess_true_boxes, yolo_body,
yolo_eval, yolo_head, yolo_loss)
from yad2k.utils.draw_boxes import draw_boxes
import h5py
import io
from yolo_data_gen import *
# Command-line arguments for retraining.
argparser = argparse.ArgumentParser(
    description="Retrain or 'fine-tune' a pretrained YOLOv2 model for your own data.")

argparser.add_argument(
    '-d',
    '--data_path',
    help="path to numpy data file (.npz) containing np.object array 'boxes' and np.uint8 array 'images'",
    default=os.path.join('..', 'DATA', 'underwater_data.npz'))

argparser.add_argument(
    '-a',
    '--anchors_path',
    help='path to anchors file, defaults to yolo_anchors.txt',
    default=os.path.join('model_data', 'yolo_anchors.txt'))

argparser.add_argument(
    '-c',
    '--classes_path',
    help='path to classes file, defaults to pascal_classes.txt',
    default=os.path.join('..', 'DATA', 'underwater_classes.txt'))

# Default anchor (width, height) pairs in final-feature-map cells; these
# appear to be the standard YOLOv2 VOC anchors — confirm against the
# upstream model config.
YOLO_ANCHORS = np.array(
    ((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
     (7.88282, 3.52778), (9.77052, 9.16828)))
def _main(args):
    """Entry point: load dataset/classes/anchors, build the model, train.

    NOTE(review): ``--data_path`` help text promises an .npz file, but the
    file is opened with h5py below — verify which format is actually used.
    """
    data_path = os.path.expanduser(args.data_path)
    classes_path = os.path.expanduser(args.classes_path)
    anchors_path = os.path.expanduser(args.anchors_path)
    class_names = get_classes(classes_path)
    anchors = get_anchors(anchors_path)
    dataset = h5py.File(data_path,'r+')
    # NOTE(review): the anchors loaded from ``anchors_path`` above are
    # immediately discarded in favour of the hard-coded defaults; confirm
    # whether this override is intentional.
    anchors = YOLO_ANCHORS
    #detectors_mask, matching_true_boxes = get_detector_mask(boxes, anchors)
    model_body, model = create_model(anchors, class_names)
    train( model, class_names, anchors, dataset) # image_data, boxes, detectors_mask, matching_true_boxes )
    # TODO use data generator for draw as well
    # draw(model_body,
    #     class_names,
    #     anchors,
    #     image_data,
    #     image_set='all', # assumes test set is 0.9
    #     weights_name='trained_stage_3_best.h5',
    #     save_all=True)
def get_classes(classes_path):
    """Return the class names read from *classes_path*, one per line,
    with surrounding whitespace stripped."""
    with open(classes_path) as handle:
        return [line.strip() for line in handle]
def get_anchors(anchors_path):
    """Load anchors from a comma-separated file.

    Returns an (N, 2) float array of anchor (width, height) pairs; falls
    back to the module-level YOLO_ANCHORS when the file is missing.
    """
    if os.path.isfile(anchors_path):
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)
    else:
        # Bug fix: the original built a Warning instance without emitting it
        # (a silent no-op); actually surface the warning to the user.
        import warnings
        warnings.warn("Could not open anchors file, using default.")
        return YOLO_ANCHORS
# Exactly the same as process_data, but handles images of different sizes
# within one dataset by normalising each image's boxes by its own size.
def scale_data(images, boxes=None):
    '''Decode images to 416x416 float arrays in [0, 1]; normalise boxes.

    ``images`` is a sequence of encoded image byte strings. When ``boxes``
    is given, each flat (class, x_min, y_min, x_max, y_max) array is
    converted to (x_center, y_center, width, height, class) relative to its
    own image's size, and all box arrays are zero-padded to equal length.
    '''
    img_shape = (416,416)
    images = [PIL.Image.open(io.BytesIO(i)) for i in images]

    # Box preprocessing.
    if boxes is not None:
        # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
        boxes = [box.reshape((-1, 5)) for box in boxes]
        # Get box parameters as x_center, y_center, box_width, box_height, class.
        boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
        boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
        # Get the original size of each image and convert coordinates and w/h.
        processed_images = []
        for i,img in enumerate(images):
            orig_size = np.array([images[i].width, images[i].height])
            boxes_xy[i] = boxes_xy[i] / orig_size
            boxes_wh[i] = boxes_wh[i] / orig_size
            images_i = images[i].resize(img_shape, PIL.Image.BICUBIC)
            # Bug fix: np.float was removed in NumPy 1.24; the builtin float
            # is the identical dtype (float64).
            images_i = np.array(images_i, dtype=float)
            processed_images.append(images_i/255)
        boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]

        # Find the max number of boxes across images.
        max_boxes = 0
        for boxz in boxes:
            if boxz.shape[0] > max_boxes:
                max_boxes = boxz.shape[0]

        # Zero-pad every box array to max_boxes rows for batched training.
        for i, boxz in enumerate(boxes):
            if boxz.shape[0] < max_boxes:
                zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
                boxes[i] = np.vstack((boxz, zero_padding))

        return np.array(processed_images), np.array(boxes)
    else:
        # NOTE(review): this branch uses resize_image from yolo_data_gen
        # while the boxes branch uses PIL.Image.resize — confirm both
        # resize paths produce equivalent preprocessing.
        processed_images = [resize_image(i,img_shape[0],img_shape[1],False) for i in images]
        processed_images = [np.array(image, dtype=float) for image in processed_images]
        processed_images = [image/255. for image in processed_images]
        return np.array(processed_images)
def process_data(images, boxes=None):
    '''Decode images to 416x416 float arrays in [0, 1]; normalise boxes.

    ``images`` is a sequence of encoded image byte strings; all images are
    assumed to share the size of the first one (use scale_data for
    variable-size datasets). When ``boxes`` is given, each flat
    (class, x_min, y_min, x_max, y_max) array is converted to
    (x_center, y_center, width, height, class) in image-relative units and
    zero-padded so every image has the same number of boxes.
    '''
    images = [PIL.Image.open(io.BytesIO(i)) for i in images]
    orig_size = np.array([images[0].width, images[0].height])
    orig_size = np.expand_dims(orig_size, axis=0)
    # (Stray debug print of the first image's type removed.)

    # Image preprocessing: resize and scale to [0, 1].
    processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images]
    # Bug fix: np.float was removed in NumPy 1.24; the builtin float is the
    # identical dtype (float64).
    processed_images = [np.array(image, dtype=float) for image in processed_images]
    processed_images = [image/255. for image in processed_images]

    if boxes is not None:
        # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
        boxes = [box.reshape((-1, 5)) for box in boxes]
        # Get box parameters as x_center, y_center, box_width, box_height, class.
        boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
        boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
        boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]
        boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]
        boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]

        # Find the max number of boxes across images.
        max_boxes = 0
        for boxz in boxes:
            if boxz.shape[0] > max_boxes:
                max_boxes = boxz.shape[0]

        # Zero-pad every box array to max_boxes rows for batched training.
        for i, boxz in enumerate(boxes):
            if boxz.shape[0] < max_boxes:
                zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
                boxes[i] = np.vstack((boxz, zero_padding))

        return np.array(processed_images), np.array(boxes)
    else:
        return np.array(processed_images)
def get_detector_mask(boxes, anchors):
    '''
    Precompute detectors_mask and matching_true_boxes for training.

    Detectors mask is 1 for each spatial position in the final conv layer
    and anchor that should be active for the given boxes, 0 otherwise.
    Matching true boxes gives the regression targets for the ground-truth
    box that activated a detector, 0 otherwise.
    '''
    detectors_mask = []
    matching_true_boxes = []
    for box in boxes:
        mask, matching = preprocess_true_boxes(box, anchors, [416, 416])
        detectors_mask.append(mask)
        matching_true_boxes.append(matching)
    return np.array(detectors_mask), np.array(matching_true_boxes)
def create_model(anchors, class_names, load_pretrained=True, freeze_body=True):
    '''
    Build the YOLOv2 model body and the training model with the loss layer.

    # Params:

    load_pretrained: whether to load pretrained weights or initialize all weights

    freeze_body: whether to freeze all weights except for the last layer's

    # Returns:

    model_body: YOLOv2 with a new class-specific output layer

    model: YOLOv2 with the custom yolo_loss Lambda layer appended
    '''

    # Shapes of the precomputed target tensors fed in at train time
    # (13x13 grid, 5 anchors).
    detectors_mask_shape = (13, 13, 5, 1)
    matching_boxes_shape = (13, 13, 5, 5)

    # Create model input layers.
    image_input = Input(shape=(416, 416, 3))
    boxes_input = Input(shape=(None, 5))
    detectors_mask_input = Input(shape=detectors_mask_shape)
    matching_boxes_input = Input(shape=matching_boxes_shape)

    # Create model body, dropping the original final layer.
    yolo_model = yolo_body(image_input, len(anchors), len(class_names))
    topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output)

    if load_pretrained:
        # Cache a "topless" weights file (yolo.h5 minus the last layer) so
        # it is only generated once.
        topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5')
        if not os.path.exists(topless_yolo_path):
            print("CREATING TOPLESS WEIGHTS FILE")
            yolo_path = os.path.join('model_data', 'yolo.h5')
            model_body = load_model(yolo_path)
            model_body = Model(model_body.inputs, model_body.layers[-2].output)
            model_body.save_weights(topless_yolo_path)
        topless_yolo.load_weights(topless_yolo_path)

    if freeze_body:
        # Freeze everything except the new final layer (transfer learning).
        for layer in topless_yolo.layers:
            layer.trainable = False
    # New linear output: 5 box params + class scores per anchor, L2-regularised.
    final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear',kernel_regularizer= regularizers.l2(5e-4))(topless_yolo.output)

    model_body = Model(image_input, final_layer)

    # Place model loss on CPU to reduce GPU memory usage.
    with tf.device('/cpu:0'):
        # TODO: Replace Lambda with custom Keras layer for loss.
        model_loss = Lambda(
            yolo_loss,
            output_shape=(1, ),
            name='yolo_loss',
            arguments={'anchors': anchors,
                       'num_classes': len(class_names)})([
                           model_body.output, boxes_input,
                           detectors_mask_input, matching_boxes_input
                       ])

    model = Model(
        [model_body.input, boxes_input, detectors_mask_input,
         matching_boxes_input], model_loss)

    return model_body, model
def train(model, class_names, anchors, dataset):#image_data, boxes, detectors_mask, matching_true_boxes, validation_split=0.1):
    '''
    Retrain/fine-tune the model in three stages:
    1. 5 epochs with the body frozen (only the new last layer trains);
    2. 30 epochs with all weights unfrozen;
    3. 30 more epochs with checkpointing and early stopping.

    Logs training with TensorBoard and saves weights in the current
    directory; best weights by val_loss are saved as trained_stage_3_best.h5.
    '''
    model.compile(
        optimizer='adam', loss={
            'yolo_loss': lambda y_true, y_pred: y_pred
        })  # This is a hack to use the custom loss function in the last layer.

    logging = TensorBoard()#log_dir='./train_logs', histogram_freq=1, write_graph=False, write_images=True)
    checkpoint = ModelCheckpoint("trained_stage_3_best.h5", monitor='val_loss',
                                 save_weights_only=True, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')

    batch_size = 8
    dataTrain = dataset['train']
    dataVal= dataset['val']
    train_set_size =dataTrain.attrs['dataset_size']
    val_set_size =dataVal.attrs['dataset_size']
    training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
    validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
    # model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
    #           np.zeros(len(image_data)),
    #           validation_split=validation_split,
    #           batch_size=8,
    #           epochs=5,
    #           callbacks=[logging])
    # Stage 1: short warm-up with the frozen body.
    model.fit_generator(generator=training_generator,
                        validation_data=validation_generator,
                        use_multiprocessing=False,
                        epochs=5,verbose = 1, callbacks=[logging])
    model.save_weights('trained_stage_1.h5')

    # Stage 2: rebuild the model fully unfrozen and continue from stage-1 weights.
    model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)

    model.load_weights('trained_stage_1.h5')

    model.compile(
        optimizer='adam', loss={
            'yolo_loss': lambda y_true, y_pred: y_pred
        })  # This is a hack to use the custom loss function in the last layer.

    # model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
    #           np.zeros(len(image_data)),
    #           validation_split=validation_split,
    #           batch_size=8,
    #           epochs=30,
    #           callbacks=[logging])
    # Fresh generators per fit call so iteration restarts cleanly.
    training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
    validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
    model.fit_generator(generator=training_generator,
                        validation_data=validation_generator,
                        use_multiprocessing=False,
                        epochs=30,verbose = 1, callbacks=[logging])

    model.save_weights('trained_stage_2.h5')

    # Stage 3: keep training with best-checkpointing and early stopping.
    training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
    validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
    model.fit_generator(generator=training_generator,
                        validation_data=validation_generator,
                        use_multiprocessing=False,
                        epochs=30,verbose = 1, callbacks=[logging, checkpoint, early_stopping])
    # model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
    #           np.zeros(len(image_data)),
    #           validation_split=validation_split,
    #           batch_size=8,
    #           epochs=30,
    #           callbacks=[logging, checkpoint, early_stopping])

    model.save_weights('trained_stage_3.h5')
def draw(model_body, class_names, anchors, image_data, image_set='val',
        weights_name='trained_stage_3_best.h5', out_path="output_images", save_all=True):
    '''
    Draw predicted bounding boxes on image data and optionally save the
    rendered images to ``out_path``.

    image_set selects which slice of ``image_data`` to use:
    'train' = first 90%, 'val' = last 10%, 'all' = everything.
    '''
    if image_set == 'train':
        image_data = np.array([np.expand_dims(image, axis=0)
                               for image in image_data[:int(len(image_data)*.9)]])
    elif image_set == 'val':
        image_data = np.array([np.expand_dims(image, axis=0)
                               for image in image_data[int(len(image_data)*.9):]])
    elif image_set == 'all':
        image_data = np.array([np.expand_dims(image, axis=0)
                               for image in image_data])
    else:
        # Bug fix: the ValueError was previously constructed but never
        # raised, so an invalid image_set silently fell through and used
        # image_data unsliced.
        raise ValueError("draw argument image_set must be 'train', 'val', or 'all'")
    print(image_data.shape)
    model_body.load_weights(weights_name)

    # Create output variables for prediction.
    yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs, input_image_shape, score_threshold=0.7, iou_threshold=0.7)

    # Run prediction per image.
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    for i in range(len(image_data)):
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                model_body.input: image_data[i],
                input_image_shape: [image_data.shape[2], image_data.shape[3]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for image.'.format(len(out_boxes)))
        print(out_boxes)

        # Plot image with predicted boxes.
        image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
                                    class_names, out_scores,out_path+"\\"+str(i)+'.jpg')
        # Save the image:
        if save_all :
            image = PIL.Image.fromarray(image_with_boxes)
            image.save(os.path.join(out_path,str(i)+'.jpg'))
        # To display (pauses the program):
        plt.imshow(image_with_boxes, interpolation='nearest')
        plt.show()
# Script entry point: parse CLI arguments and kick off retraining.
if __name__ == '__main__':
    args = argparser.parse_args()
    _main(args)
| 39.176611 | 157 | 0.648005 | import argparse
import os
from PIL import ImageOps
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from keras import regularizers
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from yad2k.models.keras_yolo import (preprocess_true_boxes, yolo_body,
yolo_eval, yolo_head, yolo_loss)
from yad2k.utils.draw_boxes import draw_boxes
import h5py
import io
from yolo_data_gen import *
argparser = argparse.ArgumentParser(
description="Retrain or 'fine-tune' a pretrained YOLOv2 model for your own data.")
argparser.add_argument(
'-d',
'--data_path',
help="path to numpy data file (.npz) containing np.object array 'boxes' and np.uint8 array 'images'",
default=os.path.join('..', 'DATA', 'underwater_data.npz'))
argparser.add_argument(
'-a',
'--anchors_path',
help='path to anchors file, defaults to yolo_anchors.txt',
default=os.path.join('model_data', 'yolo_anchors.txt'))
argparser.add_argument(
'-c',
'--classes_path',
help='path to classes file, defaults to pascal_classes.txt',
default=os.path.join('..', 'DATA', 'underwater_classes.txt'))
YOLO_ANCHORS = np.array(
((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
(7.88282, 3.52778), (9.77052, 9.16828)))
def _main(args):
    """Entry point: load dataset/classes/anchors, build the model, train."""
    data_path = os.path.expanduser(args.data_path)
    classes_path = os.path.expanduser(args.classes_path)
    anchors_path = os.path.expanduser(args.anchors_path)
    class_names = get_classes(classes_path)
    anchors = get_anchors(anchors_path)
    dataset = h5py.File(data_path,'r+')
    # NOTE(review): the anchors loaded from ``anchors_path`` above are
    # immediately discarded in favour of the hard-coded defaults; confirm
    # whether this override is intentional.
    anchors = YOLO_ANCHORS
    model_body, model = create_model(anchors, class_names)
    train( model, class_names, anchors, dataset)
def get_classes(classes_path):
    """Return the class names read from *classes_path*, one per line,
    with surrounding whitespace stripped."""
    # Fix: this copy had lost the ``def`` keyword (the line read
    # ``get_classes(classes_path):``), which is a syntax error.
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    return class_names
def get_anchors(anchors_path):
    """Load anchors from a comma-separated file.

    Returns an (N, 2) float array of anchor (width, height) pairs; falls
    back to the module-level YOLO_ANCHORS when the file is missing.
    """
    if os.path.isfile(anchors_path):
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)
    else:
        # Bug fix: the original built a Warning instance without emitting it
        # (a silent no-op); actually surface the warning to the user.
        import warnings
        warnings.warn("Could not open anchors file, using default.")
        return YOLO_ANCHORS
def scale_data(images, boxes=None):
    '''Decode images to 416x416 float arrays in [0, 1]; normalise boxes.

    Same as process_data but handles images of different sizes within one
    dataset by normalising each image's boxes by its own size.
    '''
    img_shape = (416,416)
    images = [PIL.Image.open(io.BytesIO(i)) for i in images]
    if boxes is not None:
        # Flat (class, x_min, y_min, x_max, y_max) -> per-box rows.
        boxes = [box.reshape((-1, 5)) for box in boxes]
        # Convert to (x_center, y_center, width, height).
        boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
        boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
        processed_images = []
        for i,img in enumerate(images):
            # Normalise each image's boxes by its own original size.
            orig_size = np.array([images[i].width, images[i].height])
            boxes_xy[i] = boxes_xy[i] / orig_size
            boxes_wh[i] = boxes_wh[i] / orig_size
            images_i = images[i].resize(img_shape, PIL.Image.BICUBIC)
            # Bug fix: np.float was removed in NumPy 1.24; the builtin float
            # is the identical dtype (float64).
            images_i = np.array(images_i, dtype=float)
            processed_images.append(images_i/255)
        boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]
        # Zero-pad every box array to the max box count for batching.
        max_boxes = 0
        for boxz in boxes:
            if boxz.shape[0] > max_boxes:
                max_boxes = boxz.shape[0]
        for i, boxz in enumerate(boxes):
            if boxz.shape[0] < max_boxes:
                zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
                boxes[i] = np.vstack((boxz, zero_padding))
        return np.array(processed_images), np.array(boxes)
    else:
        processed_images = [resize_image(i,img_shape[0],img_shape[1],False) for i in images]
        processed_images = [np.array(image, dtype=float) for image in processed_images]
        processed_images = [image/255. for image in processed_images]
        return np.array(processed_images)
def process_data(images, boxes=None):
    '''Decode images to 416x416 float arrays in [0, 1]; normalise boxes.

    All images are assumed to share the size of the first one (use
    scale_data for variable-size datasets).
    '''
    images = [PIL.Image.open(io.BytesIO(i)) for i in images]
    orig_size = np.array([images[0].width, images[0].height])
    orig_size = np.expand_dims(orig_size, axis=0)
    # (Stray debug print of the first image's type removed.)
    processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images]
    # Bug fix: np.float was removed in NumPy 1.24; the builtin float is the
    # identical dtype (float64).
    processed_images = [np.array(image, dtype=float) for image in processed_images]
    processed_images = [image/255. for image in processed_images]
    if boxes is not None:
        # Flat (class, x_min, y_min, x_max, y_max) -> per-box rows.
        boxes = [box.reshape((-1, 5)) for box in boxes]
        # Convert to image-relative (x_center, y_center, width, height).
        boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
        boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
        boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]
        boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]
        boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]
        # Zero-pad every box array to the max box count for batching.
        max_boxes = 0
        for boxz in boxes:
            if boxz.shape[0] > max_boxes:
                max_boxes = boxz.shape[0]
        for i, boxz in enumerate(boxes):
            if boxz.shape[0] < max_boxes:
                zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
                boxes[i] = np.vstack((boxz, zero_padding))
        return np.array(processed_images), np.array(boxes)
    else:
        return np.array(processed_images)
def get_detector_mask(boxes, anchors):
    """Precompute per-image detector masks and matching true boxes."""
    detectors_mask = []
    matching_true_boxes = []
    for box in boxes:
        mask, matching = preprocess_true_boxes(box, anchors, [416, 416])
        detectors_mask.append(mask)
        matching_true_boxes.append(matching)
    return np.array(detectors_mask), np.array(matching_true_boxes)
def create_model(anchors, class_names, load_pretrained=True, freeze_body=True):
detectors_mask_shape = (13, 13, 5, 1)
matching_boxes_shape = (13, 13, 5, 5)
image_input = Input(shape=(416, 416, 3))
boxes_input = Input(shape=(None, 5))
detectors_mask_input = Input(shape=detectors_mask_shape)
matching_boxes_input = Input(shape=matching_boxes_shape)
yolo_model = yolo_body(image_input, len(anchors), len(class_names))
topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output)
if load_pretrained:
topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5')
if not os.path.exists(topless_yolo_path):
print("CREATING TOPLESS WEIGHTS FILE")
yolo_path = os.path.join('model_data', 'yolo.h5')
model_body = load_model(yolo_path)
model_body = Model(model_body.inputs, model_body.layers[-2].output)
model_body.save_weights(topless_yolo_path)
topless_yolo.load_weights(topless_yolo_path)
if freeze_body:
for layer in topless_yolo.layers:
layer.trainable = False
final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear',kernel_regularizer= regularizers.l2(5e-4))(topless_yolo.output)
model_body = Model(image_input, final_layer)
with tf.device('/cpu:0'):
model_loss = Lambda(
yolo_loss,
output_shape=(1, ),
name='yolo_loss',
arguments={'anchors': anchors,
'num_classes': len(class_names)})([
model_body.output, boxes_input,
detectors_mask_input, matching_boxes_input
])
model = Model(
[model_body.input, boxes_input, detectors_mask_input,
matching_boxes_input], model_loss)
return model_body, model
def train(model, class_names, anchors, dataset):
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
})
logging = TensorBoard()
checkpoint = ModelCheckpoint("trained_stage_3_best.h5", monitor='val_loss',
save_weights_only=True, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')
batch_size = 8
dataTrain = dataset['train']
dataVal= dataset['val']
train_set_size =dataTrain.attrs['dataset_size']
val_set_size =dataVal.attrs['dataset_size']
training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=5,verbose = 1, callbacks=[logging])
model.save_weights('trained_stage_1.h5')
model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)
model.load_weights('trained_stage_1.h5')
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
})
training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=30,verbose = 1, callbacks=[logging])
model.save_weights('trained_stage_2.h5')
training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=30,verbose = 1, callbacks=[logging, checkpoint, early_stopping])
model.save_weights('trained_stage_3.h5')
def draw(model_body, class_names, anchors, image_data, image_set='val',
weights_name='trained_stage_3_best.h5', out_path="output_images", save_all=True):
if image_set == 'train':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[:int(len(image_data)*.9)]])
elif image_set == 'val':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[int(len(image_data)*.9):]])
elif image_set == 'all':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data])
else:
ValueError("draw argument image_set must be 'train', 'val', or 'all'")
print(image_data.shape)
model_body.load_weights(weights_name)
yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs, input_image_shape, score_threshold=0.7, iou_threshold=0.7)
sess = K.get_session()
if not os.path.exists(out_path):
os.makedirs(out_path)
for i in range(len(image_data)):
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
model_body.input: image_data[i],
input_image_shape: [image_data.shape[2], image_data.shape[3]],
K.learning_phase(): 0
})
print('Found {} boxes for image.'.format(len(out_boxes)))
print(out_boxes)
image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
class_names, out_scores,out_path+"\\"+str(i)+'.jpg')
if save_all :
image = PIL.Image.fromarray(image_with_boxes)
image.save(os.path.join(out_path,str(i)+'.jpg'))
plt.imshow(image_with_boxes, interpolation='nearest')
plt.show()
if __name__ == '__main__':
args = argparser.parse_args()
_main(args)
| true | true |
f7156a419031a548f8c6765c306917d7d0a579d2 | 399 | py | Python | chilled-vibes.py | bcgreen24/ten-lines-or-less | 7a34ff7d7222fd3946e9cbb418afc992bc84e5e5 | [
"MIT"
] | 44 | 2018-08-15T08:32:43.000Z | 2022-02-15T20:25:03.000Z | chilled-vibes.py | bcgreen24/ten-lines-or-less | 7a34ff7d7222fd3946e9cbb418afc992bc84e5e5 | [
"MIT"
] | null | null | null | chilled-vibes.py | bcgreen24/ten-lines-or-less | 7a34ff7d7222fd3946e9cbb418afc992bc84e5e5 | [
"MIT"
] | 7 | 2018-09-08T20:05:58.000Z | 2021-11-22T12:46:15.000Z | Clock.bpm=100; Scale.default="minor"
p1 >> pulse([0,-1,-2,-3], dur=8, lpf=600, lpr=0.2, crush=8) + (0,2,4,const(6))
p3 >> blip(p1.pitch, dur=8, sus=4, room=1, oct=6) + [0,0,0,P*(2,4,3,-1)]
p2 >> saw(P[:5][:9][:16], dur=1/4, oct=var([3,4],[12,4])).penta()
d1 >> play("(x )( x)o{ vx[xx]}", crush=16, rate=.8).every([24,5,3], "stutter", 4, dur=3)
d2 >> play("<-s>< ~*~>").every(30.5, "jump", cycle=32)
| 57 | 88 | 0.526316 | Clock.bpm=100; Scale.default="minor"
p1 >> pulse([0,-1,-2,-3], dur=8, lpf=600, lpr=0.2, crush=8) + (0,2,4,const(6))
p3 >> blip(p1.pitch, dur=8, sus=4, room=1, oct=6) + [0,0,0,P*(2,4,3,-1)]
p2 >> saw(P[:5][:9][:16], dur=1/4, oct=var([3,4],[12,4])).penta()
d1 >> play("(x )( x)o{ vx[xx]}", crush=16, rate=.8).every([24,5,3], "stutter", 4, dur=3)
d2 >> play("<-s>< ~*~>").every(30.5, "jump", cycle=32)
| true | true |
f7156ae46165bfa283ef31892e3fde5d02b6eeab | 166 | py | Python | StarAcmSpider/StarAcmSpider/items.py | MeiK-h/StarACM | 54654bdc19c8eff02c67ba77784d08368570d4e7 | [
"MIT"
] | null | null | null | StarAcmSpider/StarAcmSpider/items.py | MeiK-h/StarACM | 54654bdc19c8eff02c67ba77784d08368570d4e7 | [
"MIT"
] | null | null | null | StarAcmSpider/StarAcmSpider/items.py | MeiK-h/StarACM | 54654bdc19c8eff02c67ba77784d08368570d4e7 | [
"MIT"
] | null | null | null | import scrapy
class StarAcmSpiderItem(scrapy.Item):
username = scrapy.Field()
source = scrapy.Field()
run_id = scrapy.Field()
data = scrapy.Field()
| 18.444444 | 37 | 0.674699 | import scrapy
class StarAcmSpiderItem(scrapy.Item):
username = scrapy.Field()
source = scrapy.Field()
run_id = scrapy.Field()
data = scrapy.Field()
| true | true |
f7156b5c602c9fa2552e9ba98cbbe35c20310e78 | 15,637 | py | Python | MomentumProject/mtInitialize_004.py | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | [
"Apache-2.0"
] | null | null | null | MomentumProject/mtInitialize_004.py | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | [
"Apache-2.0"
] | null | null | null | MomentumProject/mtInitialize_004.py | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | [
"Apache-2.0"
] | null | null | null | import copy
import sys
#if '../PyCommon/modules' not in sys.path:
# sys.path.append('../PyCommon/modules')
if './modules' not in sys.path:
sys.path.append('./modules')
import Resource.ysMotionLoader as yf
import Simulator.ysPhysConfig as ypc
import Math.mmMath as mm
import Motion.ysHierarchyEdit as yme
import Motion.ysMotion as ym
## Constant
HIP = 'Hips'
RIGHT_UP_LEG = 'RightUpLeg'
RIGHT_LEG = 'RightLeg'
RIGHT_FOOT = 'RightFoot'
RIGHT_TOES = 'RightToes'
RIGHT_TOES_END = 'RightToes_Effector'
LEFT_UP_LEG = 'LeftUpLeg'
LEFT_LEG = 'LeftLeg'
LEFT_FOOT = 'LeftFoot'
LEFT_TOES = 'LeftToes'
LEFT_TOES_END = 'LeftToes_Effector'
LEFT_SHOULDER = 'LeftShoulder1'
LEFT_ARM = 'LeftArm'
LEFT_FORE_ARM = 'LeftForeArm'
LEFT_HAND = 'LeftHand'
LEFT_HAND_END = 'LeftHand_Effector'
RIGHT_SHOULDER = 'RightShoulder'
RIGHT_ARM = 'RightArm'
RIGHT_FORE_ARM = 'RightForeArm'
RIGHT_HAND = 'RightHand'
RIGHT_HAND_END = 'RightHand_Effector'
SPINE = 'Spine'
SPINE1 = 'Spine1'
HEAD = 'HEad'
HEAD_END = 'HEad_Effector'
LEFT_PHALANGE = 'LeftForeFoot'
RIGHT_PHALANGE = 'RightForeFoot'
LEFT_TARSUS = 'LeftRearFoot'
RIGHT_TARSUS = 'RightRearFoot'
LEFT_METATARSUS = 'LeftMidFoot'
RIGHT_METATARSUS = 'RightMidFoot'
LEFT_FOOT_SIDE_L = 'LeftFootSideL'
LEFT_FOOT_SIDE_R = 'LeftFootSideR'
RIGHT_FOOT_SIDE_L = 'RightFootSideL'
RIGHT_FOOT_SIDE_R = 'RightFootSideR'
'''
HIP = 'hip'
RIGHT_UP_LEG_DUMMY = 'rightuplegdummy'
RIGHT_UP_LEG = 'rightupleg'
RIGHT_LEG = 'rightleg'
RIGHT_FOOT = 'rightfoot'
RIGHT_TOES = 'righttoes'
RIGHT_TOES_END = 'righttoes_Effector'
LEFT_UP_LEG_DUMMY = 'leftuplegdummy'
LEFT_UP_LEG = 'leftupleg'
LEFT_LEG = 'leftleg'
LEFT_FOOT = 'leftfoot'
LEFT_TOES = 'lefttoes'
LEFT_TOES_END = 'lefttoes_Effector'
LEFT_SHOULDER_DUMMY = 'leftshoulder1dummy'
LEFT_SHOULDER = 'leftshoulder1'
LEFT_ARM = 'leftarm'
LEFT_FORE_ARM = 'leftforearm'
LEFT_HAND = 'lefthand'
LEFT_HAND_END = 'lefthand_Effector'
RIGHT_SHOULDER_DUMMY = 'rightshoulderdummy'
RIGHT_SHOULDER = 'rightshoulder'
RIGHT_ARM = 'rightarm'
RIGHT_FORE_ARM = 'rightforearm'
RIGHT_HAND = 'righthand'
RIGHT_HAND_END = 'righthand_Effector'
SPINE_DUMMY = 'spinedummy'
SPINE = 'spine'
SPINE1 = 'spine1'
HEAD_DUMMY = 'headdummy'
HEAD = 'head'
HEAD_END = 'head_Effector'
'''
STAND = 0
FORWARD_JUMP = 1
TAEKWONDO = 2
## Motion File
#MOTION = STAND
#MOTION = FORWARD_JUMP
MOTION = TAEKWONDO
FOOT_PART_NUM = 3
def create_vchain_5():
# motion
motion = yf.readBvhFile('vchain_5_rotate_root0.bvh', 1)
# world, model
mcfg = ypc.ModelConfig()
mcfg.defaultDensity = 1000.
mcfg.defaultBoneRatio = .8
for i in range(motion[0].skeleton.getElementNum()):
mcfg.addNode(motion[0].skeleton.getElementName(i))
node = mcfg.getNode('link0')
node.width = .3
node.mass = 6.
wcfg = ypc.WorldConfig()
wcfg.planeHeight = 0.
wcfg.useDefaultContactModel = False
stepsPerFrame = 60
wcfg.timeStep = (1/30.)/stepsPerFrame
# parameter
config = {}
config['Kt'] = 20; config['Dt'] = 2*(config['Kt']**.5) # tracking gain
config['Kl'] = 1; config['Dl'] = 2*(config['Kl']**.5) # linear balance gain
config['Kh'] = 1; config['Dh'] = 2*(config['Kh']**.5) # angular balance gain
config['Ks'] = 5000; config['Ds'] = 2*(config['Ks']**.5) # penalty force spring gain
config['Bt'] = 1.
config['Bl'] = 1.
config['Bh'] = 1.
# etc
config['weightMap'] = {}
config['supLink'] = 'link0'
return motion, mcfg, wcfg, stepsPerFrame, config
def create_biped():
# motion
#motionName = 'wd2_n_kick.bvh'
if MOTION == STAND:
motionName = 'wd2_stand.bvh'
elif MOTION == FORWARD_JUMP:
motionName = 'woddy2_jump0.bvh'
elif MOTION == TAEKWONDO :
motionName = './MotionFile/wd2_098_V001.bvh'
#motionName = 'ww13_41_V001.bvh'
motion = yf.readBvhFile(motionName, .01)
yme.removeJoint(motion, HEAD, False)
yme.removeJoint(motion, RIGHT_SHOULDER, False)
yme.removeJoint(motion, LEFT_SHOULDER, False)
if FOOT_PART_NUM == 1 :
yme.removeJoint(motion, RIGHT_TOES_END, False)
yme.removeJoint(motion, LEFT_TOES_END, False)
yme.removeJoint(motion, RIGHT_HAND_END, False)
yme.removeJoint(motion, LEFT_HAND_END, False)
yme.offsetJointLocal(motion, RIGHT_ARM, (.03,-.05,0), False)
yme.offsetJointLocal(motion, LEFT_ARM, (-.03,-.05,0), False)
yme.rotateJointLocal(motion, HIP, mm.exp(mm.v3(1,0,0), .01), False)
yme.rotateJointLocal(motion, LEFT_FOOT, mm.exp(mm.v3(2.5,-0.0,.3), -.5), False)
yme.rotateJointLocal(motion, RIGHT_FOOT, mm.exp(mm.v3(2.5,0.0,-.3), -.5), False)
if MOTION == FORWARD_JUMP:
yme.rotateJointLocal(motion, LEFT_UP_LEG, mm.exp(mm.v3(0.0,.0,1.), .08), False)
yme.rotateJointLocal(motion, LEFT_LEG, mm.exp(mm.v3(0.0,1.0,0.), -.2), False)
if FOOT_PART_NUM > 1:
yme.addJoint(motion, RIGHT_FOOT, RIGHT_TARSUS)
yme.addJoint(motion, RIGHT_TARSUS, 'RIGHT_Dummy1')
yme.addJoint(motion, LEFT_FOOT, LEFT_TARSUS)
yme.addJoint(motion, LEFT_TARSUS, 'LEFT_Dummy1')
yme.rotateJointLocal(motion, LEFT_TOES, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_TOES, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, LEFT_TARSUS, mm.exp(mm.v3(1.,0.0,0.0), .52), False)
yme.rotateJointLocal(motion, RIGHT_TARSUS, mm.exp(mm.v3(1.,0.0,0.0), .52), False)
if FOOT_PART_NUM == 5 :
yme.addJoint(motion, LEFT_FOOT, LEFT_FOOT_SIDE_L)
yme.addJoint(motion, LEFT_FOOT_SIDE_L, 'LEFT_Dummy2')
yme.addJoint(motion, LEFT_FOOT, LEFT_FOOT_SIDE_R)
yme.addJoint(motion, LEFT_FOOT_SIDE_R, 'LEFT_Dummy2')
yme.addJoint(motion, RIGHT_FOOT, RIGHT_FOOT_SIDE_L)
yme.addJoint(motion, RIGHT_FOOT_SIDE_L, 'RIGHT_Dummy2')
yme.addJoint(motion, RIGHT_FOOT, RIGHT_FOOT_SIDE_R)
yme.addJoint(motion, RIGHT_FOOT_SIDE_R, 'RIGHT_Dummy2')
yme.rotateJointLocal(motion, LEFT_FOOT_SIDE_L, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, LEFT_FOOT_SIDE_R, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_FOOT_SIDE_L, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_FOOT_SIDE_R, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.updateGlobalT(motion)
################
if MOTION == FORWARD_JUMP:
motion = motion[515:555]
elif MOTION == TAEKWONDO:
## Taekwondo base-step
motion = motion[0:31]
#motion = motion[564:600]
## Taekwondo turning-kick
#motion = motion[108:-1]
#motion = motion[108:109]
motion[0:0] = [motion[0]]*100
motion.extend([motion[-1]]*5000)
# world, model
mcfg = ypc.ModelConfig()
mcfg.defaultDensity = 1000.
mcfg.defaultBoneRatio = .9
for name in massMap:
node = mcfg.addNode(name)
node.mass = massMap[name]
node = mcfg.getNode(HIP)
node.length = .2
node.width = .25
node = mcfg.getNode(SPINE1)
node.length = .2
node.offset = (0,0,0.1)
node = mcfg.getNode(SPINE)
node.width = .22
#node.length = .2 ####
if FOOT_PART_NUM == 1 :
length1 = .25
width1 = .2
mass1 = 4.
elif FOOT_PART_NUM == 3:
length1 = .1
width1 = .2
mass1 = 1.5
length2 = .1
width2 = .2
mass2 = 1.5
elif FOOT_PART_NUM == 5:
length1 = .1
width1 = .065
mass1 = .5
length2 = .1
width2 = .2
mass2 = 1.5
node = mcfg.getNode(RIGHT_FOOT)
node.length = length1
node.width = width1
node.mass = mass1
node = mcfg.getNode(LEFT_FOOT)
node.length = length1
node.width = width1
node.mass = mass1
if FOOT_PART_NUM == 5:
node = mcfg.getNode(LEFT_FOOT_SIDE_L)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (0.07,0.0,0.015)
node = mcfg.getNode(LEFT_FOOT_SIDE_R)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (-0.07,0.0,0.015)
node = mcfg.getNode(RIGHT_FOOT_SIDE_L)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (0.07,0.0,0.015)
node = mcfg.getNode(RIGHT_FOOT_SIDE_R)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (-0.07,0.0,0.015)
if FOOT_PART_NUM > 1:
node = mcfg.getNode(LEFT_TOES)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.02)
node = mcfg.getNode(RIGHT_TOES)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.02)
node = mcfg.getNode(LEFT_TARSUS)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.08)
node = mcfg.getNode(RIGHT_TARSUS)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.08)
wcfg = ypc.WorldConfig()
wcfg.planeHeight = 0.
wcfg.useDefaultContactModel = False
stepsPerFrame = 30
wcfg.timeStep = (1/30.)/(stepsPerFrame)
#stepsPerFrame = 10
#wcfg.timeStep = (1/120.)/(stepsPerFrame)
#wcfg.timeStep = (1/1800.)
# parameter
config = {}
config['Kt'] = 200; config['Dt'] = 2*(config['Kt']**.5) # tracking gain
config['Kl'] = .10; config['Dl'] = 2*(config['Kl']**.5) # linear balance gain
config['Kh'] = 0.1; config['Dh'] = 2*(config['Kh']**.5) # angular balance gain
config['Ks'] = 20000; config['Ds'] = 2*(config['Ks']**.5) # penalty force spring gain
config['Bt'] = 1.
config['Bl'] = 1.#0.5
config['Bh'] = 1.
if FOOT_PART_NUM == 1:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.1, RIGHT_LEG:.3, LEFT_UP_LEG:.1, LEFT_LEG:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:1., LEFT_FOOT:1., HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1.}
elif FOOT_PART_NUM == 3:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.3, RIGHT_LEG:.3, LEFT_UP_LEG:.3, LEFT_LEG:.3, LEFT_TOES:.3, RIGHT_TOES:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:2.5, LEFT_FOOT:2.5, HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1., LEFT_TOES:.3, RIGHT_TOES:.3}
elif FOOT_PART_NUM == 5:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.1, RIGHT_LEG:.3, LEFT_UP_LEG:.1, LEFT_LEG:.3, LEFT_TOES:.3, RIGHT_TOES:.3, LEFT_TARSUS:.3, RIGHT_TARSUS:.3,
LEFT_FOOT_SIDE_L:.3, LEFT_FOOT_SIDE_R:.3, RIGHT_FOOT_SIDE_L:.3, RIGHT_FOOT_SIDE_R:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:2.5, LEFT_FOOT:2.5, HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1., LEFT_TOES:.3, RIGHT_TOES:.3, LEFT_TARSUS:.3, RIGHT_TARSUS:.3,
LEFT_FOOT_SIDE_L:.3, LEFT_FOOT_SIDE_R:.3, RIGHT_FOOT_SIDE_L:.3, RIGHT_FOOT_SIDE_R:.3}
config['supLink'] = LEFT_FOOT
config['supLink2'] = RIGHT_FOOT
#config['end'] = 'HIP'
config['end'] = SPINE1
config['const'] = HIP
config['root'] = HIP
config['FootPartNum'] = FOOT_PART_NUM
config['FootLPart'] = [LEFT_FOOT, LEFT_TOES, LEFT_TARSUS, LEFT_FOOT_SIDE_L, LEFT_FOOT_SIDE_R ]
config['FootRPart'] = [RIGHT_FOOT, RIGHT_TOES, RIGHT_TARSUS, RIGHT_FOOT_SIDE_L, RIGHT_FOOT_SIDE_R]
return motion, mcfg, wcfg, stepsPerFrame, config
#===============================================================================
# biped config
#===============================================================================
# motion, mesh config
g_motionDirConfigMap = {}
g_motionDirConfigMap['../Data/woody2/Motion/Physics2/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), .05), 'yOffset': .0, 'scale':1.,\
'rootRot': mm.I_SO3()}
g_motionDirConfigMap['../Data/woody2/Motion/Balancing/'] = \
{'footRot': mm.exp(mm.v3(1,-.5,0), -.6), 'yOffset': .0, 'scale':1.,\
'rootRot': mm.exp(mm.v3(1,0,0), .01)}
g_motionDirConfigMap['../Data/woody2/Motion/VideoMotion/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), -.05), 'yOffset': .01, 'scale':2.53999905501,\
'rootRot': mm.exp(mm.v3(1,0,0), .0)}
g_motionDirConfigMap['../Data/woody2/Motion/Samsung/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), -.03), 'yOffset': .0, 'scale':2.53999905501,\
'rootRot': mm.exp(mm.v3(1,0,0), .03)}
#===============================================================================
# # reloadable config
#===============================================================================
def buildMassMap():
massMap = {}
massMap = massMap.fromkeys([HEAD, HEAD_END, HIP, LEFT_ARM, LEFT_FOOT, LEFT_FORE_ARM, LEFT_HAND, LEFT_HAND_END, LEFT_LEG, LEFT_SHOULDER, LEFT_TOES, LEFT_TOES_END, LEFT_UP_LEG, RIGHT_ARM, RIGHT_FOOT, RIGHT_FORE_ARM, RIGHT_HAND, RIGHT_HAND_END, RIGHT_LEG, RIGHT_SHOULDER, RIGHT_TOES, RIGHT_TOES_END, RIGHT_UP_LEG, SPINE, SPINE1, LEFT_PHALANGE, RIGHT_PHALANGE, LEFT_TARSUS, RIGHT_TARSUS
, LEFT_FOOT_SIDE_L, LEFT_FOOT_SIDE_R, RIGHT_FOOT_SIDE_L, RIGHT_FOOT_SIDE_R], 0.)
# torso : 10
massMap[HIP] += 2.
#massMap[SPINE] += 8.
massMap[SPINE] += 8.
# head : 3
massMap[SPINE1] += 3.
# right upper arm : 2
massMap[RIGHT_ARM] += 2.
# left upper arm : 2
massMap[LEFT_ARM] += 2.
# right lower arm : 1
#massMap[RIGHT_FORE_ARM] = 1.
massMap[RIGHT_FORE_ARM] = 2.
# left lower arm : 1
#massMap[LEFT_FORE_ARM] = 1.
massMap[LEFT_FORE_ARM] = 2.
# right thigh : 7
massMap[HIP] += 2.
massMap[RIGHT_UP_LEG] += 5.
# left thigh : 7
massMap[HIP] += 2.
massMap[LEFT_UP_LEG] += 5.
# right shin : 5
massMap[RIGHT_LEG] += 5.
# left shin : 5
massMap[LEFT_LEG] += 5.
# right foot : 4
massMap[RIGHT_FOOT] += 2.
# left foot : 4
massMap[LEFT_FOOT] += 2.
massMap[LEFT_TOES] += 2.
massMap[RIGHT_TOES] += 2.
massMap[LEFT_TARSUS] += 2.
massMap[RIGHT_TARSUS] += 2.
return massMap
massMap = buildMassMap()
| 34.904018 | 387 | 0.592249 | import copy
import sys
if './modules' not in sys.path:
sys.path.append('./modules')
import Resource.ysMotionLoader as yf
import Simulator.ysPhysConfig as ypc
import Math.mmMath as mm
import Motion.ysHierarchyEdit as yme
import Motion.ysMotion as ym
s'
RIGHT_UP_LEG = 'RightUpLeg'
RIGHT_LEG = 'RightLeg'
RIGHT_FOOT = 'RightFoot'
RIGHT_TOES = 'RightToes'
RIGHT_TOES_END = 'RightToes_Effector'
LEFT_UP_LEG = 'LeftUpLeg'
LEFT_LEG = 'LeftLeg'
LEFT_FOOT = 'LeftFoot'
LEFT_TOES = 'LeftToes'
LEFT_TOES_END = 'LeftToes_Effector'
LEFT_SHOULDER = 'LeftShoulder1'
LEFT_ARM = 'LeftArm'
LEFT_FORE_ARM = 'LeftForeArm'
LEFT_HAND = 'LeftHand'
LEFT_HAND_END = 'LeftHand_Effector'
RIGHT_SHOULDER = 'RightShoulder'
RIGHT_ARM = 'RightArm'
RIGHT_FORE_ARM = 'RightForeArm'
RIGHT_HAND = 'RightHand'
RIGHT_HAND_END = 'RightHand_Effector'
SPINE = 'Spine'
SPINE1 = 'Spine1'
HEAD = 'HEad'
HEAD_END = 'HEad_Effector'
LEFT_PHALANGE = 'LeftForeFoot'
RIGHT_PHALANGE = 'RightForeFoot'
LEFT_TARSUS = 'LeftRearFoot'
RIGHT_TARSUS = 'RightRearFoot'
LEFT_METATARSUS = 'LeftMidFoot'
RIGHT_METATARSUS = 'RightMidFoot'
LEFT_FOOT_SIDE_L = 'LeftFootSideL'
LEFT_FOOT_SIDE_R = 'LeftFootSideR'
RIGHT_FOOT_SIDE_L = 'RightFootSideL'
RIGHT_FOOT_SIDE_R = 'RightFootSideR'
STAND = 0
FORWARD_JUMP = 1
TAEKWONDO = 2
EKWONDO
FOOT_PART_NUM = 3
def create_vchain_5():
motion = yf.readBvhFile('vchain_5_rotate_root0.bvh', 1)
mcfg = ypc.ModelConfig()
mcfg.defaultDensity = 1000.
mcfg.defaultBoneRatio = .8
for i in range(motion[0].skeleton.getElementNum()):
mcfg.addNode(motion[0].skeleton.getElementName(i))
node = mcfg.getNode('link0')
node.width = .3
node.mass = 6.
wcfg = ypc.WorldConfig()
wcfg.planeHeight = 0.
wcfg.useDefaultContactModel = False
stepsPerFrame = 60
wcfg.timeStep = (1/30.)/stepsPerFrame
config = {}
config['Kt'] = 20; config['Dt'] = 2*(config['Kt']**.5)
config['Kl'] = 1; config['Dl'] = 2*(config['Kl']**.5)
config['Kh'] = 1; config['Dh'] = 2*(config['Kh']**.5)
config['Ks'] = 5000; config['Ds'] = 2*(config['Ks']**.5)
config['Bt'] = 1.
config['Bl'] = 1.
config['Bh'] = 1.
config['weightMap'] = {}
config['supLink'] = 'link0'
return motion, mcfg, wcfg, stepsPerFrame, config
def create_biped():
if MOTION == STAND:
motionName = 'wd2_stand.bvh'
elif MOTION == FORWARD_JUMP:
motionName = 'woddy2_jump0.bvh'
elif MOTION == TAEKWONDO :
motionName = './MotionFile/wd2_098_V001.bvh'
motion = yf.readBvhFile(motionName, .01)
yme.removeJoint(motion, HEAD, False)
yme.removeJoint(motion, RIGHT_SHOULDER, False)
yme.removeJoint(motion, LEFT_SHOULDER, False)
if FOOT_PART_NUM == 1 :
yme.removeJoint(motion, RIGHT_TOES_END, False)
yme.removeJoint(motion, LEFT_TOES_END, False)
yme.removeJoint(motion, RIGHT_HAND_END, False)
yme.removeJoint(motion, LEFT_HAND_END, False)
yme.offsetJointLocal(motion, RIGHT_ARM, (.03,-.05,0), False)
yme.offsetJointLocal(motion, LEFT_ARM, (-.03,-.05,0), False)
yme.rotateJointLocal(motion, HIP, mm.exp(mm.v3(1,0,0), .01), False)
yme.rotateJointLocal(motion, LEFT_FOOT, mm.exp(mm.v3(2.5,-0.0,.3), -.5), False)
yme.rotateJointLocal(motion, RIGHT_FOOT, mm.exp(mm.v3(2.5,0.0,-.3), -.5), False)
if MOTION == FORWARD_JUMP:
yme.rotateJointLocal(motion, LEFT_UP_LEG, mm.exp(mm.v3(0.0,.0,1.), .08), False)
yme.rotateJointLocal(motion, LEFT_LEG, mm.exp(mm.v3(0.0,1.0,0.), -.2), False)
if FOOT_PART_NUM > 1:
yme.addJoint(motion, RIGHT_FOOT, RIGHT_TARSUS)
yme.addJoint(motion, RIGHT_TARSUS, 'RIGHT_Dummy1')
yme.addJoint(motion, LEFT_FOOT, LEFT_TARSUS)
yme.addJoint(motion, LEFT_TARSUS, 'LEFT_Dummy1')
yme.rotateJointLocal(motion, LEFT_TOES, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_TOES, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, LEFT_TARSUS, mm.exp(mm.v3(1.,0.0,0.0), .52), False)
yme.rotateJointLocal(motion, RIGHT_TARSUS, mm.exp(mm.v3(1.,0.0,0.0), .52), False)
if FOOT_PART_NUM == 5 :
yme.addJoint(motion, LEFT_FOOT, LEFT_FOOT_SIDE_L)
yme.addJoint(motion, LEFT_FOOT_SIDE_L, 'LEFT_Dummy2')
yme.addJoint(motion, LEFT_FOOT, LEFT_FOOT_SIDE_R)
yme.addJoint(motion, LEFT_FOOT_SIDE_R, 'LEFT_Dummy2')
yme.addJoint(motion, RIGHT_FOOT, RIGHT_FOOT_SIDE_L)
yme.addJoint(motion, RIGHT_FOOT_SIDE_L, 'RIGHT_Dummy2')
yme.addJoint(motion, RIGHT_FOOT, RIGHT_FOOT_SIDE_R)
yme.addJoint(motion, RIGHT_FOOT_SIDE_R, 'RIGHT_Dummy2')
yme.rotateJointLocal(motion, LEFT_FOOT_SIDE_L, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, LEFT_FOOT_SIDE_R, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_FOOT_SIDE_L, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_FOOT_SIDE_R, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.updateGlobalT(motion)
tion[0:0] = [motion[0]]*100
motion.extend([motion[-1]]*5000)
mcfg = ypc.ModelConfig()
mcfg.defaultDensity = 1000.
mcfg.defaultBoneRatio = .9
for name in massMap:
node = mcfg.addNode(name)
node.mass = massMap[name]
node = mcfg.getNode(HIP)
node.length = .2
node.width = .25
node = mcfg.getNode(SPINE1)
node.length = .2
node.offset = (0,0,0.1)
node = mcfg.getNode(SPINE)
node.width = .22
FOOT_PART_NUM == 1 :
length1 = .25
width1 = .2
mass1 = 4.
elif FOOT_PART_NUM == 3:
length1 = .1
width1 = .2
mass1 = 1.5
length2 = .1
width2 = .2
mass2 = 1.5
elif FOOT_PART_NUM == 5:
length1 = .1
width1 = .065
mass1 = .5
length2 = .1
width2 = .2
mass2 = 1.5
node = mcfg.getNode(RIGHT_FOOT)
node.length = length1
node.width = width1
node.mass = mass1
node = mcfg.getNode(LEFT_FOOT)
node.length = length1
node.width = width1
node.mass = mass1
if FOOT_PART_NUM == 5:
node = mcfg.getNode(LEFT_FOOT_SIDE_L)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (0.07,0.0,0.015)
node = mcfg.getNode(LEFT_FOOT_SIDE_R)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (-0.07,0.0,0.015)
node = mcfg.getNode(RIGHT_FOOT_SIDE_L)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (0.07,0.0,0.015)
node = mcfg.getNode(RIGHT_FOOT_SIDE_R)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (-0.07,0.0,0.015)
if FOOT_PART_NUM > 1:
node = mcfg.getNode(LEFT_TOES)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.02)
node = mcfg.getNode(RIGHT_TOES)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.02)
node = mcfg.getNode(LEFT_TARSUS)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.08)
node = mcfg.getNode(RIGHT_TARSUS)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.08)
wcfg = ypc.WorldConfig()
wcfg.planeHeight = 0.
wcfg.useDefaultContactModel = False
stepsPerFrame = 30
wcfg.timeStep = (1/30.)/(stepsPerFrame)
config = {}
config['Kt'] = 200; config['Dt'] = 2*(config['Kt']**.5)
config['Kl'] = .10; config['Dl'] = 2*(config['Kl']**.5)
config['Kh'] = 0.1; config['Dh'] = 2*(config['Kh']**.5)
config['Ks'] = 20000; config['Ds'] = 2*(config['Ks']**.5)
config['Bt'] = 1.
config['Bl'] = 1.
config['Bh'] = 1.
if FOOT_PART_NUM == 1:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.1, RIGHT_LEG:.3, LEFT_UP_LEG:.1, LEFT_LEG:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:1., LEFT_FOOT:1., HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1.}
elif FOOT_PART_NUM == 3:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.3, RIGHT_LEG:.3, LEFT_UP_LEG:.3, LEFT_LEG:.3, LEFT_TOES:.3, RIGHT_TOES:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:2.5, LEFT_FOOT:2.5, HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1., LEFT_TOES:.3, RIGHT_TOES:.3}
elif FOOT_PART_NUM == 5:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.1, RIGHT_LEG:.3, LEFT_UP_LEG:.1, LEFT_LEG:.3, LEFT_TOES:.3, RIGHT_TOES:.3, LEFT_TARSUS:.3, RIGHT_TARSUS:.3,
LEFT_FOOT_SIDE_L:.3, LEFT_FOOT_SIDE_R:.3, RIGHT_FOOT_SIDE_L:.3, RIGHT_FOOT_SIDE_R:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:2.5, LEFT_FOOT:2.5, HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1., LEFT_TOES:.3, RIGHT_TOES:.3, LEFT_TARSUS:.3, RIGHT_TARSUS:.3,
LEFT_FOOT_SIDE_L:.3, LEFT_FOOT_SIDE_R:.3, RIGHT_FOOT_SIDE_L:.3, RIGHT_FOOT_SIDE_R:.3}
config['supLink'] = LEFT_FOOT
config['supLink2'] = RIGHT_FOOT
config['end'] = SPINE1
config['const'] = HIP
config['root'] = HIP
config['FootPartNum'] = FOOT_PART_NUM
config['FootLPart'] = [LEFT_FOOT, LEFT_TOES, LEFT_TARSUS, LEFT_FOOT_SIDE_L, LEFT_FOOT_SIDE_R ]
config['FootRPart'] = [RIGHT_FOOT, RIGHT_TOES, RIGHT_TARSUS, RIGHT_FOOT_SIDE_L, RIGHT_FOOT_SIDE_R]
return motion, mcfg, wcfg, stepsPerFrame, config
g_motionDirConfigMap = {}
g_motionDirConfigMap['../Data/woody2/Motion/Physics2/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), .05), 'yOffset': .0, 'scale':1.,\
'rootRot': mm.I_SO3()}
g_motionDirConfigMap['../Data/woody2/Motion/Balancing/'] = \
{'footRot': mm.exp(mm.v3(1,-.5,0), -.6), 'yOffset': .0, 'scale':1.,\
'rootRot': mm.exp(mm.v3(1,0,0), .01)}
g_motionDirConfigMap['../Data/woody2/Motion/VideoMotion/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), -.05), 'yOffset': .01, 'scale':2.53999905501,\
'rootRot': mm.exp(mm.v3(1,0,0), .0)}
g_motionDirConfigMap['../Data/woody2/Motion/Samsung/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), -.03), 'yOffset': .0, 'scale':2.53999905501,\
'rootRot': mm.exp(mm.v3(1,0,0), .03)}
:
massMap = {}
massMap = massMap.fromkeys([HEAD, HEAD_END, HIP, LEFT_ARM, LEFT_FOOT, LEFT_FORE_ARM, LEFT_HAND, LEFT_HAND_END, LEFT_LEG, LEFT_SHOULDER, LEFT_TOES, LEFT_TOES_END, LEFT_UP_LEG, RIGHT_ARM, RIGHT_FOOT, RIGHT_FORE_ARM, RIGHT_HAND, RIGHT_HAND_END, RIGHT_LEG, RIGHT_SHOULDER, RIGHT_TOES, RIGHT_TOES_END, RIGHT_UP_LEG, SPINE, SPINE1, LEFT_PHALANGE, RIGHT_PHALANGE, LEFT_TARSUS, RIGHT_TARSUS
, LEFT_FOOT_SIDE_L, LEFT_FOOT_SIDE_R, RIGHT_FOOT_SIDE_L, RIGHT_FOOT_SIDE_R], 0.)
massMap[HIP] += 2.
massMap[SPINE] += 8.
massMap[SPINE1] += 3.
massMap[RIGHT_ARM] += 2.
massMap[LEFT_ARM] += 2.
massMap[RIGHT_FORE_ARM] = 2.
massMap[LEFT_FORE_ARM] = 2.
massMap[HIP] += 2.
massMap[RIGHT_UP_LEG] += 5.
massMap[HIP] += 2.
massMap[LEFT_UP_LEG] += 5.
massMap[RIGHT_LEG] += 5.
massMap[LEFT_LEG] += 5.
massMap[RIGHT_FOOT] += 2.
massMap[LEFT_FOOT] += 2.
massMap[LEFT_TOES] += 2.
massMap[RIGHT_TOES] += 2.
massMap[LEFT_TARSUS] += 2.
massMap[RIGHT_TARSUS] += 2.
return massMap
massMap = buildMassMap()
| true | true |
f7156c17c1c2dac9f185a10f4aef638483c87e61 | 976 | py | Python | core/forms.py | donnellan0007/blog | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | [
"MIT"
] | null | null | null | core/forms.py | donnellan0007/blog | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | [
"MIT"
] | null | null | null | core/forms.py | donnellan0007/blog | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class ProfileForm(UserCreationForm):
email = forms.EmailField(widget=forms.TextInput(
attrs = {
'type' : 'email',
'placeholder' : ('Email')
}
))
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email']
widgets = {
'username': forms.TextInput(attrs={'placeholder': 'Username'}),
'first_name': forms.TextInput(attrs={'placeholder': 'First Name'}),
'last_name': forms.TextInput(attrs={'placeholder': 'Last Name'}),
'email': forms.TextInput(attrs={'placeholder': 'Email'}),
}
def clean(self):
cleaned_data = super(ProfileForm,self).clean()
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email') | 36.148148 | 79 | 0.609631 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class ProfileForm(UserCreationForm):
email = forms.EmailField(widget=forms.TextInput(
attrs = {
'type' : 'email',
'placeholder' : ('Email')
}
))
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email']
widgets = {
'username': forms.TextInput(attrs={'placeholder': 'Username'}),
'first_name': forms.TextInput(attrs={'placeholder': 'First Name'}),
'last_name': forms.TextInput(attrs={'placeholder': 'Last Name'}),
'email': forms.TextInput(attrs={'placeholder': 'Email'}),
}
def clean(self):
cleaned_data = super(ProfileForm,self).clean()
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email') | true | true |
f7156c4bf5fa7bbe5bdf31d0caf7e0a157cf1469 | 4,015 | py | Python | tests/parser/functions/test_concat.py | Solexplorer/vyper | 135edd6a91d47c72de105066d6e6c1bdfe9ea66e | [
"MIT"
] | 1 | 2021-04-23T21:48:20.000Z | 2021-04-23T21:48:20.000Z | tests/parser/functions/test_concat.py | Solexplorer/vyper | 135edd6a91d47c72de105066d6e6c1bdfe9ea66e | [
"MIT"
] | null | null | null | tests/parser/functions/test_concat.py | Solexplorer/vyper | 135edd6a91d47c72de105066d6e6c1bdfe9ea66e | [
"MIT"
] | 1 | 2020-01-27T05:21:46.000Z | 2020-01-27T05:21:46.000Z | from vyper.exceptions import (
TypeMismatchException,
)
def test_concat(get_contract_with_gas_estimation):
test_concat = """
@public
def foo2(input1: bytes[50], input2: bytes[50]) -> bytes[1000]:
return concat(input1, input2)
@public
def foo3(input1: bytes[50], input2: bytes[50], input3: bytes[50]) -> bytes[1000]:
return concat(input1, input2, input3)
"""
c = get_contract_with_gas_estimation(test_concat)
assert c.foo2(b"h", b"orse") == b"horse"
assert c.foo2(b"h", b"") == b"h"
assert c.foo2(b"", b"") == b""
assert c.foo2(b"", b"orse") == b"orse"
assert c.foo3(b"Buffalo", b" ", b"buffalo") == b"Buffalo buffalo"
assert c.foo2(b"\x36", b"\x35" * 32) == b"\x36" + b"\x35" * 32
assert c.foo2(b"\x36" * 48, b"\x35" * 32) == b"\x36" * 48 + b"\x35" * 32
assert c.foo3(b"horses" * 4, b"mice" * 7, b"crows" * 10) == b"horses" * 4 + b"mice" * 7 + b"crows" * 10 # noqa: E501
print('Passed simple concat test')
def test_concat2(get_contract_with_gas_estimation):
test_concat2 = """
@public
def foo(inp: bytes[50]) -> bytes[1000]:
x: bytes[50] = inp
return concat(x, inp, x, inp, x, inp, x, inp, x, inp)
"""
c = get_contract_with_gas_estimation(test_concat2)
assert c.foo(b"horse" * 9 + b"vyper") == (b"horse" * 9 + b"vyper") * 10
print('Passed second concat test')
def test_crazy_concat_code(get_contract_with_gas_estimation):
crazy_concat_code = """
y: bytes[10]
@public
def krazykonkat(z: bytes[10]) -> bytes[25]:
x: bytes[3] = "cow"
self.y = "horse"
return concat(x, b" ", self.y, b" ", z)
"""
c = get_contract_with_gas_estimation(crazy_concat_code)
assert c.krazykonkat(b"moose") == b'cow horse moose'
print('Passed third concat test')
def test_concat_bytes32(get_contract_with_gas_estimation):
test_concat_bytes32 = """
@public
def sandwich(inp: bytes[100], inp2: bytes32) -> bytes[164]:
return concat(inp2, inp, inp2)
@public
def fivetimes(inp: bytes32) -> bytes[160]:
return concat(inp, inp, inp, inp, inp)
"""
c = get_contract_with_gas_estimation(test_concat_bytes32)
assert c.sandwich(b"cow", b"\x35" * 32) == b"\x35" * 32 + b"cow" + b"\x35" * 32, c.sandwich(b"cow", b"\x35" * 32) # noqa: E501
assert c.sandwich(b"", b"\x46" * 32) == b"\x46" * 64
assert c.sandwich(b"\x57" * 95, b"\x57" * 32) == b"\x57" * 159
assert c.sandwich(b"\x57" * 96, b"\x57" * 32) == b"\x57" * 160
assert c.sandwich(b"\x57" * 97, b"\x57" * 32) == b"\x57" * 161
assert c.fivetimes(b"mongoose" * 4) == b"mongoose" * 20
print("Passed concat bytes32 test")
def test_konkat_code(get_contract_with_gas_estimation):
konkat_code = """
ecks: bytes32
@public
def foo(x: bytes32, y: bytes32) -> bytes[64]:
self.ecks = x
return concat(self.ecks, y)
@public
def goo(x: bytes32, y: bytes32) -> bytes[64]:
self.ecks = x
return concat(self.ecks, y)
@public
def hoo(x: bytes32, y: bytes32) -> bytes[64]:
return concat(x, y)
"""
c = get_contract_with_gas_estimation(konkat_code)
assert c.foo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
assert c.goo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
assert c.hoo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
print('Passed second concat tests')
def test_small_output(get_contract_with_gas_estimation):
code = """
@public
def small_output(a: string[5], b: string[4]) -> string[9]:
c: string[9] = concat(a, b)
return c
"""
c = get_contract_with_gas_estimation(code)
assert c.small_output('abcde', 'fghi') == 'abcdefghi'
assert c.small_output('', '') == ''
def test_large_output(get_contract_with_gas_estimation, assert_compile_failed):
    """A concat result that can exceed the declared output size must be
    rejected at compile time with a TypeMismatchException."""
    source = """
@public
def large_output(a: string[33], b: string[33]) -> string[64]:
    c: string[64] = concat(a, b)
    return c
    """
    assert_compile_failed(
        lambda: get_contract_with_gas_estimation(source),
        TypeMismatchException,
    )
| 30.18797 | 131 | 0.62142 | from vyper.exceptions import (
TypeMismatchException,
)
def test_concat(get_contract_with_gas_estimation):
    """Two- and three-argument ``concat`` on byte strings of assorted
    lengths, including empty operands."""
    test_concat = """
@public
def foo2(input1: bytes[50], input2: bytes[50]) -> bytes[1000]:
    return concat(input1, input2)
@public
def foo3(input1: bytes[50], input2: bytes[50], input3: bytes[50]) -> bytes[1000]:
    return concat(input1, input2, input3)
    """
    c = get_contract_with_gas_estimation(test_concat)
    assert c.foo2(b"h", b"orse") == b"horse"
    assert c.foo2(b"h", b"") == b"h"
    assert c.foo2(b"", b"") == b""
    assert c.foo2(b"", b"orse") == b"orse"
    assert c.foo3(b"Buffalo", b" ", b"buffalo") == b"Buffalo buffalo"
    # Operand sizes chosen to cross the 32-byte word boundary.
    assert c.foo2(b"\x36", b"\x35" * 32) == b"\x36" + b"\x35" * 32
    assert c.foo2(b"\x36" * 48, b"\x35" * 32) == b"\x36" * 48 + b"\x35" * 32
    assert c.foo3(b"horses" * 4, b"mice" * 7, b"crows" * 10) == b"horses" * 4 + b"mice" * 7 + b"crows" * 10
    print('Passed simple concat test')


def test_concat2(get_contract_with_gas_estimation):
    """``concat`` applied to ten copies of the same 50-byte input."""
    test_concat2 = """
@public
def foo(inp: bytes[50]) -> bytes[1000]:
    x: bytes[50] = inp
    return concat(x, inp, x, inp, x, inp, x, inp, x, inp)
    """
    c = get_contract_with_gas_estimation(test_concat2)
    assert c.foo(b"horse" * 9 + b"vyper") == (b"horse" * 9 + b"vyper") * 10
    print('Passed second concat test')


def test_crazy_concat_code(get_contract_with_gas_estimation):
    """``concat`` mixing a local variable, a storage variable and literals."""
    crazy_concat_code = """
y: bytes[10]
@public
def krazykonkat(z: bytes[10]) -> bytes[25]:
    x: bytes[3] = "cow"
    self.y = "horse"
    return concat(x, b" ", self.y, b" ", z)
    """
    c = get_contract_with_gas_estimation(crazy_concat_code)
    assert c.krazykonkat(b"moose") == b'cow horse moose'
    print('Passed third concat test')


def test_concat_bytes32(get_contract_with_gas_estimation):
    """``concat`` accepts fixed-size bytes32 operands alongside
    variable-length byte strings."""
    test_concat_bytes32 = """
@public
def sandwich(inp: bytes[100], inp2: bytes32) -> bytes[164]:
    return concat(inp2, inp, inp2)
@public
def fivetimes(inp: bytes32) -> bytes[160]:
    return concat(inp, inp, inp, inp, inp)
    """
    c = get_contract_with_gas_estimation(test_concat_bytes32)
    assert c.sandwich(b"cow", b"\x35" * 32) == b"\x35" * 32 + b"cow" + b"\x35" * 32, c.sandwich(b"cow", b"\x35" * 32)
    assert c.sandwich(b"", b"\x46" * 32) == b"\x46" * 64
    # Middle-operand lengths straddling a 32-byte word boundary.
    assert c.sandwich(b"\x57" * 95, b"\x57" * 32) == b"\x57" * 159
    assert c.sandwich(b"\x57" * 96, b"\x57" * 32) == b"\x57" * 160
    assert c.sandwich(b"\x57" * 97, b"\x57" * 32) == b"\x57" * 161
    assert c.fivetimes(b"mongoose" * 4) == b"mongoose" * 20
    print("Passed concat bytes32 test")


def test_konkat_code(get_contract_with_gas_estimation):
    """``concat`` reading one operand out of contract storage."""
    konkat_code = """
ecks: bytes32
@public
def foo(x: bytes32, y: bytes32) -> bytes[64]:
    self.ecks = x
    return concat(self.ecks, y)
@public
def goo(x: bytes32, y: bytes32) -> bytes[64]:
    self.ecks = x
    return concat(self.ecks, y)
@public
def hoo(x: bytes32, y: bytes32) -> bytes[64]:
    return concat(x, y)
    """
    c = get_contract_with_gas_estimation(konkat_code)
    assert c.foo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
    assert c.goo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
    assert c.hoo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
    print('Passed second concat tests')


def test_small_output(get_contract_with_gas_estimation):
    """``concat`` into a string type just large enough for the result."""
    code = """
@public
def small_output(a: string[5], b: string[4]) -> string[9]:
    c: string[9] = concat(a, b)
    return c
    """
    c = get_contract_with_gas_estimation(code)
    assert c.small_output('abcde', 'fghi') == 'abcdefghi'
    assert c.small_output('', '') == ''


def test_large_output(get_contract_with_gas_estimation, assert_compile_failed):
    """A ``concat`` result that can exceed the declared output size must
    be rejected at compile time."""
    code = """
@public
def large_output(a: string[33], b: string[33]) -> string[64]:
    c: string[64] = concat(a, b)
    return c
    """
    assert_compile_failed(
        lambda: get_contract_with_gas_estimation(code),
        TypeMismatchException
    )
)
| true | true |
f7156ce7fd453c52f14385b72fc6a38950f75874 | 5,307 | py | Python | nicos_mlz/biodiff/setups/motor.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/biodiff/setups/motor.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-08-18T10:55:42.000Z | 2021-08-18T10:55:42.000Z | nicos_mlz/biodiff/setups/motor.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# NICOS setup: motor axes of the BIODIFF instrument, all driven through
# the FZJ S7 Tango server.
description = 'Axes setup'
group = 'lowlevel'

tango_base = 'tango://phys.biodiff.frm2:10000/biodiff/'

devices = dict(
    # --- Sample positioning -------------------------------------------------
    omega_samplestepper = device('nicos.devices.tango.Motor',
        description = 'Sample stepper omega variant',
        tangodevice = tango_base + 'fzjs7/omega_samplestepper',
        unit = 'deg',
        precision = 0.001,
    ),
    omega_sampletable = device('nicos.devices.tango.Motor',
        description = 'Sample table omega variant',
        tangodevice = tango_base + 'fzjs7/omega_sampletable',
        unit = 'deg',
        precision = 0.001,
    ),
    x_sampletable = device('nicos.devices.tango.Motor',
        description = 'Sample table x axis',
        tangodevice = tango_base + 'fzjs7/x_sampletable',
        unit = 'mm',
        precision = 0.005,
    ),
    y_sampletable = device('nicos.devices.tango.Motor',
        description = 'Sample table y axis',
        tangodevice = tango_base + 'fzjs7/y_sampletable',
        unit = 'mm',
        precision = 0.005,
    ),
    z_sampletable = device('nicos.devices.tango.Motor',
        # Fixed copy-paste error: this is the z axis, not the x axis.
        description = 'Sample table z axis',
        tangodevice = tango_base + 'fzjs7/z_sampletable',
        unit = 'mm',
        precision = 0.005,
    ),
    # --- Monochromator ------------------------------------------------------
    theta_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator theta variant',
        tangodevice = tango_base + 'fzjs7/theta_monochromator',
        unit = 'deg',
        precision = 0.001,
    ),
    tilt_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator tilt',
        tangodevice = tango_base + 'fzjs7/tilt_monochromator',
        unit = 'deg',
        precision = 0.005,
    ),
    x_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator x axis',
        tangodevice = tango_base + 'fzjs7/x_monochromator',
        unit = 'mm',
        precision = 0.002,
    ),
    y_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator y axis',
        tangodevice = tango_base + 'fzjs7/y_monochromator',
        unit = 'mm',
        precision = 0.002,
    ),
    z_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator z axis',
        tangodevice = tango_base + 'fzjs7/z_monochromator',
        unit = 'mm',
        precision = 0.002,
    ),
    # --- Selector arm and slits ---------------------------------------------
    theta2_selectorarm = device('nicos.devices.tango.Motor',
        description = 'Selector arm 2theta variant',
        tangodevice = tango_base + 'fzjs7/2theta_selectorarm',
        unit = 'deg',
        precision = 0.005,
    ),
    d_diaphragm1 = device('nicos.devices.tango.Motor',
        description = 'Slit 1',
        tangodevice = tango_base + 'fzjs7/d_diaphragm1',
        unit = 'mm',
        precision = 0.05,
    ),
    d_diaphragm2 = device('nicos.devices.tango.Motor',
        description = 'Slit 2',
        tangodevice = tango_base + 'fzjs7/d_diaphragm2',
        unit = 'mm',
        precision = 0.05,
    ),
    # --- Detectors ----------------------------------------------------------
    theta2_detectorunit = device('nicos.devices.tango.Motor',
        description = 'Detector unit 2theta variant',
        tangodevice = tango_base + 'fzjs7/2theta_detectorunit',
        unit = 'deg',
        precision = 0.005,
    ),
    z_imageplate = device('nicos.devices.tango.Motor',
        description = 'Neutron image plate z axis',
        tangodevice = tango_base + 'fzjs7/z_neutronimageplate',
        unit = 'mm',
        precision = 0.01,
    ),
    z_CCD = device('nicos.devices.tango.Motor',
        description = 'CCD z axis',
        tangodevice = tango_base + 'fzjs7/z_CCD',
        unit = 'mm',
        precision = 0.01,
    ),
    z_CCDcamera = device('nicos.devices.tango.Motor',
        description = 'CCD camera z axis',
        tangodevice = tango_base + 'fzjs7/z_CCDcamera',
        unit = 'mm',
        precision = 0.01,
    ),
    # theta2_CCDcamera = device('nicos.devices.tango.Motor',
    #     description = 'CCD camera 2theta variant',
    #     tangodevice = tango_base + 'fzjs7/2theta_CCDcamera',
    #     unit = 'deg',
    #     precision = 0.01,
    # ),
    rot_scintillatorhead = device('nicos_mlz.biodiff.devices.motor.S7InterlockMotor',
        description = 'Scintillator head rotation',
        tangodevice = tango_base + 'fzjs7/rot_scintillatorhead',
        unit = 'deg',
        precision = 0.5,
    ),
    # Devices below are kept as placeholders; they are currently unused.
    # omega_samplegoniometer = device('nicos.devices.tango.Motor',
    #     description = 'Sample goniometer omega variant',
    #     tangodevice = tango_base + 'fzjs7/omega_samplegoniometer',
    # ),
    # x_samplegoniometer = device('nicos.devices.tango.Motor',
    #     description = 'Sample goniometer x axis',
    #     tangodevice = tango_base + 'fzjs7/x_samplegoniometer',
    # ),
    # y_samplegoniometer = device('nicos.devices.tango.Motor',
    #     description = 'Sample goniometer y axis',
    #     tangodevice = tango_base + 'fzjs7/y_samplegoniometer',
    # ),
    # rot_diaphragm3 = device('nicos.devices.tango.Motor',
    #     description = 'Slit 3',
    #     tangodevice = tango_base + 'fzjs7/rot_diaphragm3',
    #     unit = 'deg',
    # ),
    # rot_diaphragm4 = device('nicos.devices.tango.Motor',
    #     description = 'Slit 4',
    #     tangodevice = tango_base + 'fzjs7/rot_diaphragm4',
    #     unit = 'deg',
    # ),
)
| 36.349315 | 85 | 0.606181 |
# NICOS setup: motor axes of the BIODIFF instrument, all driven through
# the FZJ S7 Tango server.
description = 'Axes setup'
group = 'lowlevel'

tango_base = 'tango://phys.biodiff.frm2:10000/biodiff/'

devices = dict(
    # --- Sample positioning -------------------------------------------------
    omega_samplestepper = device('nicos.devices.tango.Motor',
        description = 'Sample stepper omega variant',
        tangodevice = tango_base + 'fzjs7/omega_samplestepper',
        unit = 'deg',
        precision = 0.001,
    ),
    omega_sampletable = device('nicos.devices.tango.Motor',
        description = 'Sample table omega variant',
        tangodevice = tango_base + 'fzjs7/omega_sampletable',
        unit = 'deg',
        precision = 0.001,
    ),
    x_sampletable = device('nicos.devices.tango.Motor',
        description = 'Sample table x axis',
        tangodevice = tango_base + 'fzjs7/x_sampletable',
        unit = 'mm',
        precision = 0.005,
    ),
    y_sampletable = device('nicos.devices.tango.Motor',
        description = 'Sample table y axis',
        tangodevice = tango_base + 'fzjs7/y_sampletable',
        unit = 'mm',
        precision = 0.005,
    ),
    z_sampletable = device('nicos.devices.tango.Motor',
        # Fixed copy-paste error: this is the z axis, not the x axis.
        description = 'Sample table z axis',
        tangodevice = tango_base + 'fzjs7/z_sampletable',
        unit = 'mm',
        precision = 0.005,
    ),
    # --- Monochromator ------------------------------------------------------
    theta_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator theta variant',
        tangodevice = tango_base + 'fzjs7/theta_monochromator',
        unit = 'deg',
        precision = 0.001,
    ),
    tilt_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator tilt',
        tangodevice = tango_base + 'fzjs7/tilt_monochromator',
        unit = 'deg',
        precision = 0.005,
    ),
    x_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator x axis',
        tangodevice = tango_base + 'fzjs7/x_monochromator',
        unit = 'mm',
        precision = 0.002,
    ),
    y_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator y axis',
        tangodevice = tango_base + 'fzjs7/y_monochromator',
        unit = 'mm',
        precision = 0.002,
    ),
    z_monochromator = device('nicos.devices.tango.Motor',
        description = 'Monochromator z axis',
        tangodevice = tango_base + 'fzjs7/z_monochromator',
        unit = 'mm',
        precision = 0.002,
    ),
    # --- Selector arm and slits ---------------------------------------------
    theta2_selectorarm = device('nicos.devices.tango.Motor',
        description = 'Selector arm 2theta variant',
        tangodevice = tango_base + 'fzjs7/2theta_selectorarm',
        unit = 'deg',
        precision = 0.005,
    ),
    d_diaphragm1 = device('nicos.devices.tango.Motor',
        description = 'Slit 1',
        tangodevice = tango_base + 'fzjs7/d_diaphragm1',
        unit = 'mm',
        precision = 0.05,
    ),
    d_diaphragm2 = device('nicos.devices.tango.Motor',
        description = 'Slit 2',
        tangodevice = tango_base + 'fzjs7/d_diaphragm2',
        unit = 'mm',
        precision = 0.05,
    ),
    # --- Detectors ----------------------------------------------------------
    theta2_detectorunit = device('nicos.devices.tango.Motor',
        description = 'Detector unit 2theta variant',
        tangodevice = tango_base + 'fzjs7/2theta_detectorunit',
        unit = 'deg',
        precision = 0.005,
    ),
    z_imageplate = device('nicos.devices.tango.Motor',
        description = 'Neutron image plate z axis',
        tangodevice = tango_base + 'fzjs7/z_neutronimageplate',
        unit = 'mm',
        precision = 0.01,
    ),
    z_CCD = device('nicos.devices.tango.Motor',
        description = 'CCD z axis',
        tangodevice = tango_base + 'fzjs7/z_CCD',
        unit = 'mm',
        precision = 0.01,
    ),
    z_CCDcamera = device('nicos.devices.tango.Motor',
        description = 'CCD camera z axis',
        tangodevice = tango_base + 'fzjs7/z_CCDcamera',
        unit = 'mm',
        precision = 0.01,
    ),
    rot_scintillatorhead = device('nicos_mlz.biodiff.devices.motor.S7InterlockMotor',
        description = 'Scintillator head rotation',
        tangodevice = tango_base + 'fzjs7/rot_scintillatorhead',
        unit = 'deg',
        precision = 0.5,
    ),
)
| true | true |
f7156edb72ba4944c07e754e6e68e17a3a4c0c87 | 648 | py | Python | trade_remedies_api/organisations/migrations/0011_organisation_merged_from.py | uktrade/trade-remedies-api | fbe2d142ef099c7244788a0f72dd1003eaa7edce | [
"MIT"
] | 1 | 2020-08-13T10:37:15.000Z | 2020-08-13T10:37:15.000Z | trade_remedies_api/organisations/migrations/0011_organisation_merged_from.py | uktrade/trade-remedies-api | fbe2d142ef099c7244788a0f72dd1003eaa7edce | [
"MIT"
] | 4 | 2020-09-10T13:41:52.000Z | 2020-12-16T09:00:21.000Z | trade_remedies_api/organisations/migrations/0011_organisation_merged_from.py | uktrade/trade-remedies-api | fbe2d142ef099c7244788a0f72dd1003eaa7edce | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-11-06 15:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a self-referential ``merged_from`` foreign key to Organisation.

    The new field records which organisation this one was merged from, if
    any; it is optional (null/blank allowed).
    """

    dependencies = [
        ("organisations", "0010_auto_20191024_1353"),
    ]

    operations = [
        migrations.AddField(
            model_name="organisation",
            name="merged_from",
            field=models.ForeignKey(
                blank=True,
                null=True,
                # PROTECT: the source organisation cannot be deleted while
                # another organisation still records it as a merge source.
                on_delete=django.db.models.deletion.PROTECT,
                related_name="merged_from_org",
                to="organisations.Organisation",
            ),
        ),
    ]
| 24.923077 | 60 | 0.574074 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a self-referential ``merged_from`` foreign key to Organisation.

    The new field records which organisation this one was merged from, if
    any; it is optional (null/blank allowed).
    """

    dependencies = [
        ("organisations", "0010_auto_20191024_1353"),
    ]

    operations = [
        migrations.AddField(
            model_name="organisation",
            name="merged_from",
            field=models.ForeignKey(
                blank=True,
                null=True,
                # PROTECT: the source organisation cannot be deleted while
                # another organisation still records it as a merge source.
                on_delete=django.db.models.deletion.PROTECT,
                related_name="merged_from_org",
                to="organisations.Organisation",
            ),
        ),
    ]
| true | true |
f7156f2a19e53f51807d2c9be830a384fe50f7d0 | 4,368 | py | Python | tests/unit/test_clearmot.py | traffic-ai/EvalDeT | 3b52698e1b03fb9066e3203c2f36aebfa0030aba | [
"Apache-2.0"
] | 2 | 2021-12-19T21:55:12.000Z | 2021-12-19T21:55:19.000Z | tests/unit/test_clearmot.py | sasp-ai/EvalDeT | 3b52698e1b03fb9066e3203c2f36aebfa0030aba | [
"Apache-2.0"
] | 10 | 2021-08-07T09:51:27.000Z | 2021-08-29T07:26:07.000Z | tests/unit/test_clearmot.py | traffic-ai/EvalDeT | 3b52698e1b03fb9066e3203c2f36aebfa0030aba | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pytest
from evaldet import Tracks
from evaldet.mot_metrics.clearmot import calculate_clearmot_metrics
def test_missing_frame_hyp():
    """A gt frame with no hypothesis frame at all is counted as a miss."""
    gt = Tracks()
    hyp = Tracks()
    for frame in (0, 1):
        gt.add_frame(frame, [0], np.array([[0, 0, 1, 1]]))
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))

    results = calculate_clearmot_metrics(gt, hyp)
    assert results["IDS"] == 0
    assert results["FP_CLEAR"] == 0
    assert results["FN_CLEAR"] == 1
def test_missing_frame_gt():
    """A hyp frame with no gt frame at all is counted as a false positive."""
    gt = Tracks()
    hyp = Tracks()
    gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
    for frame in (0, 1):
        hyp.add_frame(frame, [0], np.array([[0, 0, 1, 1]]))

    results = calculate_clearmot_metrics(gt, hyp)
    assert results["IDS"] == 0
    assert results["FN_CLEAR"] == 0
    assert results["FP_CLEAR"] == 1
def test_no_association_made():
    """Non-overlapping boxes yield one FN plus one FP and an undefined MOTP."""
    gt = Tracks()
    hyp = Tracks()
    gt.add_frame(0, [0], np.array([[10, 10, 11, 11]]))
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))

    results = calculate_clearmot_metrics(gt, hyp)
    assert results["IDS"] == 0
    assert results["FN_CLEAR"] == 1
    assert results["FP_CLEAR"] == 1
    # MOTA = 1 - (FN + FP + IDS) / n_gt = 1 - 2/1; negative is legal.
    assert results["MOTA"] == -1
    assert np.isnan(results["MOTP"])
@pytest.mark.parametrize("threshold", [0.3, 0.5, 0.7])
def test_dist_threshold(threshold: float):
    """Higher distance thresholds reject more partially overlapping boxes."""
    gt_boxes = np.array([[0, 0, 1, 1]] * 4)
    hyp_boxes = np.array(
        [[0, 0, 1, 0.2], [0, 0, 1, 0.4], [0, 0, 1, 0.6], [0, 0, 1, 0.8]]
    )
    gt = Tracks()
    gt.add_frame(0, [0, 1, 2, 3], gt_boxes)
    hyp = Tracks()
    hyp.add_frame(0, [0, 1, 2, 3], hyp_boxes)

    # Expected misses per threshold value.
    expected_fns = {0.3: 3, 0.5: 2, 0.7: 1}
    results = calculate_clearmot_metrics(gt, hyp, dist_threshold=threshold)
    assert results["FN_CLEAR"] == expected_fns[threshold]
def test_sticky_association():
    """An association below the distance threshold must persist, even if a
    detection with a better IoU appears later."""
    gt = Tracks()
    for frame in (0, 1):
        gt.add_frame(frame, [0], np.array([[0, 0, 1, 1]]))
    hyp = Tracks()
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [0, 0, 1, 1]]))

    results = calculate_clearmot_metrics(gt, hyp)
    assert results["IDS"] == 0
    assert results["FN_CLEAR"] == 0
    assert results["FP_CLEAR"] == 1
def test_mismatch():
    """A hypothesis id change over the same gt track is one ID switch."""
    gt = Tracks()
    hyp = Tracks()
    for frame, hyp_id in ((0, 0), (1, 1)):
        gt.add_frame(frame, [0], np.array([[0, 0, 1, 1]]))
        hyp.add_frame(frame, [hyp_id], np.array([[0, 0, 1, 1]]))

    results = calculate_clearmot_metrics(gt, hyp)
    assert results["IDS"] == 1
    assert results["FN_CLEAR"] == 0
    assert results["FP_CLEAR"] == 0
def test_persistent_mismatch():
    """An association survives a frame gap: a different hyp id two frames
    later still counts as an ID switch, and the gap frame as a miss."""
    gt = Tracks()
    for frame in range(3):
        gt.add_frame(frame, [0], np.array([[0, 0, 1, 1]]))
    hyp = Tracks()
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    hyp.add_frame(2, [1], np.array([[0, 0, 1, 1]]))

    results = calculate_clearmot_metrics(gt, hyp)
    assert results["FN_CLEAR"] == 1
    assert results["IDS"] == 1
    assert results["FP_CLEAR"] == 0
def test_simple_case():
    """Three frames, two objects per frame: one miss, one switch, one FP."""
    gt = Tracks()
    gt.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
    gt.add_frame(1, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
    gt.add_frame(2, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
    hyp = Tracks()
    hyp.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
    hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [1, 1, 2, 2]]))
    hyp.add_frame(2, [2, 1], np.array([[0.05, 0.05, 1.05, 1.05], [2, 2, 3, 3]]))

    results = calculate_clearmot_metrics(gt, hyp)
    expected = {
        "FN_CLEAR": 1,
        "IDS": 1,
        "FP_CLEAR": 1,
        "MOTA": 0.5,
        "MOTP": 0.0994008537355717,
    }
    for key, value in expected.items():
        assert results[key] == value
| 30.545455 | 83 | 0.565476 | import numpy as np
import pytest
from evaldet import Tracks
from evaldet.mot_metrics.clearmot import calculate_clearmot_metrics
def test_missing_frame_hyp():
    """A gt frame with no hypothesis frame at all is counted as a miss."""
    gt = Tracks()
    gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
    hyp = Tracks()
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    metrics = calculate_clearmot_metrics(gt, hyp)
    assert metrics["FN_CLEAR"] == 1
    assert metrics["FP_CLEAR"] == 0
    assert metrics["IDS"] == 0


def test_missing_frame_gt():
    """A hyp frame with no gt frame at all is counted as a false positive."""
    gt = Tracks()
    gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
    hyp = Tracks()
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    hyp.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
    metrics = calculate_clearmot_metrics(gt, hyp)
    assert metrics["IDS"] == 0
    assert metrics["FN_CLEAR"] == 0
    assert metrics["FP_CLEAR"] == 1


def test_no_association_made():
    """Non-overlapping boxes yield one FN plus one FP, undefined MOTP."""
    gt = Tracks()
    gt.add_frame(0, [0], np.array([[10, 10, 11, 11]]))
    hyp = Tracks()
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    metrics = calculate_clearmot_metrics(gt, hyp)
    assert metrics["IDS"] == 0
    assert metrics["FN_CLEAR"] == 1
    assert metrics["FP_CLEAR"] == 1
    # MOTA = 1 - (FN + FP + IDS) / n_gt = 1 - 2/1; negative is legal.
    assert metrics["MOTA"] == -1
    assert np.isnan(metrics["MOTP"])


@pytest.mark.parametrize("threshold", [0.3, 0.5, 0.7])
def test_dist_threshold(threshold: float):
    """Higher distance thresholds reject more partially overlapping boxes."""
    gt = Tracks()
    gt.add_frame(
        0,
        [0, 1, 2, 3],
        np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 1, 1]]),
    )
    hyp = Tracks()
    hyp.add_frame(
        0,
        [0, 1, 2, 3],
        np.array([[0, 0, 1, 0.2], [0, 0, 1, 0.4], [0, 0, 1, 0.6], [0, 0, 1, 0.8]]),
    )
    # Expected number of misses per threshold value.
    fn_res = {0.3: 3, 0.5: 2, 0.7: 1}
    metrics = calculate_clearmot_metrics(gt, hyp, dist_threshold=threshold)
    assert fn_res[threshold] == metrics["FN_CLEAR"]


def test_sticky_association():
    """An association below the distance threshold must persist, even if a
    detection with a better IoU appears later."""
    gt = Tracks()
    gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
    hyp = Tracks()
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [0, 0, 1, 1]]))
    metrics = calculate_clearmot_metrics(gt, hyp)
    assert metrics["FN_CLEAR"] == 0
    assert metrics["IDS"] == 0
    assert metrics["FP_CLEAR"] == 1


def test_mismatch():
    """A hypothesis id change over the same gt track is one ID switch."""
    gt = Tracks()
    gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
    hyp = Tracks()
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    hyp.add_frame(1, [1], np.array([[0, 0, 1, 1]]))
    metrics = calculate_clearmot_metrics(gt, hyp)
    assert metrics["FN_CLEAR"] == 0
    assert metrics["IDS"] == 1
    assert metrics["FP_CLEAR"] == 0


def test_persistent_mismatch():
    """An association survives a frame gap: a different hyp id two frames
    later still counts as an ID switch, and the gap frame as a miss."""
    gt = Tracks()
    gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
    gt.add_frame(2, [0], np.array([[0, 0, 1, 1]]))
    hyp = Tracks()
    hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
    hyp.add_frame(2, [1], np.array([[0, 0, 1, 1]]))
    metrics = calculate_clearmot_metrics(gt, hyp)
    assert metrics["FN_CLEAR"] == 1
    assert metrics["IDS"] == 1
    assert metrics["FP_CLEAR"] == 0


def test_simple_case():
    """Three frames, two objects per frame: one miss, one switch, one FP."""
    gt = Tracks()
    gt.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
    gt.add_frame(1, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
    gt.add_frame(2, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
    hyp = Tracks()
    hyp.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
    hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [1, 1, 2, 2]]))
    hyp.add_frame(2, [2, 1], np.array([[0.05, 0.05, 1.05, 1.05], [2, 2, 3, 3]]))
    metrics = calculate_clearmot_metrics(gt, hyp)
    assert metrics["FN_CLEAR"] == 1
    assert metrics["IDS"] == 1
    assert metrics["FP_CLEAR"] == 1
    assert metrics["MOTA"] == 0.5
    assert metrics["MOTP"] == 0.0994008537355717
| true | true |
f7157086f3990ba862350c2dc2e8610185bd0247 | 1,377 | py | Python | transcript/transcript/urls.py | Harrymissi/transcript-system | c7c3a8e505e4e8e5ca6ab5f934338bb8ff314260 | [
"Apache-2.0"
] | 1 | 2019-02-25T23:17:18.000Z | 2019-02-25T23:17:18.000Z | transcript/transcript/urls.py | Harrymissi/transcript-system | c7c3a8e505e4e8e5ca6ab5f934338bb8ff314260 | [
"Apache-2.0"
] | null | null | null | transcript/transcript/urls.py | Harrymissi/transcript-system | c7c3a8e505e4e8e5ca6ab5f934338bb8ff314260 | [
"Apache-2.0"
] | null | null | null | """transcript URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import xadmin
from django.views.generic import TemplateView
from trans_sys.views import user_login, user_info, user_course, user_GPA, user_transcript, changeProfile, change_password
# URL routes for the transcript system.
urlpatterns = [
    # Admin interface (xadmin).
    path('xadmin/', xadmin.site.urls),
    # Static landing page.
    path('index/',TemplateView.as_view(template_name="index.html"),name = "index"),
    path('login/', user_login, name="login"),
    # Student-facing pages.
    path('info/',user_info,name="info"),
    path('course/',user_course,name="course"),
    path('gpa/', user_GPA, name="gpa"),
    path('transcript/', user_transcript, name="transcript"),
    # NOTE(review): these two routes lack the trailing slash the other
    # routes use — confirm this is intentional (APPEND_SLASH behaviour).
    path('changeProfile', changeProfile, name="changeProfile"),
    path('changePWD', change_password, name="changePWD" ),
]
from django.urls import path
import xadmin
from django.views.generic import TemplateView
from trans_sys.views import user_login, user_info, user_course, user_GPA, user_transcript, changeProfile, change_password
# URL routes for the transcript system.
urlpatterns = [
    # Admin interface (xadmin).
    path('xadmin/', xadmin.site.urls),
    # Static landing page.
    path('index/',TemplateView.as_view(template_name="index.html"),name = "index"),
    path('login/', user_login, name="login"),
    # Student-facing pages.
    path('info/',user_info,name="info"),
    path('course/',user_course,name="course"),
    path('gpa/', user_GPA, name="gpa"),
    path('transcript/', user_transcript, name="transcript"),
    # NOTE(review): these two routes lack the trailing slash the other
    # routes use — confirm this is intentional (APPEND_SLASH behaviour).
    path('changeProfile', changeProfile, name="changeProfile"),
    path('changePWD', change_password, name="changePWD" ),
]
f7157154f136fad7994d2221db333cf67ad7e9d1 | 6,774 | py | Python | samples/client/petstore/python-experimental/petstore_api/models/grandparent_animal.py | jonnii/openapi-generator | b828860614df0b5207761c2a34c6a002fb56419b | [
"Apache-2.0"
] | 1 | 2021-01-26T15:23:10.000Z | 2021-01-26T15:23:10.000Z | samples/client/petstore/python-experimental/petstore_api/models/grandparent_animal.py | jonnii/openapi-generator | b828860614df0b5207761c2a34c6a002fb56419b | [
"Apache-2.0"
] | 5 | 2021-03-10T19:39:24.000Z | 2022-02-27T05:24:35.000Z | samples/client/petstore/python-experimental/petstore_api/models/grandparent_animal.py | jonnii/openapi-generator | b828860614df0b5207761c2a34c6a002fb56419b | [
"Apache-2.0"
] | 2 | 2020-08-06T08:52:02.000Z | 2021-05-06T09:22:11.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from petstore_api.models import child_cat
except ImportError:
child_cat = sys.modules[
'petstore_api.models.child_cat']
try:
from petstore_api.models import child_dog
except ImportError:
child_dog = sys.modules[
'petstore_api.models.child_dog']
try:
from petstore_api.models import child_lizard
except ImportError:
child_lizard = sys.modules[
'petstore_api.models.child_lizard']
try:
from petstore_api.models import parent_pet
except ImportError:
parent_pet = sys.modules[
'petstore_api.models.parent_pet']
class GrandparentAnimal(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted properties on this schema.
    allowed_values = {
    }

    # No value validations (length/range/regex) on this schema.
    validations = {
    }

    additional_properties_type = None

    @cached_property
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'pet_type': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # Maps each possible ``pet_type`` value to the model class that
        # deserialization should instantiate for that value.
        val = {
            'ChildCat': child_cat.ChildCat,
            'ChildDog': child_dog.ChildDog,
            'ChildLizard': child_lizard.ChildLizard,
            'ParentPet': parent_pet.ParentPet,
        }
        if not val:
            return None
        return {'pet_type': val}

    attribute_map = {
        'pet_type': 'pet_type',  # noqa: E501
    }

    _composed_schemas = {}

    # Bookkeeping attributes of the model itself — never treated as
    # OpenAPI schema properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_from_server',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, pet_type, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, _visited_composed_classes=(), **kwargs):  # noqa: E501
        """grandparent_animal.GrandparentAnimal - a model defined in OpenAPI

        Args:
            pet_type (str): discriminator value selecting the concrete
                subclass (see ``discriminator``).

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again; prevents infinite
                                recursion through composed/discriminated
                                schemas (e.g. Dog -> Animal -> Dog).
        """

        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.pet_type = pet_type
        # Any extra keyword arguments become dynamic attributes, unless the
        # configuration says to silently drop unknown keys.
        for var_name, var_value in six.iteritems(kwargs):
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 36.815217 | 174 | 0.602303 |
from __future__ import absolute_import
import re
import sys
import six
import nulltype
from petstore_api.model_utils import (
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from petstore_api.models import child_cat
except ImportError:
child_cat = sys.modules[
'petstore_api.models.child_cat']
try:
from petstore_api.models import child_dog
except ImportError:
child_dog = sys.modules[
'petstore_api.models.child_dog']
try:
from petstore_api.models import child_lizard
except ImportError:
child_lizard = sys.modules[
'petstore_api.models.child_lizard']
try:
from petstore_api.models import parent_pet
except ImportError:
parent_pet = sys.modules[
'petstore_api.models.parent_pet']
class GrandparentAnimal(ModelNormal):
    """OpenAPI-generated model with a `pet_type` discriminator.

    NOTE(review): this is generator output (comment-stripped copy); the
    class attributes below are the generator's model metadata contract.
    """
    # No enum constraints for any attribute.
    allowed_values = {
    }
    # No value validations (min/max/regex) for any attribute.
    validations = {
    }
    # Additional (undeclared) properties are not typed.
    additional_properties_type = None
    @cached_property
    def openapi_types():
        # Attribute name -> tuple of accepted types.
        return {
            'pet_type': (str,),
        }
    @cached_property
    def discriminator():
        # Map discriminator values to the concrete model classes.
        val = {
            'ChildCat': child_cat.ChildCat,
            'ChildDog': child_dog.ChildDog,
            'ChildLizard': child_lizard.ChildLizard,
            'ParentPet': parent_pet.ParentPet,
        }
        if not val:
            return None
        return {'pet_type': val}
    # JSON key -> python attribute name (identical here).
    attribute_map = {
        'pet_type': 'pet_type',
    }
    _composed_schemas = {}
    # Instance attributes managed directly rather than via _data_store.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_from_server',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, pet_type, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, _visited_composed_classes=(), **kwargs):
        """Build the model; `pet_type` selects the concrete subclass."""
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Track traversed classes so a discriminator is not followed twice.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.pet_type = pet_type
        # Unknown kwargs become dynamic attributes unless configured otherwise.
        for var_name, var_value in six.iteritems(kwargs):
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                continue
            setattr(self, var_name, var_value)
| true | true |
f715723718b976ccc5b5a3dc7091ae07ddbd3d22 | 3,657 | py | Python | plex/utils/datasource/thetvdb.py | spuriousdata/plex-cli | f2561d1a68917edbc9bbcce39a9548da86d2d7ef | [
"MIT"
] | 1 | 2018-03-26T20:06:57.000Z | 2018-03-26T20:06:57.000Z | plex/utils/datasource/thetvdb.py | spuriousdata/plex-cli | f2561d1a68917edbc9bbcce39a9548da86d2d7ef | [
"MIT"
] | null | null | null | plex/utils/datasource/thetvdb.py | spuriousdata/plex-cli | f2561d1a68917edbc9bbcce39a9548da86d2d7ef | [
"MIT"
] | null | null | null | import json
import requests
class TVDBHttpException(Exception):
    """Raised when TheTVDB API answers with a non-200 HTTP status."""
class TVDB(object):
    """Minimal client for TheTVDB REST API (v2, JWT bearer-token auth).

    Authenticates on construction; all query methods return the decoded
    JSON payload and raise TVDBHttpException on any non-200 response.
    """
    base = 'https://api.thetvdb.com'

    def __init__(self, apikey=None, username=None, userkey=None):
        self.username = username
        self.userkey = userkey
        self.apikey = apikey
        self.authenticate()

    def __get_url(self, path):
        """Join an API path onto the service base URL."""
        return self.base + '/' + path

    def authenticate(self):
        """POST /login and store the JWT used by subsequent requests."""
        data = {
            'apikey': self.apikey,
        }
        # username/userkey are only required for user-scoped endpoints.
        if self.username and self.userkey:
            data.update({
                'username': self.username,
                'userkey': self.userkey,
            })
        response = requests.post(self.__get_url('login'),
                                 headers={
                                     'Accept': 'application/json',
                                     'Content-Type': 'application/json',
                                 },
                                 data=json.dumps(data))
        rdata = response.json()
        if response.status_code != 200:
            raise TVDBHttpException("non 200 response on login: %s" % rdata.get('Error', 'Unknown Error'))
        self.__authtok = rdata['token']

    def __get_json(self, path, params=None, context='request'):
        """Shared authenticated GET helper.

        Deduplicates the request/decode/raise sequence that was previously
        copy-pasted in search(), series_query() and episode(); `context`
        names the operation in the error message (the originals wrongly
        said "search" for every endpoint).
        """
        response = requests.get(self.__get_url(path),
                                headers={
                                    'Accept': 'application/json',
                                    'Authorization': 'Bearer %s' % self.__authtok,
                                },
                                params=params)
        data = response.json()
        if response.status_code != 200:
            raise TVDBHttpException("non 200 response on %s: %s" % (context, data.get('Error', 'Unknown Error')))
        return data

    def search(self, **kwargs):
        """GET /search/series with the given query parameters (e.g. name=...)."""
        return self.__get_json('search/series', params=kwargs, context='search')

    def series_query(self, series=0, season=0):
        """GET the episodes of one aired season of a series."""
        return self.__get_json('series/{id}/episodes/query'.format(id=series),
                               params={'airedSeason': season},
                               context='series query')

    def episode(self, id=0):
        """GET full details for a single episode by id."""
        return self.__get_json('episodes/{id}'.format(id=id), context='episode')
if __name__ == '__main__':
    # Ad-hoc CLI for exercising the client:
    #   thetvdb.py -a APIKEY [-u USER -k USERKEY] METHOD "key=val,key2=val2"
    import sys
    from pprint import pprint as pp
    from argparse import ArgumentParser
    from plex.utils.utils import s2d
    parser = ArgumentParser()
    parser.add_argument('-u', '--username', help='username')
    parser.add_argument('-k', '--userkey', help='userkey')
    parser.add_argument('-a', '--apikey', help='apikey', required=True)
    parser.add_argument('ACTION', help='what to do')
    parser.add_argument('ACTION_ARGS', help='key=val,key2=val2')
    args = parser.parse_args(sys.argv[1:])
    t = TVDB(args.apikey, args.username, args.userkey)
    # ACTION names a TVDB method; s2d presumably parses "k=v,..." into a
    # kwargs dict — confirm against plex.utils.utils.
    pp(getattr(t, args.ACTION)(**s2d(args.ACTION_ARGS)))
| 38.904255 | 106 | 0.508067 | import json
import requests
class TVDBHttpException(Exception):
pass
class TVDB(object):
base = 'https://api.thetvdb.com'
def __init__(self, apikey=None, username=None, userkey=None):
self.username = username
self.userkey = userkey
self.apikey = apikey
self.authenticate()
def __get_url(self, path):
return self.base + '/' + path
def authenticate(self):
data = {
'apikey': self.apikey,
}
if self.username and self.userkey:
data.update({
'username': self.username,
'userkey': self.userkey,
})
response = requests.post(self.__get_url('login'),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
},
data=json.dumps(data))
rdata = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on login: %s" % rdata.get('Error', 'Unknown Error'))
self.__authtok = rdata['token']
def search(self, **kwargs):
response = requests.get(self.__get_url('search/series'),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % self.__authtok,
},
params=kwargs
)
data = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on search: %s" % data.get('Error', 'Unknown Error'))
return data
def series_query(self, series=0, season=0):
response = requests.get(self.__get_url('series/{id}/episodes/query'.format(id=series)),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % self.__authtok,
},
params={'airedSeason': season}
)
data = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on search: %s" % data.get('Error', 'Unknown Error'))
return data
def episode(self, id=0):
response = requests.get(self.__get_url('episodes/{id}'.format(id=id)),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % self.__authtok,
})
data = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on search: %s" % data.get('Error', 'Unknown Error'))
return data
if __name__ == '__main__':
import sys
from pprint import pprint as pp
from argparse import ArgumentParser
from plex.utils.utils import s2d
parser = ArgumentParser()
parser.add_argument('-u', '--username', help='username')
parser.add_argument('-k', '--userkey', help='userkey')
parser.add_argument('-a', '--apikey', help='apikey', required=True)
parser.add_argument('ACTION', help='what to do')
parser.add_argument('ACTION_ARGS', help='key=val,key2=val2')
args = parser.parse_args(sys.argv[1:])
t = TVDB(args.apikey, args.username, args.userkey)
pp(getattr(t, args.ACTION)(**s2d(args.ACTION_ARGS)))
| true | true |
f71572afcd687fc4a51638572448889091aac7fe | 615 | py | Python | wework/migrations/0001_initial.py | edsion1107/pytest_backend | 59caf69226b821497ee19673630226df24d34391 | [
"BSD-3-Clause"
] | null | null | null | wework/migrations/0001_initial.py | edsion1107/pytest_backend | 59caf69226b821497ee19673630226df24d34391 | [
"BSD-3-Clause"
] | 3 | 2020-02-11T23:52:19.000Z | 2021-06-10T21:19:50.000Z | wework/migrations/0001_initial.py | edsion1107/pytest_backend | 59caf69226b821497ee19673630226df24d34391 | [
"BSD-3-Clause"
] | 1 | 2020-11-28T15:25:03.000Z | 2020-11-28T15:25:03.000Z | # Generated by Django 2.1.7 on 2019-02-26 03:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration; do not hand-edit field definitions.
    # First migration of this app: creates the AccessToken table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AccessToken',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create', models.DateTimeField(auto_created=True)),
                ('key', models.CharField(max_length=512)),
                ('expires_in', models.DateTimeField()),
            ],
        ),
    ]
| 25.625 | 114 | 0.573984 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create', models.DateTimeField(auto_created=True)),
('key', models.CharField(max_length=512)),
('expires_in', models.DateTimeField()),
],
),
]
| true | true |
f71573d18019e66119ed0720c4b4edddc4c1a5eb | 987 | py | Python | atom/nucleus/python/test/test_order_reconcile_return_object.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_order_reconcile_return_object.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_order_reconcile_return_object.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.5
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.order_reconcile_return_object import OrderReconcileReturnObject # noqa: E501
from nucleus_api.rest import ApiException
class TestOrderReconcileReturnObject(unittest.TestCase):
    """OrderReconcileReturnObject unit test stubs (swagger-codegen output)."""
    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass
    def tearDown(self):
        pass
    def testOrderReconcileReturnObject(self):
        """Test OrderReconcileReturnObject"""
        # FIXME: construct object with mandatory attributes with example values
        # model = nucleus_api.models.order_reconcile_return_object.OrderReconcileReturnObject()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 24.073171 | 109 | 0.733536 |
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.order_reconcile_return_object import OrderReconcileReturnObject
from nucleus_api.rest import ApiException
class TestOrderReconcileReturnObject(unittest.TestCase):
    """OrderReconcileReturnObject unit test stubs (swagger-codegen output)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testOrderReconcileReturnObject(self):
        # The stripped copy left only the bare name `s` here, which raises
        # NameError whenever the test runs; restore the intended no-op stub.
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| true | true |
f71573ed4848e9b29e823af2889eea2f2d9b5fc1 | 822 | py | Python | inventory/home/view.py | Rohitkuru/Smart-Linux-Box | 0cc1b0c4ebc9edb35b2ba64b51f451d36af87304 | [
"MIT"
] | null | null | null | inventory/home/view.py | Rohitkuru/Smart-Linux-Box | 0cc1b0c4ebc9edb35b2ba64b51f451d36af87304 | [
"MIT"
] | 1 | 2021-03-07T07:59:47.000Z | 2021-03-07T07:59:47.000Z | inventory/home/view.py | Rohitkuru/dynamic-linux-inventory | 0cc1b0c4ebc9edb35b2ba64b51f451d36af87304 | [
"MIT"
] | null | null | null | from flask import Blueprint,render_template,request,flash
from inventory.backend.scripts import scan
from inventory.Crud.operation import add_record
from inventory.models import *
home = Blueprint("home_view",__name__)
@home.route("/", methods=['GET', 'POST'])
def home_view():
    """Render the inventory home page.

    On POST, scan the submitted IP range: a list result means success
    (flash a confirmation and persist it); any other result is treated as
    an error message from scan() and flashed as-is.  Every request path
    renders the full current inventory.
    """
    if request.method == "POST":
        find_result = scan(request.form['range'])
        # isinstance instead of `type(...) == list`; also collapses the
        # three duplicated render_template returns into one.
        if isinstance(find_result, list):
            flash("Search completed and Inventory updated")
            add_record(find_result, request.form['range'])
        else:
            flash(find_result)
    return render_template("home.html", find_result=Linux_inventory.query.all())
| 37.363636 | 87 | 0.70073 | from flask import Blueprint,render_template,request,flash
from inventory.backend.scripts import scan
from inventory.Crud.operation import add_record
from inventory.models import *
home = Blueprint("home_view",__name__)
@home.route("/",methods = ['GET','POST'])
def home_view():
if request.method == "POST":
find_result = scan(request.form['range'])
if type(find_result) == list:
flash("Search completed and Inventory updated")
add_record(find_result,request.form['range'])
return render_template("home.html",find_result=Linux_inventory.query.all())
else:
flash(find_result)
return render_template("home.html",find_result=Linux_inventory.query.all())
return render_template("home.html",find_result=Linux_inventory.query.all())
| true | true |
f7157414e7e3ec2bdef8398e48beb4165dba07b9 | 16,669 | py | Python | MAML-ADML/meta.py | robustmetalearning/robust-meta-learning | 08fc3e9302c9fbd1fcfc3e001e0b080a3c783c81 | [
"MIT"
] | null | null | null | MAML-ADML/meta.py | robustmetalearning/robust-meta-learning | 08fc3e9302c9fbd1fcfc3e001e0b080a3c783c81 | [
"MIT"
] | null | null | null | MAML-ADML/meta.py | robustmetalearning/robust-meta-learning | 08fc3e9302c9fbd1fcfc3e001e0b080a3c783c81 | [
"MIT"
] | null | null | null | import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch import optim
import numpy as np
from learner import Learner
from copy import deepcopy
def zero_nontrainable_grads(grads, trainable_layers=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]):
    """Zero, in place, every gradient whose layer index is not trainable.

    :param grads: sequence of gradient tensors (e.g. from torch.autograd.grad)
    :param trainable_layers: indices whose gradients are kept untouched
    (the mutable default list is never modified, so sharing it is safe)
    """
    for index, grad_tensor in enumerate(grads):
        if index not in trainable_layers:
            # BUGFIX: the original rebound the loop variable
            # (`grad_tensor = torch.zeros_like(grad_tensor)`), which never
            # altered `grads` — the function was a silent no-op.  Zero the
            # tensor's storage in place so callers actually see the effect.
            grad_tensor.data.zero_()
def inputsPGD(metalearner, net, inputs, targets, params = False, evaluate = False):
    """Craft L-infinity PGD adversarial examples for `inputs` against `net`.

    :param metalearner: object carrying the attack hyperparameters
        (attack_epsilon, attack_step_size, attack_steps, eval_attack_steps,
        no_random_start, targeted)
    :param net: model to attack; called as net(x) or net(x, params)
    :param params: optional fast weights forwarded to net (False = use the
        model's own weights; False doubles as the "no params" sentinel)
    :param evaluate: use eval_attack_steps instead of attack_steps
    :return: perturbed inputs, clipped to the epsilon-ball and to [0, 1]
    """
    if evaluate:
        attack_steps = metalearner.eval_attack_steps
    else:
        attack_steps = metalearner.attack_steps
    x = inputs.detach()
    # Optional random start inside the epsilon-ball.
    if not metalearner.no_random_start:
        x = x + torch.zeros_like(x).uniform_(-metalearner.attack_epsilon, metalearner.attack_epsilon)
    for i in range(attack_steps):
        x.requires_grad_()
        # enable_grad so the attack also works when called under no_grad().
        with torch.enable_grad():
            if params:
                loss = F.cross_entropy(net(x, params), targets, size_average=False)
            else:
                loss = F.cross_entropy(net(x), targets, size_average=False)
        grad = torch.autograd.grad(loss, [x])[0]
        # Targeted attacks descend the loss toward `targets`; untargeted ascend.
        if metalearner.targeted:
            x = x.detach() - metalearner.attack_step_size*torch.sign(grad.detach())
        else:
            x = x.detach() + metalearner.attack_step_size*torch.sign(grad.detach())
        # Project back into the epsilon-ball around the clean inputs, then
        # into the valid image range.
        x = torch.min(torch.max(x, inputs - metalearner.attack_epsilon), inputs + metalearner.attack_epsilon)
        x = torch.clamp(x, 0.0, 1.0)
    return x
class Meta(nn.Module):
"""
Meta Learner
"""
    def __init__(self, args, config):
        """
        :param args: parsed command-line namespace with meta-learning and
            PGD-attack hyperparameters
        :param config: network architecture specification passed to Learner
        """
        super(Meta, self).__init__()
        # Which layer indices receive inner-loop (fine-tuning) updates.
        self.finetune_trainable = args.finetune_trainable
        # Inner-loop (task-level) and outer-loop (meta) learning rates.
        self.update_lr = args.update_lr
        self.meta_lr = args.meta_lr
        # Episode shape: n-way classification, k support / k query shots.
        self.n_way = args.n_way
        self.k_spt = args.k_spt
        self.k_qry = args.k_qry
        self.task_num = args.task_num
        # Inner-loop step counts for meta-training and for evaluation.
        self.update_step = args.update_step
        self.update_step_test = args.update_step_test
        # Adversarial-training switches (ADML-style).
        self.attack_query = args.attack_query
        self.attack_support = args.attack_support
        # NOTE(review): stored but not read anywhere in this file — confirm use.
        self.no_attack_validation = args.no_attack_validation
        # PGD hyperparameters.
        self.attack_epsilon = args.attack_epsilon
        self.attack_step_size = args.attack_step_size
        self.attack_steps = args.attack_steps
        self.eval_attack_steps = args.eval_attack_steps
        self.net = Learner(config, args.imgc, args.imgsz)
        self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
        self.no_random_start = args.no_random_start
        self.targeted = args.targeted
def clip_grad_by_norm_(self, grad, max_norm):
"""
in-place gradient clipping.
:param grad: list of gradients
:param max_norm: maximum norm allowable
:return:
"""
total_norm = 0
counter = 0
for g in grad:
param_norm = g.data.norm(2)
total_norm += param_norm.item() ** 2
counter += 1
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grad:
g.data.mul_(clip_coef)
return total_norm/counter
    def forward(self, x_spt, y_spt, x_qry, y_qry):
        """One meta-training step over a batch of tasks (MAML inner+outer loop).

        :param x_spt: [b, setsz, c_, h, w] support images
        :param y_spt: [b, setsz] support labels
        :param x_qry: [b, querysz, c_, h, w] query images
        :param y_qry: [b, querysz] query labels
        :return: query accuracies after inner steps 0..update_step,
                 averaged over the task batch (numpy array)
        """
        task_num, setsz, c_, h, w = x_spt.size()
        querysz = x_qry.size(1)
        losses_q = [0 for _ in range(self.update_step + 1)]  # losses_q[i] is the loss on step i
        corrects = [0 for _ in range(self.update_step + 1)]
        for i in range(task_num):
            # 1. run the i-th task and compute loss for k=0; optionally
            # fine-tune on PGD-attacked support examples (ADML-style).
            if self.attack_support:
                logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i]), vars=None, bn_training=True)
            else:
                logits = self.net(x_spt[i], vars=None, bn_training=True)
            loss = F.cross_entropy(logits, y_spt[i])
            grad = torch.autograd.grad(loss, self.net.parameters())
            # NOTE(review): zero_nontrainable_grads is intended to freeze
            # non-trainable layers before the SGD step below.
            zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
            # One inner SGD step: theta' = theta - update_lr * grad.
            fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))
            # this is the loss and accuracy before first update
            with torch.no_grad():
                # [setsz, nway]
                logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)
                loss_q = F.cross_entropy(logits_q, y_qry[i])
                losses_q[0] += loss_q
                pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                correct = torch.eq(pred_q, y_qry[i]).sum().item()
                corrects[0] = corrects[0] + correct
            # this is the loss and accuracy after the first update
            with torch.no_grad():
                # [setsz, nway]
                logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
                loss_q = F.cross_entropy(logits_q, y_qry[i])
                losses_q[1] += loss_q
                # [setsz]
                pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                correct = torch.eq(pred_q, y_qry[i]).sum().item()
                corrects[1] = corrects[1] + correct
            for k in range(1, self.update_step):
                # 1. run the i-th task and compute loss for k=1~K-1
                if self.attack_support:
                    logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i], params = fast_weights), fast_weights, bn_training=True)
                else:
                    logits = self.net(x_spt[i], fast_weights, bn_training=True)
                loss = F.cross_entropy(logits, y_spt[i])
                # 2. compute grad on theta_pi
                grad = torch.autograd.grad(loss, fast_weights)
                zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
                # 3. theta_pi = theta_pi - train_lr * grad
                fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
                # Query loss for the outer (meta) objective; optionally on
                # PGD-attacked query examples.
                if self.attack_query:
                    logits_q = self.net(inputsPGD(self, self.net, x_qry[i], y_qry[i], params = fast_weights), fast_weights, bn_training=True)
                else:
                    logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
                # loss_q will be overwritten and just keep the loss_q on last update step.
                loss_q = F.cross_entropy(logits_q, y_qry[i])
                losses_q[k + 1] += loss_q
                with torch.no_grad():
                    pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                    correct = torch.eq(pred_q, y_qry[i]).sum().item()  # convert to numpy
                    corrects[k + 1] = corrects[k + 1] + correct
        # end of all tasks
        # sum over all losses on query set across all tasks
        loss_q = losses_q[-1] / task_num
        # optimize theta parameters (outer/meta update)
        self.meta_optim.zero_grad()
        loss_q.backward()
        self.meta_optim.step()
        accs = np.array(corrects) / (querysz * task_num)
        return accs
def finetunning(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [setsz, c_, h, w]
:param y_spt: [setsz]
:param x_qry: [querysz, c_, h, w]
:param y_qry: [querysz]
:return:
"""
assert len(x_spt.shape) == 4
print('Validating...')
querysz = x_qry.size(0)
natural_corrects = [0 for _ in range(self.update_step_test + 1)]
robust_corrects = [0 for _ in range(self.update_step_test + 1)]
# in order to not ruin the state of running_mean/variance and bn_weight/bias
# we finetunning on the copied model instead of self.net
net = deepcopy(self.net)
# 1. run the i-th task and compute loss for k=0
logits = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, net.parameters(), bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[0] = natural_corrects[0] + natural_correct
# [setsz, nway]
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, net.parameters(), evaluate=True), net.parameters(), bn_training=True)
# [setsz]
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
# scalar
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[0] = robust_corrects[0] + robust_correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, fast_weights, bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[1] = natural_corrects[1] + natural_correct
# [setsz, nway]
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
# [setsz]
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
# scalar
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[1] = robust_corrects[1] + robust_correct
for k in range(1, self.update_step_test):
# 1. run the i-th task and compute loss for k=1~K-1
logits = net(x_spt, fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q = net(x_qry, fast_weights, bn_training=True)
# loss_q will be overwritten and just keep the loss_q on last update step.
loss_q = F.cross_entropy(logits_q, y_qry)
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
natural_correct = torch.eq(pred_q, y_qry).sum().item() # convert to numpy
natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
# loss_q will be overwritten and just keep the loss_q on last update step.
robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)
with torch.no_grad():
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item() # convert to numpy
robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct
del net
natural_accs = np.array(natural_corrects) / querysz
robust_accs = np.array(robust_corrects) / querysz
########################### DO THE SAME THING BUT ADVERSARIALLY TRAINED ON SUPPORT ########################
natural_corrects = [0 for _ in range(self.update_step_test + 1)]
robust_corrects = [0 for _ in range(self.update_step_test + 1)]
# in order to not ruin the state of running_mean/variance and bn_weight/bias
# we finetunning on the copied model instead of self.net
net = deepcopy(self.net)
# 1. run the i-th task and compute loss for k=0
logits = net(inputsPGD(self, net, x_spt, y_spt), bn_training=True)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, net.parameters(), bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[0] = natural_corrects[0] + natural_correct
# [setsz, nway]
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, net.parameters(), evaluate=True), net.parameters(), bn_training=True)
# [setsz]
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
# scalar
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[0] = robust_corrects[0] + robust_correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, fast_weights, bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[1] = natural_corrects[1] + natural_correct
# [setsz, nway]
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
# [setsz]
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
# scalar
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[1] = robust_corrects[1] + robust_correct
for k in range(1, self.update_step_test):
# 1. run the i-th task and compute loss for k=1~K-1
logits = net(inputsPGD(self, net, x_spt, y_spt, params = fast_weights), fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q = net(x_qry, fast_weights, bn_training=True)
# loss_q will be overwritten and just keep the loss_q on last update step.
loss_q = F.cross_entropy(logits_q, y_qry)
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
natural_correct = torch.eq(pred_q, y_qry).sum().item() # convert to numpy
natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
# loss_q will be overwritten and just keep the loss_q on last update step.
robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)
with torch.no_grad():
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item() # convert to numpy
robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct
del net
natural_accs_advTrained = np.array(natural_corrects) / querysz
robust_accs_advTrained = np.array(robust_corrects) / querysz
return natural_accs, robust_accs, natural_accs_advTrained, robust_accs_advTrained
def main():
    # Placeholder entry point; this module is used as a library.
    pass
if __name__ == '__main__':
    main()
| 43.183938 | 141 | 0.599736 | import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch import optim
import numpy as np
from learner import Learner
from copy import deepcopy
def zero_nontrainable_grads(grads, trainable_layers=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]):
    """Zero, in place, every gradient whose layer index is not trainable.

    :param grads: sequence of gradient tensors (e.g. from torch.autograd.grad)
    :param trainable_layers: indices whose gradients are kept untouched
    """
    for index, grad_tensor in enumerate(grads):
        if index not in trainable_layers:
            # BUGFIX: the original rebound the loop variable to a new zero
            # tensor, leaving `grads` unchanged (a silent no-op).  Zero the
            # tensor's storage in place instead.
            grad_tensor.data.zero_()
def inputsPGD(metalearner, net, inputs, targets, params = False, evaluate = False):
if evaluate:
attack_steps = metalearner.eval_attack_steps
else:
attack_steps = metalearner.attack_steps
x = inputs.detach()
if not metalearner.no_random_start:
x = x + torch.zeros_like(x).uniform_(-metalearner.attack_epsilon, metalearner.attack_epsilon)
for i in range(attack_steps):
x.requires_grad_()
with torch.enable_grad():
if params:
loss = F.cross_entropy(net(x, params), targets, size_average=False)
else:
loss = F.cross_entropy(net(x), targets, size_average=False)
grad = torch.autograd.grad(loss, [x])[0]
if metalearner.targeted:
x = x.detach() - metalearner.attack_step_size*torch.sign(grad.detach())
else:
x = x.detach() + metalearner.attack_step_size*torch.sign(grad.detach())
x = torch.min(torch.max(x, inputs - metalearner.attack_epsilon), inputs + metalearner.attack_epsilon)
x = torch.clamp(x, 0.0, 1.0)
return x
class Meta(nn.Module):
def __init__(self, args, config):
super(Meta, self).__init__()
self.finetune_trainable = args.finetune_trainable
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.n_way = args.n_way
self.k_spt = args.k_spt
self.k_qry = args.k_qry
self.task_num = args.task_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.attack_query = args.attack_query
self.attack_support = args.attack_support
self.no_attack_validation = args.no_attack_validation
self.attack_epsilon = args.attack_epsilon
self.attack_step_size = args.attack_step_size
self.attack_steps = args.attack_steps
self.eval_attack_steps = args.eval_attack_steps
self.net = Learner(config, args.imgc, args.imgsz)
self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
self.no_random_start = args.no_random_start
self.targeted = args.targeted
def clip_grad_by_norm_(self, grad, max_norm):
total_norm = 0
counter = 0
for g in grad:
param_norm = g.data.norm(2)
total_norm += param_norm.item() ** 2
counter += 1
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grad:
g.data.mul_(clip_coef)
return total_norm/counter
def forward(self, x_spt, y_spt, x_qry, y_qry):
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
losses_q = [0 for _ in range(self.update_step + 1)]
corrects = [0 for _ in range(self.update_step + 1)]
for i in range(task_num):
if self.attack_support:
logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i]), vars=None, bn_training=True)
else:
logits = self.net(x_spt[i], vars=None, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
grad = torch.autograd.grad(loss, self.net.parameters())
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))
with torch.no_grad():
logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[0] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[0] = corrects[0] + correct
with torch.no_grad():
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[1] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step):
if self.attack_support:
logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i], params = fast_weights), fast_weights, bn_training=True)
else:
logits = self.net(x_spt[i], fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
grad = torch.autograd.grad(loss, fast_weights)
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
if self.attack_query:
logits_q = self.net(inputsPGD(self, self.net, x_qry[i], y_qry[i], params = fast_weights), fast_weights, bn_training=True)
else:
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[k + 1] += loss_q
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[k + 1] = corrects[k + 1] + correct
loss_q = losses_q[-1] / task_num
self.meta_optim.zero_grad()
loss_q.backward()
self.meta_optim.step()
accs = np.array(corrects) / (querysz * task_num)
return accs
def finetunning(self, x_spt, y_spt, x_qry, y_qry):
    """
    Evaluate adaptation on one task: fine-tune a copy of the meta-network on
    the support set and record natural and adversarial (PGD) query accuracy
    after each inner-loop step.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump, and the tail of this function (after the first `del net`) repeats
    the evaluation block while still referencing `net` and `k` -- it cannot
    run as written; looks like two variants were merged. Confirm upstream.
    """
    assert len(x_spt.shape) == 4
    print('Validating...')
    querysz = x_qry.size(0)
    # accuracy counters for step 0 (pre-update) .. update_step_test
    natural_corrects = [0 for _ in range(self.update_step_test + 1)]
    robust_corrects = [0 for _ in range(self.update_step_test + 1)]
    # fine-tune on a deep copy so the meta-parameters stay untouched
    net = deepcopy(self.net)
    # first inner-loop step on the support set
    logits = net(x_spt)
    loss = F.cross_entropy(logits, y_spt)
    grad = torch.autograd.grad(loss, net.parameters())
    zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
    fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
    # natural query accuracy before any update
    with torch.no_grad():
        logits_q = net(x_qry, net.parameters(), bn_training=True)
        pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
        natural_correct = torch.eq(pred_q, y_qry).sum().item()
        natural_corrects[0] = natural_corrects[0] + natural_correct
    # adversarial query accuracy before any update; kept outside no_grad,
    # presumably because inputsPGD needs input gradients -- TODO confirm
    robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, net.parameters(), evaluate=True), net.parameters(), bn_training=True)
    robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
    robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
    robust_corrects[0] = robust_corrects[0] + robust_correct
    # accuracy after the first update
    with torch.no_grad():
        logits_q = net(x_qry, fast_weights, bn_training=True)
        pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
        correct = torch.eq(pred_q, y_qry).sum().item()
        # NOTE(review): adds `natural_correct` (the step-0 count) instead of
        # the freshly computed `correct` -- likely a bug; confirm upstream.
        natural_corrects[1] = natural_corrects[1] + natural_correct
    robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
    robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
    robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
    robust_corrects[1] = robust_corrects[1] + robust_correct
    # remaining inner-loop steps
    for k in range(1, self.update_step_test):
        logits = net(x_spt, fast_weights, bn_training=True)
        loss = F.cross_entropy(logits, y_spt)
        grad = torch.autograd.grad(loss, fast_weights)
        zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
        fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
        logits_q = net(x_qry, fast_weights, bn_training=True)
        loss_q = F.cross_entropy(logits_q, y_qry)
        with torch.no_grad():
            pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
            natural_correct = torch.eq(pred_q, y_qry).sum().item()
            natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct
        robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
        robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)
        with torch.no_grad():
            robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
            robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
            robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct
    del net
    natural_accs = np.array(natural_corrects) / querysz
    robust_accs = np.array(robust_corrects) / querysz
    # NOTE(review): the block below duplicates the evaluation above but uses
    # `net` after `del net` and `k` outside its loop -- it would raise
    # NameError if reached. Dead/merged code; confirm and remove upstream.
    pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
    natural_correct = torch.eq(pred_q, y_qry).sum().item()
    natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct
    robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
    robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)
    with torch.no_grad():
        robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
        robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
        robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct
    del net
    natural_accs_advTrained = np.array(natural_corrects) / querysz
    robust_accs_advTrained = np.array(robust_corrects) / querysz
    return natural_accs, robust_accs, natural_accs_advTrained, robust_accs_advTrained
def main():
    """Placeholder entry point; intentionally a no-op."""
    return None
# Script entry point.
if __name__ == '__main__':
    main()
| true | true |
f715745078d64aff302f2395177ab959a49111ab | 3,310 | py | Python | steps/nnet3/train.py | ondrejklejch/learning_to_adapt | 6de0b98370769596da16a1688582925ea2e1fa29 | [
"Apache-2.0"
] | 18 | 2019-10-24T04:42:16.000Z | 2021-11-24T03:07:59.000Z | steps/nnet3/train.py | choko/learning_to_adapt | 6de0b98370769596da16a1688582925ea2e1fa29 | [
"Apache-2.0"
] | null | null | null | steps/nnet3/train.py | choko/learning_to_adapt | 6de0b98370769596da16a1688582925ea2e1fa29 | [
"Apache-2.0"
] | 4 | 2018-08-31T01:08:50.000Z | 2019-05-10T12:12:57.000Z | import sys
import numpy as np
from keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from keras.models import Model
from keras.layers import Input, Activation, Conv1D, BatchNormalization
from keras.optimizers import Adam
from learning_to_adapt.model import LHUC, Renorm
from learning_to_adapt.utils import load_dataset, load_utt_to_spk, load_utt_to_pdfs, load_lda
import keras
import tensorflow as tf
# Pin TensorFlow to single-threaded op execution and install the configured
# session into the Keras backend before any model is built.
config = tf.ConfigProto()
config.intra_op_parallelism_threads=1
config.inter_op_parallelism_threads=1
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
def create_model(hidden_dim=350, lda_path=None):
    """Build the TDNN acoustic model: a frozen LDA input convolution, six
    dilated TDNN layers (each with batch-norm and a frozen LHUC gate), and a
    softmax output over 4208 targets.
    """
    lda_weights, lda_bias = load_lda(lda_path)
    lda_weights = lda_weights.reshape((5, 40, 200))

    inputs = Input(shape=(None, 40))
    hidden = Conv1D(200, kernel_size=5, name="lda", trainable=False, weights=[lda_weights, lda_bias])(inputs)

    # (kernel_size, dilation_rate) for each TDNN layer, in order.
    tdnn_config = [(1, 1), (2, 3), (2, 6), (2, 9), (2, 6), (1, 1)]
    for layer_index, (kernel_size, dilation_rate) in enumerate(tdnn_config, start=1):
        name = "tdnn%d" % layer_index
        hidden = Conv1D(hidden_dim, kernel_size=kernel_size, dilation_rate=dilation_rate, activation="relu", name="%s.affine" % name)(hidden)
        hidden = BatchNormalization(name="%s.batchnorm" % name)(hidden)
        hidden = LHUC(name="lhuc.%s" % name, trainable=False)(hidden)

    outputs = Conv1D(4208, kernel_size=1, activation="softmax", name="output.affine")(hidden)
    return Model(inputs=[inputs], outputs=[outputs])
if __name__ == '__main__':
    # Positional CLI arguments: data archives, speaker map, pdf targets,
    # acoustic context sizes, LDA transform, and the output directory.
    train_data = sys.argv[1]
    val_data = sys.argv[2]
    utt2spk = sys.argv[3]
    pdfs = sys.argv[4]
    left_context = int(sys.argv[5])
    right_context = int(sys.argv[6])
    lda_path = sys.argv[7]
    output_path = sys.argv[8]

    # Training hyper-parameters.
    num_epochs = 400
    batch_size = 256
    learning_rate = 0.0015

    # Utterance-level lookup tables for speakers and pdf targets.
    utt_to_spk = load_utt_to_spk(utt2spk)
    utt_to_pdfs = load_utt_to_pdfs(pdfs)

    # Training stream: fixed-size chunks, batched and prefetched.
    train_dataset = load_dataset(train_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)
    train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
    train_dataset = train_dataset.prefetch(1024)
    x, _, y = train_dataset.make_one_shot_iterator().get_next()

    # Validation stream: first 512 batches, cached and repeated each epoch.
    val_dataset = load_dataset(val_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)
    val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
    val_dataset = val_dataset.take(512).cache().repeat()
    val_x, _, val_y = val_dataset.make_one_shot_iterator().get_next()

    model = create_model(600, lda_path)
    model.compile(
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
        optimizer=Adam(lr=learning_rate, amsgrad=True, clipvalue=1.)
    )

    # CSV history, periodic + best-model checkpoints, and a linear LR decay
    # from learning_rate towards learning_rate / 10 over num_epochs.
    callbacks = [
        CSVLogger(output_path + "model.csv"),
        ModelCheckpoint(filepath=output_path + "model.{epoch:02d}.h5", save_best_only=False, period=10),
        ModelCheckpoint(filepath=output_path + "model.best.h5", save_best_only=True),
        LearningRateScheduler(lambda epoch, lr: learning_rate - epoch * (learning_rate - learning_rate / 10) / num_epochs, verbose=0)
    ]

    model.fit(x, y,
        steps_per_epoch=2000,
        epochs=num_epochs,
        validation_data=(val_x, val_y),
        validation_steps=512,
        callbacks=callbacks
    )
| 37.613636 | 161 | 0.710876 | import sys
import numpy as np
from keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from keras.models import Model
from keras.layers import Input, Activation, Conv1D, BatchNormalization
from keras.optimizers import Adam
from learning_to_adapt.model import LHUC, Renorm
from learning_to_adapt.utils import load_dataset, load_utt_to_spk, load_utt_to_pdfs, load_lda
import keras
import tensorflow as tf
config = tf.ConfigProto()
config.intra_op_parallelism_threads=1
config.inter_op_parallelism_threads=1
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
def create_model(hidden_dim=350, lda_path=None):
lda, bias = load_lda(lda_path)
lda = lda.reshape((5, 40, 200))
feats = Input(shape=(None, 40))
x = Conv1D(200, kernel_size=5, name="lda", trainable=False, weights=[lda, bias])(feats)
layers = [(1, 1), (2, 3), (2, 6), (2, 9), (2, 6), (1, 1)]
for i, (kernel_size, dilation_rate) in enumerate(layers):
name = "tdnn%d" % (i + 1)
x = Conv1D(hidden_dim, kernel_size=kernel_size, dilation_rate=dilation_rate, activation="relu", name="%s.affine" % name)(x)
x = BatchNormalization(name="%s.batchnorm" % name)(x)
x = LHUC(name="lhuc.%s" % name, trainable=False)(x)
y = Conv1D(4208, kernel_size=1, activation="softmax", name="output.affine")(x)
return Model(inputs=[feats], outputs=[y])
if __name__ == '__main__':
train_data = sys.argv[1]
val_data = sys.argv[2]
utt2spk = sys.argv[3]
pdfs = sys.argv[4]
left_context = int(sys.argv[5])
right_context = int(sys.argv[6])
lda_path = sys.argv[7]
output_path = sys.argv[8]
num_epochs = 400
batch_size = 256
learning_rate = 0.0015
utt_to_spk = load_utt_to_spk(utt2spk)
utt_to_pdfs = load_utt_to_pdfs(pdfs)
train_dataset = load_dataset(train_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)
train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.prefetch(1024)
x, _, y = train_dataset.make_one_shot_iterator().get_next()
val_dataset = load_dataset(val_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)
val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
val_dataset = val_dataset.take(512).cache().repeat()
val_x, _, val_y = val_dataset.make_one_shot_iterator().get_next()
model = create_model(600, lda_path)
model.compile(
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
optimizer=Adam(lr=learning_rate, amsgrad=True, clipvalue=1.)
)
callbacks = [
CSVLogger(output_path + "model.csv"),
ModelCheckpoint(filepath=output_path + "model.{epoch:02d}.h5", save_best_only=False, period=10),
ModelCheckpoint(filepath=output_path + "model.best.h5", save_best_only=True),
LearningRateScheduler(lambda epoch, lr: learning_rate - epoch * (learning_rate - learning_rate / 10) / num_epochs, verbose=0)
]
model.fit(x, y,
steps_per_epoch=2000,
epochs=num_epochs,
validation_data=(val_x, val_y),
validation_steps=512,
callbacks=callbacks
)
| true | true |
f71574ae5ca34081f8ffb4a0fb83b14cf338b46f | 2,364 | py | Python | tests/test_euromil.py | rse01/pyeuromil | 17f7c800f6f10289d3211bd9d783d1f516594f6c | [
"MIT"
] | null | null | null | tests/test_euromil.py | rse01/pyeuromil | 17f7c800f6f10289d3211bd9d783d1f516594f6c | [
"MIT"
] | null | null | null | tests/test_euromil.py | rse01/pyeuromil | 17f7c800f6f10289d3211bd9d783d1f516594f6c | [
"MIT"
] | null | null | null | """ Unit tests for euromil.py """
from datetime import date
import pytest
from pyeuromil import euro_results, euro_draw_dates, euro_stats
def test_euromil_results_year_not_exist():
    """euro_results rejects non-date / out-of-range year arguments.

    The original asserted on `results` after a call made inside
    pytest.raises; that name is never bound when the call raises, so the
    old asserts could only produce a NameError. Assert the exception for
    each invalid argument instead.
    """
    with pytest.raises(ValueError):
        euro_results("abcd")
    with pytest.raises(ValueError):
        euro_results(1920)
    with pytest.raises(ValueError):
        euro_results(2999)
def test_euromil_results_invalid_date():
    """euro_results rejects arguments that are not dates.

    As in the year test, the old `assert results is None` lines referenced a
    name never bound when the guarded call raises; assert the ValueError
    directly instead.
    """
    with pytest.raises(ValueError):
        euro_results("111")
    with pytest.raises(ValueError):
        euro_results(date(2011, 1, 1), "111")
def test_euromil_results_no_param():
    """With no arguments, results span the full 2011-2020 range."""
    all_results = euro_results()
    first, last = all_results[0], all_results[-1]
    assert first.date.year == 2011
    assert last.date.year == 2020
def test_euromil_results_start_date_only():
    """With only a start date, results begin at the first draw after it."""
    start = date(2012, 12, 12)
    draws = euro_results(start)
    assert draws[0].date == date(2012, 12, 28)
    assert draws[-1].date > date(2018, 1, 1)
def test_euromil_results_both_dates_empty():
    """A window containing no draw yields an empty result list."""
    assert euro_results(date(2012, 12, 12), date(2012, 12, 13)) == []
def test_euromil_results_both_dates_wrong_order():
    """An end date before the start date yields an empty result list."""
    assert euro_results(date(2018, 12, 12), date(2011, 12, 13)) == []
def test_euromil_results_both_dates_one_result():
    """A two-day window around a single draw returns exactly that draw."""
    draws = euro_results(date(2018, 10, 18), date(2018, 10, 20))
    assert len(draws) == 1
    only = draws[0]
    assert only.numbers[0] == 1
    assert only.stars[0] == 3
def test_euromil_draw_dates():
    """Known draw dates appear for default and explicit date ranges."""
    assert date(2018, 10, 19) in euro_draw_dates()
    year_2011 = euro_draw_dates(date(2011, 1, 1), date(2011, 12, 31))
    assert date(2011, 6, 3) in year_2011
    window = euro_draw_dates(date(2013, 10, 30), date(2013, 11, 15))
    assert date(2013, 11, 15) in window
def test_euromil_stats():
    """Aggregated star/number counts over one year of draws."""
    stats = euro_stats(date(2017, 10, 27), date(2018, 10, 27))
    assert stats["st4"] == 25
    assert stats["15"] == 17
| 29.924051 | 88 | 0.661168 | from datetime import date
import pytest
from pyeuromil import euro_results, euro_draw_dates, euro_stats
def test_euromil_results_year_not_exist():
with pytest.raises(ValueError):
results = euro_results("abcd")
assert results is None
results = euro_results(1920)
assert results is None
results = euro_results(2999)
assert results is None
def test_euromil_results_invalid_date():
with pytest.raises(ValueError):
results = euro_results("111")
assert results is None
with pytest.raises(ValueError):
results = euro_results(date(2011, 1, 1), "111")
assert results is None
def test_euromil_results_no_param():
results = euro_results()
assert results[0].date.year == 2011
assert results[-1].date.year == 2020
def test_euromil_results_start_date_only():
results = euro_results(date(2012, 12, 12))
assert results[0].date == date(2012, 12, 28)
assert results[-1].date > date(2018, 1, 1)
def test_euromil_results_both_dates_empty():
results = euro_results(date(2012, 12, 12), date(2012, 12, 13))
assert results == []
def test_euromil_results_both_dates_wrong_order():
results = euro_results(date(2018, 12, 12), date(2011, 12, 13))
assert results == []
def test_euromil_results_both_dates_one_result():
results = euro_results(date(2018, 10, 18), date(2018, 10, 20))
assert len(results) == 1
assert results[0].numbers[0] == 1
assert results[0].stars[0] == 3
def test_euromil_draw_dates():
assert date(2018, 10, 19) in euro_draw_dates()
assert date(2011, 6, 3) in euro_draw_dates(date(2011, 1, 1), date(2011, 12, 31))
assert date(2013, 11, 15) in euro_draw_dates(date(2013, 10, 30), date(2013, 11, 15))
def test_euromil_stats():
stats = euro_stats(date(2017, 10, 27), date(2018, 10, 27))
assert (stats["st4"]) == 25
assert (stats["15"]) == 17
| true | true |
f71575f5748372d7306937a1f31ad94c872397b7 | 16,014 | py | Python | nemo/collections/nlp/data/data_utils/data_preprocessing.py | madhukarkm/NeMo | 648c97f076147684bee6aaada209f2f20adcaf5d | [
"Apache-2.0"
] | 4,145 | 2019-09-13T08:29:43.000Z | 2022-03-31T18:31:44.000Z | nemo/collections/nlp/data/data_utils/data_preprocessing.py | madhukarkm/NeMo | 648c97f076147684bee6aaada209f2f20adcaf5d | [
"Apache-2.0"
] | 2,031 | 2019-09-17T16:51:39.000Z | 2022-03-31T23:52:41.000Z | nemo/collections/nlp/data/data_utils/data_preprocessing.py | madhukarkm/NeMo | 648c97f076147684bee6aaada209f2f20adcaf5d | [
"Apache-2.0"
] | 1,041 | 2019-09-13T10:08:21.000Z | 2022-03-30T06:37:38.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
import pickle
import random
import re
import string
from collections import Counter
import numpy as np
import torch
from tqdm.auto import tqdm
from nemo.utils import logging
from nemo.utils.env_var_parsing import get_envint
# Public API of this data-preprocessing helper module.
__all__ = [
    'DataProcessor',
    'get_label_stats',
    'partition_data',
    'write_files',
    'write_data',
    'create_dataset',
    'read_csv',
    'get_dataset',
    'partition',
    'map_entities',
    'get_entities',
    'get_data',
    'reverse_dict',
    'get_intent_labels',
    'get_stats',
    'DATABASE_EXISTS_TMP',
    'MODE_EXISTS_TMP',
    'is_whitespace',
    'write_vocab',
    'if_exist',
    'remove_punctuation_from_sentence',
    'dataset_to_ids',
    'get_freq_weights',
    'fill_class_weights',
    'normalize_answer',
    'get_labels_to_labels_id_mapping',
    'get_vocab',
    'find_newlines',
    'load_data_indices',
    'chinese_punctuation',
    'check_chinese_char',
    'normalize_chinese_answer',
]

# Message templates logged when a processed dataset is found on disk.
DATABASE_EXISTS_TMP = '{} dataset has already been processed and stored at {}'
MODE_EXISTS_TMP = '{} mode of {} dataset has already been processed and stored at {}'
class DataProcessor(object):
    """Abstract base for sequence-classification data-set converters."""

    def get_train_examples(self, data_dir):
        """Return the `InputExample`s of the training split."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Return the `InputExample`s of the dev split."""
        raise NotImplementedError()

    def get_labels(self):
        """Return the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read *input_file* as tab-separated values; return a list of rows."""
        with open(input_file, "r", encoding="utf-8-sig") as handle:
            return [row for row in csv.reader(handle, delimiter="\t", quotechar=quotechar)]
# Full-width punctuation treated as "Chinese" when classifying characters
# and stripped when normalizing Chinese answers.
chinese_punctuation = {
    '——', '‘', '’', '“', '”', '…', '、', '。', '〈', '〉', '《', '》',
    '「', '」', '『', '』', '【', '】', '〔', '〕', '!', '(', ')', ',',
    '.', ':', ';', '?',
}


def check_chinese_char(ch):
    """Return True when *ch* is a CJK unified ideograph or Chinese punctuation."""
    return u'\u4e00' <= ch <= u'\u9fff' or ch in chinese_punctuation


def normalize_chinese_answer(text):
    """Strip Chinese punctuation from *text* and return it as a char list."""
    stripped = ''.join(ch for ch in text if ch not in chinese_punctuation)
    return list(stripped)
def normalize_answer(s):
    """Lower-case *s*, drop ASCII punctuation and English articles, and
    collapse runs of whitespace to single spaces."""
    punctuation = set(string.punctuation)
    no_punct = ''.join(ch for ch in s.lower() if ch not in punctuation)
    no_articles = re.sub(r'\b(a|an|the)\b', ' ', no_punct)
    return ' '.join(no_articles.split())
def get_label_stats(labels, outfile='stats.tsv', verbose=True):
    '''
    Compute per-label frequencies and write them to a TSV report.

    Args:
        labels: list of all labels
        outfile: path to the file where to save label stats
        verbose: when True, log the three most frequent labels

    Returns:
        total (int): total number of labels
        freq_dict (dict): label -> absolute frequency
        max label id among the observed labels
    '''
    labels = Counter(labels)
    total = sum(labels.values())
    freq_dict = {}
    label_frequencies = labels.most_common()
    # Write inside a context manager so the report file is always closed
    # (the previous version leaked the file handle).
    with open(outfile, 'w') as out:
        for i, (k, v) in enumerate(label_frequencies):
            out.write(f'{k}\t\t{round(v/total,5)}\t\t{v}\n')
            if verbose and i < 3:
                logging.info(f'label: {k}, {v} out of {total} ({(v / total)*100.0:.2f}%).')
            freq_dict[k] = v
    return total, freq_dict, max(labels.keys())
def partition_data(intent_queries, slot_tags, split=0.1):
    """Randomly split aligned intent/slot lists into train and dev portions.

    *split* is the dev fraction; both intent lists are prefixed with the
    TSV header line expected by downstream writers.
    """
    total = len(intent_queries)
    dev_indices = set(random.sample(range(total), int(total * split)))

    header = 'sentence\tlabel\n'
    dev_intents, train_intents = [header], [header]
    dev_slots, train_slots = [], []
    for idx, query in enumerate(intent_queries):
        if idx in dev_indices:
            dev_intents.append(query)
            dev_slots.append(slot_tags[idx])
        else:
            train_intents.append(query)
            train_slots.append(slot_tags[idx])
    return train_intents, train_slots, dev_intents, dev_slots
def write_files(data, outfile):
    """Write every item of *data* to *outfile*, one stripped item per line."""
    with open(outfile, 'w') as sink:
        for entry in data:
            sink.write(f'{entry.strip()}\n')
def write_data(data, slot_dict, intent_dict, outfold, mode, uncased):
    """Write (tokens, slots, intent) examples as <mode>.tsv / <mode>_slots.tsv.

    Args:
        data: iterable of (tokens, slots, intent) triples
        slot_dict: slot name -> id
        intent_dict: intent name -> id
        outfold: output directory
        mode: dataset split name, e.g. 'train' or 'test'
        uncased: lowercase the utterance text when True
    '''intent ids go to <mode>.tsv (with header), slot ids to <mode>_slots.tsv.'''
    """
    # Context managers guarantee both files are closed even if a dictionary
    # lookup below raises (the old explicit close() calls were skipped then).
    with open(f'{outfold}/{mode}.tsv', 'w') as intent_file, open(f'{outfold}/{mode}_slots.tsv', 'w') as slot_file:
        intent_file.write('sentence\tlabel\n')
        for tokens, slots, intent in data:
            text = ' '.join(tokens)
            if uncased:
                text = text.lower()
            intent_file.write(f'{text}\t{intent_dict[intent]}\n')
            slot_file.write(' '.join(str(slot_dict[slot]) for slot in slots) + '\n')
def create_dataset(train, dev, slots, intents, uncased, outfold):
    """Write train/test splits plus slot and intent vocabularies to *outfold*."""
    os.makedirs(outfold, exist_ok=True)
    # 'O' (the null slot) is forced to be the last slot id.
    if 'O' in slots:
        slots.remove('O')
    slots = sorted(list(slots)) + ['O']
    intents = sorted(list(intents))
    slot_ids = write_vocab(slots, f'{outfold}/dict.slots.csv')
    intent_ids = write_vocab(intents, f'{outfold}/dict.intents.csv')
    write_data(train, slot_ids, intent_ids, outfold, 'train', uncased)
    write_data(dev, slot_ids, intent_ids, outfold, 'test', uncased)
def read_csv(file_path):
    """Return every row of *file_path* parsed as comma-separated values."""
    with open(file_path, 'r') as csvfile:
        # Use a distinct local name (the original shadowed the function name).
        return [row for row in csv.reader(csvfile, delimiter=',')]
def get_dataset(files, dev_split=0.1):
    """Load intent/slot data from *files*; when only one file is given,
    carve a dev split of fraction *dev_split* out of it."""
    data, slots, intents = get_data(files)
    if len(data) > 1:
        train, dev = data[0], data[1]
    else:
        train, dev = partition(data[0], split=dev_split)
    return train, dev, slots, intents
def partition(data, split=0.1):
    """Randomly split *data* into (train, dev) with dev fraction *split*."""
    dev_indices = set(random.sample(range(len(data)), int(len(data) * split)))
    train = [item for idx, item in enumerate(data) if idx not in dev_indices]
    dev = [item for idx, item in enumerate(data) if idx in dev_indices]
    return train, dev
def map_entities(entity2value, entities):
    """Merge each entity's values and synonyms from *entities* into the
    entity2value accumulator; entries without a 'data' key are skipped."""
    for entity_name, payload in entities.items():
        if 'data' not in payload:
            continue
        collected = []
        for value in payload['data']:
            collected.append(value['value'])
            collected.extend(value['synonyms'])
        existing = entity2value.setdefault(entity_name, set([]))
        entity2value[entity_name] = existing | set(collected)
    return entity2value
def get_entities(files):
    """Build entity->values and value->entity maps from JSON intent files."""
    entity2value = {}
    for path in files:
        with open(path, 'r') as json_file:
            payload = json.load(json_file)
        entity2value = map_entities(entity2value, payload['entities'])
    return entity2value, reverse_dict(entity2value)
def get_data(files):
    """Parse Snips-style intent JSON files.

    Returns:
        all_data: per-file lists of (tokens, slots, intent) triples
        all_slots: set of slot names seen (always contains 'O')
        all_intents: set of intent names seen
    """
    all_data, all_slots, all_intents = [], set(['O']), set()
    for path in files:
        with open(path, 'r') as json_file:
            payload = json.load(json_file)
        file_data = []
        for intent, intent_payload in payload['intents'].items():
            all_intents.add(intent)
            for utterance in intent_payload['utterances']:
                tokens, slots = [], []
                for frag in utterance['data']:
                    frag_tokens = frag['text'].strip().split()
                    tokens.extend(frag_tokens)
                    # fragments without a slot_name carry the null slot 'O'
                    slot = frag.get('slot_name', 'O')
                    all_slots.add(slot)
                    slots.extend([slot] * len(frag_tokens))
                file_data.append((tokens, slots, intent))
        all_data.append(file_data)
    return all_data, all_slots, all_intents
def reverse_dict(entity2value):
    """Invert {entity: iterable of values} into {value: entity}."""
    return {value: entity for entity, values in entity2value.items() for value in values}
def get_intent_labels(intent_file):
    """Read intents (one per line) and assign sequential ids by line order."""
    with open(intent_file, 'r') as f:
        return {line.strip(): idx for idx, line in enumerate(f)}
def get_stats(lengths):
    """Log min/max/mean/median and 75th/99th percentiles of *lengths*."""
    logging.info('Some stats of the lengths of the sequences:')
    lengths = np.asarray(lengths)
    logging.info(
        f'Min: {np.min(lengths)} | \
        Max: {np.max(lengths)} | \
        Mean: {np.mean(lengths)} | \
        Median: {np.median(lengths)}'
    )
    logging.info(f'75 percentile: {np.percentile(lengths, 75):.2f}')
    logging.info(f'99 percentile: {np.percentile(lengths, 99):.2f}')
def is_whitespace(c):
    """True for space, tab, CR, LF, or the narrow no-break space (U+202F)."""
    return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F
def write_vocab(items, outfile):
    """Write *items* one per line to *outfile*; return {item: line_index}."""
    vocab = {}
    with open(outfile, 'w') as sink:
        for idx, item in enumerate(items):
            sink.write(item + '\n')
            vocab[item] = idx
    return vocab
def get_labels_to_labels_id_mapping(file):
    '''
    Reads labels from the file and returns labels to id mapping dictionary

    Args:
        file: path to file
    Returns:
        labels to id mapping dictionary (blank lines skipped)
    '''
    # Read inside a context manager so the handle is closed
    # (the previous version opened the file and never closed it).
    with open(file, 'r') as f:
        lines = [line.strip() for line in f if line.strip()]
    return {line: i for i, line in enumerate(lines)}
def if_exist(outfold, files):
    """True when *outfold* exists and contains every file in *files*."""
    if not os.path.exists(outfold):
        return False
    return all(os.path.exists(f'{outfold}/{file}') for file in files)
def remove_punctuation_from_sentence(sentence):
    """Strip ASCII punctuation from *sentence* and lower-case the result."""
    stripped = re.sub('[' + string.punctuation + ']', '', sentence)
    return stripped.lower()
def dataset_to_ids(dataset, tokenizer, cache_ids=False, add_bos_eos=True, cache_data_per_node=False, use_cache=False):
    """
    Reads dataset from file line by line, tokenizes each line with tokenizer,
    and returns list of lists which corresponds to ids of tokenized strings.

    Args:
        dataset (str): path to dataset
        tokenizer: tokenizer to convert text into ids
        cache_ids (bool): if True, ids are saved to disk as pickle file
            with similar name (e.g., data.txt --> data.txt.pkl)
        add_bos_eos (bool): whether to add <s> and </s> symbols (e.g., for NMT)
        cache_data_per_node (bool): Cache data on local_rank 0. Use when there is not a shared-filesystem.
        use_cache (bool): Use cached ids if they exist.

    Returns:
        ids: list of ids which correspond to tokenized strings of the dataset
    """
    cached_ids_dataset = dataset + str(".pkl")
    if use_cache and os.path.isfile(cached_ids_dataset):
        logging.info("Loading cached tokenized dataset ...")
        # Context managers close the cache/data files; the previous version
        # leaked every handle it opened here.
        with open(cached_ids_dataset, "rb") as cache_file:
            ids = pickle.load(cache_file)
    else:
        logging.info(f"Tokenizing dataset {dataset}...")
        with open(dataset, "rb") as data_file:
            data = data_file.readlines()
        ids = []
        for sentence in tqdm(data, desc='Tokenizing sentence'):
            sent_ids = tokenizer.text_to_ids(sentence.decode("utf-8"))
            if add_bos_eos:
                sent_ids = [tokenizer.bos_id] + sent_ids + [tokenizer.eos_id]
            ids.append(sent_ids)
        # Only one writer caches: either no distributed context, or the
        # local-rank-0 process when per-node caching is requested.
        if cache_ids and (
            not torch.distributed.is_initialized() or (cache_data_per_node and get_envint("LOCAL_RANK", 0) == 0)
        ):
            logging.info("Caching tokenized dataset ...")
            with open(cached_ids_dataset, "wb") as cache_file:
                pickle.dump(ids, cache_file)
    return ids
def get_freq_weights(label_freq):
    """
    Inverse-frequency class weights: dividing the total count by each
    label's frequency (normalized by the number of labels) gives rarer
    labels proportionally larger weights.
    """
    total_size = sum(label_freq.values())
    num_labels = len(label_freq)
    return {label: total_size / (num_labels * freq) for label, freq in label_freq.items()}
def fill_class_weights(weights, max_id=-1):
    """
    Expand a {label_id: weight} dict into a dense list of length max_id + 1.

    Labels missing from *weights* default to 1.0. With max_id < 0, the
    largest key in *weights* (at least 0) determines the list length.

    Args:
        weights: dictionary of weights for labels, labels as keys and weights as values
        max_id: largest label id in the dataset; -1 means derive it from *weights*
    Returns:
        list of weights indexed by label id
    """
    if max_id < 0:
        max_id = max(weights.keys(), default=0)
        max_id = max(max_id, 0)
    return [weights.get(i, 1.0) for i in range(max_id + 1)]
def get_vocab(file):
    """Return {line_index: label} read from *file*, skipping blank lines.

    The file handle is closed via a context manager (previously leaked).
    """
    with open(file, 'r') as f:
        lines = [line.strip() for line in f if line.strip()]
    return {i: line for i, line in enumerate(lines)}
def find_newlines(contents):
    """
    Yield the byte offset of every non-blank, newline-terminated line in
    *contents* (a bytes object). A trailing fragment without '\\n' is not
    reported.
    """
    start = 0
    while True:
        # bytes.index raises ValueError once no newline remains.
        try:
            end = contents.index(b"\n", start)
        except ValueError:
            break
        line = (
            contents[start:end]
            .replace(b"\xc2\x99", b" ")
            .replace(b"\xc2\xa0", b" ")
            .decode("utf-8", errors="ignore")
        )
        if line.split():
            yield start
        start = end + 1
def load_data_indices(idx_file: str, data_file: str, savename: str):
    """
    Load a cached pickle of dataset indices when it exists.

    NOTE: the incoming *idx_file* value is immediately recomputed from
    *data_file* and *savename*; the parameter is kept only for interface
    compatibility.

    Returns:
        (indices or None, idx_file path, data_file's directory)
    """
    data_dir = data_file[: data_file.rfind('/')]
    mode = data_file[data_file.rfind('/') + 1 : data_file.rfind('.')]
    idx_file = f"{data_dir}/{mode}_{savename}.pkl"

    if os.path.isfile(idx_file):
        with open(idx_file, "rb") as f:
            return pickle.load(f), idx_file, data_dir
    return None, idx_file, data_dir
| 29.710575 | 132 | 0.604783 |
import csv
import json
import os
import pickle
import random
import re
import string
from collections import Counter
import numpy as np
import torch
from tqdm.auto import tqdm
from nemo.utils import logging
from nemo.utils.env_var_parsing import get_envint
__all__ = [
'DataProcessor',
'get_label_stats',
'partition_data',
'write_files',
'write_data',
'create_dataset',
'read_csv',
'get_dataset',
'partition',
'map_entities',
'get_entities',
'get_data',
'reverse_dict',
'get_intent_labels',
'get_stats',
'DATABASE_EXISTS_TMP',
'MODE_EXISTS_TMP',
'is_whitespace',
'write_vocab',
'if_exist',
'remove_punctuation_from_sentence',
'dataset_to_ids',
'get_freq_weights',
'fill_class_weights',
'normalize_answer',
'get_labels_to_labels_id_mapping',
'get_vocab',
'find_newlines',
'load_data_indices',
'chinese_punctuation',
'check_chinese_char',
'normalize_chinese_answer',
]
DATABASE_EXISTS_TMP = '{} dataset has already been processed and stored at {}'
MODE_EXISTS_TMP = '{} mode of {} dataset has already been processed and stored at {}'
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
chinese_punctuation = {
'——',
'‘',
'’',
'“',
'”',
'…',
'、',
'。',
'〈',
'〉',
'《',
'》',
'「',
'」',
'『',
'』',
'【',
'】',
'〔',
'〕',
'!',
'(',
')',
',',
'.',
':',
';',
'?',
}
def check_chinese_char(ch):
if u'\u4e00' <= ch <= u'\u9fff' or ch in chinese_punctuation:
return True
else:
return False
def normalize_chinese_answer(text):
def remove_punc(text):
exclude = chinese_punctuation
return ''.join(ch for ch in text if ch not in exclude)
def separate_char(text):
ch_list = []
for ch in text:
ch_list.append(ch)
return ch_list
return separate_char(remove_punc(text))
def normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_label_stats(labels, outfile='stats.tsv', verbose=True):
labels = Counter(labels)
total = sum(labels.values())
out = open(outfile, 'w')
i = 0
freq_dict = {}
label_frequencies = labels.most_common()
for k, v in label_frequencies:
out.write(f'{k}\t\t{round(v/total,5)}\t\t{v}\n')
if verbose and i < 3:
logging.info(f'label: {k}, {v} out of {total} ({(v / total)*100.0:.2f}%).')
i += 1
freq_dict[k] = v
return total, freq_dict, max(labels.keys())
def partition_data(intent_queries, slot_tags, split=0.1):
n = len(intent_queries)
n_dev = int(n * split)
dev_idx = set(random.sample(range(n), n_dev))
dev_intents, dev_slots, train_intents, train_slots = [], [], [], []
dev_intents.append('sentence\tlabel\n')
train_intents.append('sentence\tlabel\n')
for i, item in enumerate(intent_queries):
if i in dev_idx:
dev_intents.append(item)
dev_slots.append(slot_tags[i])
else:
train_intents.append(item)
train_slots.append(slot_tags[i])
return train_intents, train_slots, dev_intents, dev_slots
def write_files(data, outfile):
with open(outfile, 'w') as f:
for item in data:
item = f'{item.strip()}\n'
f.write(item)
def write_data(data, slot_dict, intent_dict, outfold, mode, uncased):
    """Write the intent and slot training files for one dataset split.

    Args:
        data: iterable of (tokens, slots, intent) examples.
        slot_dict: maps slot name -> integer id.
        intent_dict: maps intent name -> integer id.
        outfold: output directory.
        mode: split name used as the file prefix (e.g. 'train', 'test').
        uncased: when True, lowercase the utterance text.

    Produces '{mode}.tsv' (sentence<TAB>intent-id, with a header row) and
    '{mode}_slots.tsv' (space-separated slot ids, one line per example).
    """
    # Fix: context managers close both files even when a dict lookup raises
    # (the previous version only closed them on the happy path).
    with open(f'{outfold}/{mode}.tsv', 'w') as intent_file, \
            open(f'{outfold}/{mode}_slots.tsv', 'w') as slot_file:
        intent_file.write('sentence\tlabel\n')
        for tokens, slots, intent in data:
            text = ' '.join(tokens)
            if uncased:
                text = text.lower()
            intent_file.write(f'{text}\t{intent_dict[intent]}\n')
            slot_file.write(' '.join(str(slot_dict[slot]) for slot in slots) + '\n')
def create_dataset(train, dev, slots, intents, uncased, outfold):
    """Materialize train/test splits plus slot and intent vocabularies under *outfold*.

    The 'O' (outside) slot is always forced to the last vocabulary position.
    Note: *slots* is mutated ('O' is removed from the caller's set).
    """
    os.makedirs(outfold, exist_ok=True)
    if 'O' in slots:
        slots.remove('O')
    ordered_slots = sorted(list(slots)) + ['O']
    ordered_intents = sorted(list(intents))
    slot_ids = write_vocab(ordered_slots, f'{outfold}/dict.slots.csv')
    intent_ids = write_vocab(ordered_intents, f'{outfold}/dict.intents.csv')
    write_data(train, slot_ids, intent_ids, outfold, 'train', uncased)
    write_data(dev, slot_ids, intent_ids, outfold, 'test', uncased)
def read_csv(file_path):
    """Read a comma-delimited CSV file and return its rows as a list of lists."""
    with open(file_path, 'r') as csvfile:
        # Materialize the rows while the file is still open; the previous
        # version shadowed this function's own name with its reader variable.
        return list(csv.reader(csvfile, delimiter=','))
def get_dataset(files, dev_split=0.1):
    """Load one or more dataset files and return (train, dev, slots, intents).

    With a single input file, examples are split randomly using *dev_split*;
    with two files, the first is train and the second is dev.
    """
    data, slots, intents = get_data(files)
    if len(data) > 1:
        train, dev = data[0], data[1]
    else:
        train, dev = partition(data[0], split=dev_split)
    return train, dev, slots, intents
def partition(data, split=0.1):
    """Randomly split *data* into (train, dev) lists; *split* is the dev fraction.

    Relative order of the input items is preserved within each output list.
    """
    held_out = set(random.sample(range(len(data)), int(len(data) * split)))
    dev = [item for idx, item in enumerate(data) if idx in held_out]
    train = [item for idx, item in enumerate(data) if idx not in held_out]
    return train, dev
def map_entities(entity2value, entities):
    """Merge entity value/synonym sets from a Snips-style *entities* dict into *entity2value*.

    Entries without a 'data' key are skipped. Returns the (mutated) mapping.
    """
    for name, spec in entities.items():
        if 'data' not in spec:
            continue
        collected = set()
        for entry in spec['data']:
            collected.add(entry['value'])
            collected.update(entry['synonyms'])
        entity2value[name] = entity2value.get(name, set()) | collected
    return entity2value
def get_entities(files):
    """Aggregate entity definitions from JSON files.

    Returns (entity -> set of values/synonyms, value -> entity).
    """
    entity2value = {}
    for path in files:
        with open(path, 'r') as handle:
            payload = json.load(handle)
        entity2value = map_entities(entity2value, payload['entities'])
    return entity2value, reverse_dict(entity2value)
def get_data(files):
    """Parse Snips-style JSON dataset files into token/slot/intent examples.

    Returns:
        (per-file lists of examples, set of slot names incl. 'O', set of intents)
        where each example is (tokens, per-token slot names, intent).
    """
    all_data = []
    all_slots = {'O'}
    all_intents = set()
    for path in files:
        with open(path, 'r') as handle:
            payload = json.load(handle)
        examples = []
        for intent, spec in payload['intents'].items():
            all_intents.add(intent)
            for utterance in spec['utterances']:
                tokens, slots = [], []
                for frag in utterance['data']:
                    words = frag['text'].strip().split()
                    # fragments without an explicit slot get the 'outside' tag
                    slot = frag.get('slot_name', 'O')
                    all_slots.add(slot)
                    tokens.extend(words)
                    slots.extend([slot] * len(words))
                examples.append((tokens, slots, intent))
        all_data.append(examples)
    return all_data, all_slots, all_intents
def reverse_dict(entity2value):
    """Invert {entity: iterable-of-values} into {value: entity}.

    If a value appears under several entities, the last one iterated wins.
    """
    return {value: entity
            for entity, values in entity2value.items()
            for value in values}
def get_intent_labels(intent_file):
    """Read one intent name per line and return {intent: 0-based line index}."""
    with open(intent_file, 'r') as handle:
        return {line.strip(): idx for idx, line in enumerate(handle)}
def get_stats(lengths):
    """Log summary statistics (min, max, mean, median, 75th/99th percentiles)
    for a collection of sequence lengths.

    Args:
        lengths: iterable of numeric lengths; converted to a numpy array.
    """
    logging.info('Some stats of the lengths of the sequences:')
    lengths = np.asarray(lengths)
    # The backslash continuations keep this as one log line; the leading
    # spaces of each continued line become part of the message.
    logging.info(
        f'Min: {np.min(lengths)} | \
        Max: {np.max(lengths)} | \
        Mean: {np.mean(lengths)} | \
        Median: {np.median(lengths)}'
    )
    logging.info(f'75 percentile: {np.percentile(lengths, 75):.2f}')
    logging.info(f'99 percentile: {np.percentile(lengths, 99):.2f}')
def is_whitespace(c):
    """Return True for characters treated as token separators:
    space, tab, CR, LF, and the narrow no-break space (U+202F)."""
    return c in (' ', '\t', '\r', '\n') or ord(c) == 0x202F
def write_vocab(items, outfile):
    """Write *items* one per line to *outfile* and return {item: line-index}.

    Args:
        items: ordered iterable of vocabulary entries.
        outfile: destination path.

    Returns:
        dict mapping each item to its 0-based position.
    """
    vocab = {}
    with open(outfile, 'w') as f:
        # enumerate replaces the manual idx counter of the previous version.
        for idx, item in enumerate(items):
            f.write(item + '\n')
            vocab[item] = idx
    return vocab
def get_labels_to_labels_id_mapping(file):
    """Map each non-blank, stripped line of *file* to its position among non-blank lines.

    Returns:
        dict {label: 0-based index}.
    """
    # Fix: close the file deterministically (the old version leaked the handle).
    with open(file, 'r') as handle:
        lines = [line.strip() for line in handle if line.strip()]
    return {label: i for i, label in enumerate(lines)}
def if_exist(outfold, files):
    """Return True when directory *outfold* exists and contains every file in *files*."""
    if not os.path.exists(outfold):
        return False
    return all(os.path.exists(f'{outfold}/{name}') for name in files)
def remove_punctuation_from_sentence(sentence):
    """Strip ASCII punctuation from *sentence* and lowercase it."""
    # re.escape makes the character class robust: string.punctuation contains
    # ']', '\\', '^' and '-', which are all metacharacters inside [...] and
    # previously only worked by accident of their ordering.
    sentence = re.sub('[' + re.escape(string.punctuation) + ']', '', sentence)
    return sentence.lower()
def dataset_to_ids(dataset, tokenizer, cache_ids=False, add_bos_eos=True, cache_data_per_node=False, use_cache=False):
    """Tokenize a text file into per-sentence token-id lists, with optional pickle caching.

    Args:
        dataset: path to a text file, one sentence per line (read as UTF-8).
        tokenizer: object exposing text_to_ids(str) plus bos_id and eos_id.
        cache_ids: when True, pickle the tokenized ids next to the dataset.
        add_bos_eos: wrap each sentence's ids with BOS/EOS ids.
        cache_data_per_node: in distributed runs, only local rank 0 writes the cache.
        use_cache: load an existing '<dataset>.pkl' cache instead of tokenizing.

    Returns:
        list of token-id lists, one per input line.

    NOTE(review): pickle.load executes arbitrary code from an untrusted cache
    file; cache paths should be trusted locations only.
    """
    cached_ids_dataset = dataset + str(".pkl")
    if use_cache and os.path.isfile(cached_ids_dataset):
        logging.info("Loading cached tokenized dataset ...")
        ids = pickle.load(open(cached_ids_dataset, "rb"))
    else:
        logging.info(f"Tokenizing dataset {dataset}...")
        data = open(dataset, "rb").readlines()
        ids = []
        for sentence in tqdm(data, desc='Tokenizing sentence'):
            sent_ids = tokenizer.text_to_ids(sentence.decode("utf-8"))
            if add_bos_eos:
                sent_ids = [tokenizer.bos_id] + sent_ids + [tokenizer.eos_id]
            ids.append(sent_ids)
        # Write the cache from a single process (non-distributed, or per-node
        # local rank 0) to avoid concurrent writes to the same file.
        if cache_ids and (
            not torch.distributed.is_initialized() or (cache_data_per_node and get_envint("LOCAL_RANK", 0) == 0)
        ):
            logging.info("Caching tokenized dataset ...")
            pickle.dump(ids, open(cached_ids_dataset, "wb"))
    return ids
def get_freq_weights(label_freq):
    """Compute inverse-frequency class weights.

    Each label's weight is total_count / (num_labels * label_count), so rarer
    labels receive proportionally larger weights.

    Args:
        label_freq: dict {label: count}, all counts > 0.

    Returns:
        dict {label: weight}.
    """
    # Idiom: sum() over the dict values instead of a manual accumulator loop.
    total_size = sum(label_freq.values())
    return {label: (total_size / (len(label_freq) * freq)) for label, freq in label_freq.items()}
def fill_class_weights(weights, max_id=-1):
    """Expand a sparse {class-id: weight} dict into a dense list of length max_id + 1.

    Missing class ids default to weight 1.0. When *max_id* is negative, it is
    inferred as the largest key in *weights* (at least 0).
    """
    if max_id < 0:
        max_id = max([0, *weights.keys()])
    return [weights.get(i, 1.0) for i in range(max_id + 1)]
def get_vocab(file):
    """Read one label per line; return {0-based index among non-blank lines: label}.

    Blank lines are skipped and surrounding whitespace is stripped.
    """
    # Fix: the previous version never closed the file handle.
    with open(file, 'r') as handle:
        lines = [line.strip() for line in handle if line.strip()]
    return {i: label for i, label in enumerate(lines)}
def find_newlines(contents):
    """Yield byte offsets of newline-terminated lines in *contents* (bytes)
    that contain any non-space text.

    Two problematic UTF-8 sequences (C2 99, C2 A0) are mapped to spaces before
    the emptiness check. A trailing fragment without '\\n' is never yielded.
    """
    start = 0
    while True:
        end = contents.find(b"\n", start)
        if end < 0:
            break
        decoded = (
            contents[start:end]
            .replace(b"\xc2\x99", b" ")
            .replace(b"\xc2\xa0", b" ")
            .decode("utf-8", errors="ignore")
        )
        if decoded.split():
            yield start
        start = end + 1
def load_data_indices(idx_file: str, data_file: str, savename: str):
    """Load cached pickled indices associated with *data_file*, if present.

    NOTE(review): the incoming *idx_file* argument is immediately overwritten
    with a path derived from *data_file* and *savename*, so callers' values
    are ignored — confirm whether that is intentional before relying on it.

    Returns:
        (indices or None, resolved idx_file path, data directory)
    """
    # derive '<dir>/<basename-without-ext>_<savename>.pkl' from data_file
    data_dir = data_file[: data_file.rfind('/')]
    mode = data_file[data_file.rfind('/') + 1 : data_file.rfind('.')]
    idx_file = f"{data_dir}/{mode}_{savename}.pkl"
    if os.path.isfile(idx_file):
        with open(idx_file, "rb") as f:
            indices = pickle.load(f)
        return indices, idx_file, data_dir
    return None, idx_file, data_dir
| true | true |
f7157672a7aaadbb0f7dae37f20ea58ef3e5d0da | 12,933 | py | Python | lib/model/config.py | Kenneth-Wong/tf-faster-rcnn | a6bd798df1b9075ebdfeb7744fffc13226c3a65e | [
"MIT"
] | null | null | null | lib/model/config.py | Kenneth-Wong/tf-faster-rcnn | a6bd798df1b9075ebdfeb7744fffc13226c3a65e | [
"MIT"
] | null | null | null | lib/model/config.py | Kenneth-Wong/tf-faster-rcnn | a6bd798df1b9075ebdfeb7744fffc13226c3a65e | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Memory options
#
__C.MEM = edict()
# Number of memory iterations
__C.MEM.ITER = 2
# Height of the memory
__C.MEM.INIT_H = 20
# Width of the memory
__C.MEM.INIT_W = 20
# Channel of the memory
__C.MEM.C = 512
# Basic stds in the memory
__C.MEM.STD = 0.01
# Base stds in the memory update function for input features
__C.MEM.U_STD = 0.01
# Region classification
__C.MEM.C_STD = 0.01
# Feature to memory ratio
__C.MEM.FM_R = 1.
# Value to gate ratio
__C.MEM.VG_R = 1.
# FC to Pool ratio when combing the input
__C.MEM.FP_R = 1.
# Conv kernel size for memory
__C.MEM.CONV = 3
# Canonical region size
__C.MEM.CROP_SIZE = 7
# Context aggregation
__C.MEM.CT_L = 3
__C.MEM.CT_CONV = 3
__C.MEM.CT_FCONV = 3
# Input feature
__C.MEM.IN_L = 2
__C.MEM.IN_CONV = 3
# Memory final fc layer channels
__C.MEM.FC_C = 4096
__C.MEM.FC_L = 2
# The weight for the memory based prediction
__C.MEM.WEIGHT = 1.
__C.MEM.REL_WEIGHT = 1.
# Final supervision weight
__C.MEM.WEIGHT_FINAL = 1.
# The threshold to control the entropy of the distribution
__C.MEM.BETA = .5
# The dimension of predicted tag
__C.MEM.TAG_D = 16
#
# Training options
#
__C.TRAIN = edict()
# Initial learning rate
__C.TRAIN.RATE = 0.0005
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0001
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 10
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_ITERS = 500
# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
__C.TRAIN.REL_BATCH_SIZE = 128
__C.TRAIN.POS_REL_FRACTION = 0.5
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
__C.TRAIN.BBOX_TARGET_NORMALIZATION_FILE = 'bbox_distribution.npy'
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True
__C.TRAIN.USE_RPN_DB = True
__C.TRAIN.NUM_NEG_RELS = 128
#
# Testing options
#
__C.TEST = edict()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TEST.RPN_MIN_SIZE = 16
# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
#
# MobileNet options
#
__C.MOBILENET = edict()
# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False
# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5
# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004
# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.
#
# MISC
#
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
__C.VG_DIR = osp.abspath(osp.join(__C.DATA_DIR, 'vg'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Use an end-to-end tensorflow model.
# Note: models in E2E tensorflow mode have only been tested in feed-forward mode,
# but these models are exportable to other tensorflow instances as GraphDef files.
__C.USE_E2E_TF = True
# Default pooling mode, only 'crop' is available
__C.POOLING_MODE = 'crop'
# Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7
# Anchor scales for RPN
__C.ANCHOR_SCALES = [8, 16, 32]
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = [0.5, 1, 2]
# Number of filters for the RPN layer
__C.RPN_CHANNELS = 512
__C.BOX_SCALE = 1024
__C.IMG_SCALE = 1024
cfg.BOTTLE_SCALE = 16.0
# EPS, a small number for numerical issue
__C.EPS = 1e-14
__C.GROUP_DIST_THRESH = 20.
__C.PUSH_WEIGHT = 0.1
__C.PULL_WEIGHT = 0.1
def get_output_dir(imdb, weights_filename):
    """Return the directory where experimental artifacts are placed.

    The path is <ROOT_DIR>/output/<EXP_DIR>/<imdb.name>/<weights_filename>,
    using 'default' when *weights_filename* is None. The directory is created
    if it does not exist.
    """
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
    if weights_filename is None:
        weights_filename = 'default'
    outdir = osp.join(outdir, weights_filename)
    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs(outdir, exist_ok=True)
    return outdir
def get_output_tb_dir(imdb, weights_filename):
    """Return the directory where tensorflow summaries are placed.

    The path is <ROOT_DIR>/tensorboard/<EXP_DIR>/<imdb.name>/<weights_filename>,
    using 'default' when *weights_filename* is None. The directory is created
    if it does not exist.
    """
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
    if weights_filename is None:
        weights_filename = 'default'
    outdir = osp.join(outdir, weights_filename)
    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs(outdir, exist_ok=True)
    return outdir
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Raises:
        KeyError: when a contains a key that b does not already define.
        ValueError: when value types disagree and cannot be coerced
            (list-likes are re-cast to b's numpy dtype when b holds an array).
    """
    # silently ignore non-edict inputs (nothing to merge)
    if type(a) is not edict:
        return
    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))
        # the types must match, too
        old_type = type(b[k])
        if old_type is not type(v):
            if isinstance(b[k], np.ndarray):
                # coerce the override to the existing array's dtype
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[k]),
                                                               type(v), k))
        # recursively merge dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except:
                # annotate which sub-key failed, then re-raise the original error
                print(('Error under config key: {}'.format(k)))
                raise
        else:
            b[k] = v
def cfg_from_file(filename):
    """Load a YAML config file and merge it into the default options."""
    import yaml
    with open(filename, 'r') as f:
        # Fix: yaml.load without an explicit Loader is deprecated (removed as a
        # one-argument call in PyYAML >= 6) and can construct arbitrary Python
        # objects from an untrusted config file; safe_load parses plain YAML only.
        yaml_cfg = edict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
    """Set config keys via list (e.g., from command line).

    *cfg_list* alternates keys and values: ['TRAIN.SCALES', '[400]', ...].
    Keys are dotted paths into the global config; values are parsed with
    literal_eval and must match the existing value's type exactly.
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        # walk down to the sub-dict that owns the final key
        for subkey in key_list[:-1]:
            assert subkey in d
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d
        try:
            value = literal_eval(v)
        except:
            # handle the case when v is a string literal
            value = v
        assert type(value) == type(d[subkey]), \
            'type {} does not match original type {}'.format(
                type(value), type(d[subkey]))
        d[subkey] = value
| 27 | 91 | 0.710044 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Memory options
#
__C.MEM = edict()
# Number of memory iterations
__C.MEM.ITER = 2
# Height of the memory
__C.MEM.INIT_H = 20
# Width of the memory
__C.MEM.INIT_W = 20
# Channel of the memory
__C.MEM.C = 512
# Basic stds in the memory
__C.MEM.STD = 0.01
# Base stds in the memory update function for input features
__C.MEM.U_STD = 0.01
# Region classification
__C.MEM.C_STD = 0.01
# Feature to memory ratio
__C.MEM.FM_R = 1.
# Value to gate ratio
__C.MEM.VG_R = 1.
# FC to Pool ratio when combing the input
__C.MEM.FP_R = 1.
# Conv kernel size for memory
__C.MEM.CONV = 3
# Canonical region size
__C.MEM.CROP_SIZE = 7
# Context aggregation
__C.MEM.CT_L = 3
__C.MEM.CT_CONV = 3
__C.MEM.CT_FCONV = 3
# Input feature
__C.MEM.IN_L = 2
__C.MEM.IN_CONV = 3
# Memory final fc layer channels
__C.MEM.FC_C = 4096
__C.MEM.FC_L = 2
# The weight for the memory based prediction
__C.MEM.WEIGHT = 1.
__C.MEM.REL_WEIGHT = 1.
# Final supervision weight
__C.MEM.WEIGHT_FINAL = 1.
# The threshold to control the entropy of the distribution
__C.MEM.BETA = .5
# The dimension of predicted tag
__C.MEM.TAG_D = 16
#
# Training options
#
__C.TRAIN = edict()
# Initial learning rate
__C.TRAIN.RATE = 0.0005
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0001
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 10
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_ITERS = 500
# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
__C.TRAIN.MAX_SIZE = 1000
__C.TRAIN.IMS_PER_BATCH = 1
__C.TRAIN.BATCH_SIZE = 128
__C.TRAIN.REL_BATCH_SIZE = 128
__C.TRAIN.POS_REL_FRACTION = 0.5
__C.TRAIN.FG_FRACTION = 0.25
__C.TRAIN.FG_THRESH = 0.5
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
__C.TRAIN.USE_FLIPPED = True
__C.TRAIN.BBOX_REG = True
__C.TRAIN.BBOX_THRESH = 0.5
__C.TRAIN.SNAPSHOT_ITERS = 5000
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
__C.TRAIN.BBOX_TARGET_NORMALIZATION_FILE = 'bbox_distribution.npy'
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
__C.TRAIN.PROPOSAL_METHOD = 'gt'
__C.TRAIN.HAS_RPN = True
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
__C.TRAIN.RPN_FG_FRACTION = 0.5
__C.TRAIN.RPN_BATCHSIZE = 256
__C.TRAIN.RPN_NMS_THRESH = 0.7
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
__C.TRAIN.USE_ALL_GT = True
__C.TRAIN.USE_RPN_DB = True
__C.TRAIN.NUM_NEG_RELS = 128
__C.TEST = edict()
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TEST.RPN_MIN_SIZE = 16
# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
#
# MobileNet options
#
__C.MOBILENET = edict()
# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False
# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5
# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004
# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.
#
# MISC
#
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
__C.RNG_SEED = 3
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
__C.VG_DIR = osp.abspath(osp.join(__C.DATA_DIR, 'vg'))
__C.MATLAB = 'matlab'
__C.EXP_DIR = 'default'
__C.USE_GPU_NMS = True
__C.USE_E2E_TF = True
__C.POOLING_MODE = 'crop'
__C.POOLING_SIZE = 7
__C.ANCHOR_SCALES = [8, 16, 32]
__C.ANCHOR_RATIOS = [0.5, 1, 2]
__C.RPN_CHANNELS = 512
__C.BOX_SCALE = 1024
__C.IMG_SCALE = 1024
cfg.BOTTLE_SCALE = 16.0
__C.EPS = 1e-14
__C.GROUP_DIST_THRESH = 20.
__C.PUSH_WEIGHT = 0.1
__C.PULL_WEIGHT = 0.1
def get_output_dir(imdb, weights_filename):
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def get_output_tb_dir(imdb, weights_filename):
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
if type(a) is not edict:
return
for k, v in a.items():
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print(('Error under config key: {}'.format(k)))
raise
else:
b[k] = v
def cfg_from_file(filename):
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d
d = d[subkey]
subkey = key_list[-1]
assert subkey in d
try:
value = literal_eval(v)
except:
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
| true | true |
f715773b79dedecb2423d1c8a82ee28a03b25ac1 | 2,009 | py | Python | tools.py | VieVie31/face_detection | fea010faedcad038f908bdab559eeb0f18ee5063 | [
"MIT"
] | 4 | 2017-10-19T07:41:25.000Z | 2018-11-03T16:10:16.000Z | tools.py | VieVie31/face_detection | fea010faedcad038f908bdab559eeb0f18ee5063 | [
"MIT"
] | null | null | null | tools.py | VieVie31/face_detection | fea010faedcad038f908bdab559eeb0f18ee5063 | [
"MIT"
] | null | null | null | import os
import re
import cv2
import random
import numpy as np
import matplotlib.pyplot as plt
def read_pgm(filename, byteorder='>'):
    """Return image data from a raw (binary, P5) PGM file as a numpy array.

    Format specification: http://netpbm.sourceforge.net/doc/pgm.html

    Args:
        filename: path to the PGM file.
        byteorder: numpy byte-order prefix applied to 16-bit samples.

    Raises:
        ValueError: when the file does not start with a valid P5 header.
    """
    with open(filename, 'rb') as f:
        buffer = f.read()
    # Header: magic 'P5', then width, height, maxval, each optionally
    # preceded by '#' comment lines.
    match = re.search(
        rb"(^P5\s(?:\s*#.*[\r\n])*"
        rb"(\d+)\s(?:\s*#.*[\r\n])*"
        rb"(\d+)\s(?:\s*#.*[\r\n])*"
        rb"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer)
    if match is None:
        raise ValueError("Not a raw PGM file: '%s'" % filename)
    header, width, height, maxval = match.groups()
    sample_dtype = 'u1' if int(maxval) < 256 else byteorder + 'u2'
    pixels = np.frombuffer(buffer,
                           dtype=sample_dtype,
                           count=int(width) * int(height),
                           offset=len(header))
    return pixels.reshape((int(height), int(width)))
def imread(filename):
    """Read an image as grayscale, dispatching raw PGM files to read_pgm.

    Fix: the previous check compared filename[:-4] (everything *before* the
    last four characters) against 'pgm', so PGM files were never routed to
    read_pgm and fell through to cv2.imread instead.
    """
    if filename.lower().endswith('.pgm'):
        return read_pgm(filename)
    return cv2.imread(filename, 0)
def normalize(t):
    """Return *t* standardized to zero mean and unit variance (element-wise)."""
    mean, std = t.mean(), t.std()
    return (t - mean) / std
def sliding_window(image, stepSize, windowSize):
    """Yield (x, y, window) crops scanned over *image* in steps of *stepSize*.

    Args:
        image: 2-D array (rows, cols).
        stepSize: stride in pixels for both axes.
        windowSize: (width, height) of each crop; border crops may be smaller.
    """
    # Fix: range instead of xrange — xrange is Python-2-only and raises
    # NameError on Python 3; range behaves identically in a for loop.
    for y in range(0, image.shape[0], stepSize):
        for x in range(0, image.shape[1], stepSize):
            yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
def pyramid(image, min_size=64, step=0.75):
    """Yield an image pyramid: the original image, then repeatedly downscaled
    versions (factor *step* per level) until the smaller side reaches *min_size*.

    Args:
        image: 2-D array (rows, cols).
        min_size: iteration stops once min(rows, cols) <= min_size
            (checked against the previous level's size, so one level past
            the threshold may still be yielded).
        step: per-level scale factor, expected in (0, 1).
    """
    w, h = image.shape  # NOTE: w holds rows and h holds cols (numpy order)
    yield image
    while min(w, h) > min_size:
        w, h = image.shape
        # cv2.resize takes dsize as (width, height) == (cols, rows), hence the swap
        image = cv2.resize(image, (int(h * step), int(w * step)))
        yield image
def distance(a, b):
    """Euclidean (L2) distance between the vectors *a* and *b*."""
    squared_diff = (a - b) ** 2
    return sum(squared_diff) ** 0.5
def random_split(dataset, training_proportion):
    """Shuffle *dataset* in place, then split it into (train, test) lists.

    Note: the input list is mutated by the shuffle.
    """
    random.shuffle(dataset)
    cut = int(training_proportion * len(dataset))
    return dataset[:cut], dataset[cut:]
def hist_256(t):
    """Return a 256-bin histogram (list of counts) of the values in *t*.

    Values are truncated to int and must fall in [0, 255].
    """
    bins = [0] * 256
    for value in t:
        bins[int(value)] += 1
    return bins
def shuffled(lst):
    """Shuffle *lst* in place and return it.

    NOTE: despite the name, this mutates and returns the *same* list object,
    not a shuffled copy.
    """
    random.shuffle(lst)
    return lst
| 28.295775 | 77 | 0.558487 | import os
import re
import cv2
import random
import numpy as np
import matplotlib.pyplot as plt
def read_pgm(filename, byteorder='>'):
with open(filename, 'rb') as f:
buffer = f.read()
try:
header, width, height, maxval = re.search(
b"(^P5\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups()
except AttributeError:
raise ValueError("Not a raw PGM file: '%s'" % filename)
return np.frombuffer(buffer,
dtype='u1' if int(maxval) < 256 else byteorder+'u2',
count=int(width)*int(height),
offset=len(header)
).reshape((int(height), int(width)))
def imread(filename):
if filename[:-4] == 'pgm':
return read_pgm(filename)
else:
return cv2.imread(filename, 0)
def normalize(t):
return (t - t.mean()) / t.std()
def sliding_window(image, stepSize, windowSize):
for y in xrange(0, image.shape[0], stepSize):
for x in xrange(0, image.shape[1], stepSize):
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
def pyramid(image, min_size=64, step=0.75):
w, h = image.shape
yield image
while min(w, h) > min_size:
w, h = image.shape
image = cv2.resize(image, (int(h * step), int(w * step)))
yield image
def distance(a, b):
return sum((a - b)**2) ** .5
def random_split(dataset, training_proportion):
random.shuffle(dataset)
return (
dataset[:int(training_proportion * len(dataset))],
dataset[int(training_proportion * len(dataset)):])
def hist_256(t):
hist = [0] * 256
for v in t:
hist[int(v)] += 1
return hist
def shuffled(lst):
random.shuffle(lst)
return lst
| true | true |
f71578c338458c847d71d9fa063b9ac9dfebe6cd | 5,541 | py | Python | Sporter/test_leeftijdsklassen.py | RamonvdW/nhb-apps | 5a9f840bfe066cd964174515c06b806a7b170c69 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-12-22T13:11:12.000Z | 2021-12-22T13:11:12.000Z | Sporter/test_leeftijdsklassen.py | RamonvdW/nhb-apps | 5a9f840bfe066cd964174515c06b806a7b170c69 | [
"BSD-3-Clause-Clear"
] | 9 | 2020-10-28T07:07:05.000Z | 2021-06-28T20:05:37.000Z | Sporter/test_leeftijdsklassen.py | RamonvdW/nhb-apps | 5a9f840bfe066cd964174515c06b806a7b170c69 | [
"BSD-3-Clause-Clear"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.test import TestCase
from django.utils import timezone
from NhbStructuur.models import NhbRegio, NhbVereniging
from .leeftijdsklassen import bereken_leeftijdsklassen
from .models import Sporter
from TestHelpers.e2ehelpers import E2EHelpers
import datetime
class TestSporterLeeftijdsklassen(E2EHelpers, TestCase):

    """ unit tests for the Schutter application, module Leeftijdsklassen (age classes) """

    def setUp(self):
        """ initialisation of the test case """
        self.account_admin = self.e2e_create_account_admin()
        self.account_normaal = self.e2e_create_account('normaal', 'normaal@test.com', 'Normaal')
        self.account_geenlid = self.e2e_create_account('geenlid', 'geenlid@test.com', 'Geen')

        # create a test club (vereniging)
        ver = NhbVereniging()
        ver.naam = "Grote Club"
        ver.ver_nr = "1000"
        ver.regio = NhbRegio.objects.get(pk=111)
        # the secretary cannot be filled in yet
        ver.save()

        # create a test member with a linked account
        sporter = Sporter()
        sporter.lid_nr = 100001
        sporter.geslacht = "M"
        sporter.voornaam = "Ramon"
        sporter.achternaam = "de Tester"
        sporter.geboorte_datum = datetime.date(year=1972, month=3, day=4)
        sporter.sinds_datum = datetime.date(year=2010, month=11, day=12)
        sporter.bij_vereniging = ver
        sporter.account = self.account_normaal
        sporter.email = sporter.account.email
        sporter.save()
        self.sporter1 = sporter

        # create a second test member (no account, no e-mail)
        sporter = Sporter()
        sporter.lid_nr = 100002
        sporter.geslacht = "V"
        sporter.voornaam = "Ramona"
        sporter.achternaam = "de Testerin"
        sporter.email = ""
        sporter.geboorte_datum = datetime.date(year=1972, month=3, day=4)
        sporter.sinds_datum = datetime.date(year=2010, month=11, day=12)
        sporter.bij_vereniging = ver
        sporter.save()

    def test_leeftijdsklassen(self):
        # check the computed age-class tuples for birth years in each bracket
        now = timezone.now()             # is in UTC
        now = timezone.localtime(now)    # convert to active timezone (say Europe/Amsterdam)
        huidige_jaar = now.year

        # aspirant
        tup = bereken_leeftijdsklassen(huidige_jaar - 9)
        self.assertEqual(tup, (huidige_jaar,
                               9,
                               ['Aspirant', 'Aspirant', 'Aspirant', 'Aspirant', 'Aspirant'],
                               ['Aspiranten <11 jaar', 'Aspiranten <11 jaar', 'Aspiranten <11 jaar', 'Aspiranten 11-12 jaar', 'Aspiranten 11-12 jaar'],
                               'Aspirant'))

        # cadet (14..17)
        tup = bereken_leeftijdsklassen(huidige_jaar - 13)
        self.assertEqual(tup, (huidige_jaar,
                               13,
                               ['Aspirant', 'Aspirant', 'Cadet', 'Cadet', 'Cadet'],
                               ['Aspiranten 11-12 jaar', 'Cadetten', 'Cadetten', 'Cadetten', 'Cadetten'],
                               'Cadet'))

        # junior (18..20)
        tup = bereken_leeftijdsklassen(huidige_jaar - 18)
        self.assertEqual(tup, (huidige_jaar,
                               18,
                               ['Cadet', 'Junior', 'Junior', 'Junior', 'Senior'],
                               ['Junioren', 'Junioren', 'Junioren', 'Senioren', 'Senioren'],
                               'Junior'))

        # senior
        tup = bereken_leeftijdsklassen(huidige_jaar - 21)
        self.assertEqual(tup, (huidige_jaar,
                               21,
                               ['Junior', 'Senior', 'Senior', 'Senior', 'Senior'],
                               ['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
                               'Senior'))

        # master
        tup = bereken_leeftijdsklassen(huidige_jaar - 50)
        self.assertEqual(tup, (huidige_jaar,
                               50,
                               ['Senior', 'Master', 'Master', 'Master', 'Master'],
                               ['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
                               'Senior'))

        # veteran
        tup = bereken_leeftijdsklassen(huidige_jaar - 60)
        self.assertEqual(tup, (huidige_jaar,
                               60,
                               ['Master', 'Veteraan', 'Veteraan', 'Veteraan', 'Veteraan'],
                               ['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
                               'Senior'))

    def test_view(self):
        # without login
        with self.assert_max_queries(20):
            resp = self.client.get('/sporter/leeftijdsklassen/', follow=True)
        self.assert403(resp)

        # logged in, but not an NHB member
        self.e2e_login(self.account_admin)
        with self.assert_max_queries(20):
            resp = self.client.get('/sporter/leeftijdsklassen/')
        self.assert403(resp)

        # archer (schutter)
        self.e2e_login(self.account_normaal)
        with self.assert_max_queries(20):
            resp = self.client.get('/sporter/leeftijdsklassen/')
        self.assertEqual(resp.status_code, 200)  # 200 = OK
        self.assert_html_ok(resp)
        self.assert_template_used(resp, ('sporter/leeftijdsklassen.dtl', 'plein/site_layout.dtl'))

        self.e2e_assert_other_http_commands_not_supported('/sporter/leeftijdsklassen/')
# end of file
| 41.044444 | 151 | 0.562714 |
from django.test import TestCase
from django.utils import timezone
from NhbStructuur.models import NhbRegio, NhbVereniging
from .leeftijdsklassen import bereken_leeftijdsklassen
from .models import Sporter
from TestHelpers.e2ehelpers import E2EHelpers
import datetime
class TestSporterLeeftijdsklassen(E2EHelpers, TestCase):
def setUp(self):
self.account_admin = self.e2e_create_account_admin()
self.account_normaal = self.e2e_create_account('normaal', 'normaal@test.com', 'Normaal')
self.account_geenlid = self.e2e_create_account('geenlid', 'geenlid@test.com', 'Geen')
ver = NhbVereniging()
ver.naam = "Grote Club"
ver.ver_nr = "1000"
ver.regio = NhbRegio.objects.get(pk=111)
ver.save()
sporter = Sporter()
sporter.lid_nr = 100001
sporter.geslacht = "M"
sporter.voornaam = "Ramon"
sporter.achternaam = "de Tester"
sporter.geboorte_datum = datetime.date(year=1972, month=3, day=4)
sporter.sinds_datum = datetime.date(year=2010, month=11, day=12)
sporter.bij_vereniging = ver
sporter.account = self.account_normaal
sporter.email = sporter.account.email
sporter.save()
self.sporter1 = sporter
sporter = Sporter()
sporter.lid_nr = 100002
sporter.geslacht = "V"
sporter.voornaam = "Ramona"
sporter.achternaam = "de Testerin"
sporter.email = ""
sporter.geboorte_datum = datetime.date(year=1972, month=3, day=4)
sporter.sinds_datum = datetime.date(year=2010, month=11, day=12)
sporter.bij_vereniging = ver
sporter.save()
def test_leeftijdsklassen(self):
now = timezone.now()
now = timezone.localtime(now)
huidige_jaar = now.year
tup = bereken_leeftijdsklassen(huidige_jaar - 9)
self.assertEqual(tup, (huidige_jaar,
9,
['Aspirant', 'Aspirant', 'Aspirant', 'Aspirant', 'Aspirant'],
['Aspiranten <11 jaar', 'Aspiranten <11 jaar', 'Aspiranten <11 jaar', 'Aspiranten 11-12 jaar', 'Aspiranten 11-12 jaar'],
'Aspirant'))
tup = bereken_leeftijdsklassen(huidige_jaar - 13)
self.assertEqual(tup, (huidige_jaar,
13,
['Aspirant', 'Aspirant', 'Cadet', 'Cadet', 'Cadet'],
['Aspiranten 11-12 jaar', 'Cadetten', 'Cadetten', 'Cadetten', 'Cadetten'],
'Cadet'))
tup = bereken_leeftijdsklassen(huidige_jaar - 18)
self.assertEqual(tup, (huidige_jaar,
18,
['Cadet', 'Junior', 'Junior', 'Junior', 'Senior'],
['Junioren', 'Junioren', 'Junioren', 'Senioren', 'Senioren'],
'Junior'))
tup = bereken_leeftijdsklassen(huidige_jaar - 21)
self.assertEqual(tup, (huidige_jaar,
21,
['Junior', 'Senior', 'Senior', 'Senior', 'Senior'],
['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
'Senior'))
tup = bereken_leeftijdsklassen(huidige_jaar - 50)
self.assertEqual(tup, (huidige_jaar,
50,
['Senior', 'Master', 'Master', 'Master', 'Master'],
['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
'Senior'))
tup = bereken_leeftijdsklassen(huidige_jaar - 60)
self.assertEqual(tup, (huidige_jaar,
60,
['Master', 'Veteraan', 'Veteraan', 'Veteraan', 'Veteraan'],
['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
'Senior'))
def test_view(self):
with self.assert_max_queries(20):
resp = self.client.get('/sporter/leeftijdsklassen/', follow=True)
self.assert403(resp)
self.e2e_login(self.account_admin)
with self.assert_max_queries(20):
resp = self.client.get('/sporter/leeftijdsklassen/')
self.assert403(resp)
self.e2e_login(self.account_normaal)
with self.assert_max_queries(20):
resp = self.client.get('/sporter/leeftijdsklassen/')
self.assertEqual(resp.status_code, 200)
self.assert_html_ok(resp)
self.assert_template_used(resp, ('sporter/leeftijdsklassen.dtl', 'plein/site_layout.dtl'))
self.e2e_assert_other_http_commands_not_supported('/sporter/leeftijdsklassen/')
| true | true |
f71579a7221e41b1b3740a7e825aa1b7bae7267b | 6,193 | py | Python | test/test_addressspace.py | dendisuhubdy/coriander | 7df182981e5c4a8e043fea25d272d025a953f06d | [
"Apache-2.0"
] | 644 | 2017-05-21T05:25:20.000Z | 2022-03-25T04:18:14.000Z | test/test_addressspace.py | hughperkins/cuda-ir-to-opencl | 7c6b65bc08a25a6bce21efe7b86be8fa985597af | [
"Apache-2.0"
] | 82 | 2017-05-21T15:19:24.000Z | 2022-01-30T01:41:44.000Z | test/test_addressspace.py | hughperkins/cuda-ir-to-opencl | 7c6b65bc08a25a6bce21efe7b86be8fa985597af | [
"Apache-2.0"
] | 88 | 2017-05-21T01:31:16.000Z | 2022-01-31T09:28:17.000Z | # Copyright Hugh Perkins 2016
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pyopencl as cl
import os
import subprocess
from test import test_common
from test.test_common import offset_type
def test_getelementptr_struct_local(context, q, float_data, float_data_gpu):
    """Translate a CUDA kernel that reads a pointer member from a
    function-local struct, and check that the generated OpenCL builds.

    The kernel-launch portion is left commented out (placeholder for a
    future runtime check).
    """
    cu_source = """
struct MyStruct {
    float* f0;
    float* f1;
};

__global__ void foo(float *data) {
    struct MyStruct astruct;
    float *floats = astruct.f0;
}
"""
    kernelName = test_common.mangle('foo', ['float *'])
    cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=1)
    print('cl_sourcecode', cl_sourcecode)
    kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)

    # float_data_orig = np.copy(float_data)
    # kernel(q, (32,), (32,), float_data_gpu, offset_type(0), cl.LocalMemory(4))
    # cl.enqueue_copy(q, float_data, float_data_gpu)
    # q.finish()
    # # print('before', float_data_orig[:5])
    # print('after', float_data[:5])
    # assert np.abs(float_data_orig[1:32] - float_data[0:31]).max() <= 1e-4
def test_getelementptr_struct_global(context, q, float_data, float_data_gpu):
    """Translate a CUDA kernel that takes a struct BY VALUE as its kernel
    parameter and reads a pointer member, then check the generated OpenCL
    builds.  Uses the pre-mangled name because the struct parameter mangles
    differently from plain pointers.
    """
    cu_source = """
struct MyStruct {
    float* f0;
    float* f1;
};

__global__ void foo(struct MyStruct mystruct) {
    float *floats = mystruct.f0;
}
"""
    # kernelName = test_common.mangle('foo', ['float *'])
    kernelName = '_Z3foo8MyStruct'
    cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=3)
    print('cl_sourcecode', cl_sourcecode)
    kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)

    # float_data_orig = np.copy(float_data)
    # kernel(q, (32,), (32,), float_data_gpu, offset_type(0), cl.LocalMemory(4))
    # cl.enqueue_copy(q, float_data, float_data_gpu)
    # q.finish()
    # # print('before', float_data_orig[:5])
    # print('after', float_data[:5])
    # # assert np.abs(float_data_orig[1:32] - float_data[0:31]).max() <= 1e-4
def test_kernelparam_ll(context, q, float_data, float_data_gpu):
    """Translate raw LLVM IR with a float* kernel parameter and assert the
    parameter is emitted into the OpenCL 'global' address space."""
    ll_code = """define void @mykernel(float * %p1) {
  ret void
}
"""
    cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
    print('cl_sourcecode', cl_sourcecode)
    assert len([l for l in cl_sourcecode.split('\n') if l.strip().startswith('global float* p1')]) == 1
def test_load_globalfloatstar(context, q, float_data, float_data_gpu):
    """Translate an LLVM `load` through a global float* and assert the
    loaded value gets a plain (private) float local in the OpenCL output."""
    ll_code = """define void @mykernel(float * %p1) {
  %1 = load float, float* %p1
  ret void
}
"""
    cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
    print('cl_sourcecode', cl_sourcecode)
    assert len([l for l in cl_sourcecode.split('\n') if l.strip() == 'float v2;']) == 1
def x_test_play(context, q, float_data, float_data_gpu):
    """Disabled scratch test (x_ prefix keeps pytest from collecting it):
    translates a kernel that takes the address of a local float and passes
    it through device functions, printing the generated OpenCL."""
    cu_source = """
__device__ void process(float *data) {
    *data = 5.0f;
}

__device__ float process2(float value) {
    process(&value);
    return value;
}

__global__ void mykernel(float *data) {
    float v = data[0];
    float *v1 = &v;
    *v1 = 5.0f;
    data[0] = v;
    data[0] = process2(data[0]);
}
"""
    kernelName = test_common.mangle('mykernel', ['float *'])
    cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=1)
    print('cl_sourcecode', cl_sourcecode)
    # kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)
# %1 = load float, float* %p1
# %2 = getelementptr
# def test_addr_of_float(context, q, float_data, float_data_gpu):
# ll_code = """define void @mykernel(float * %p1) {
# %1 = alloca float
# %2 = getelementptr float, float* %1, i64 0
# %3 = load float, float* %2
# %4 = getelementptr float, float* %1
# %5 = load float, float* %4
# ret void
# }
# """
# cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
# print('cl_sourcecode', cl_sourcecode)
# assert len([l for l in cl_sourcecode.split('\n') if l.strip() == 'float v2;']) == 1
def test_addr_of_float(context, q, float_data, float_data_gpu):
    """Compile CUDA that takes the address of a stack float to device IR
    (no optimization, so the alloca survives) and translate it to OpenCL,
    printing both for inspection."""
    cu_code = """
__attribute__((global)) void mykernel(float *data) {
    float v = data[0];
    float *v1 = &v;
    *v1 = 5.0f;
}
"""
    ll_code = test_common.cu_to_devicell_noopt(cu_code)
    print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
    cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernelPf', num_clmems=1)
    print('cl_code', cl_code)
def test_struct_byval(context, q, float_data, float_data_gpu):
    """Translate a kernel receiving a struct by value (mixed float/int and
    pointer members, including a pointer-to-pointer) and print the result;
    succeeds if translation does not raise."""
    cu_code = """
struct MyStruct {
    float afloat;
    int anint;
    float *floatpointer;
    float **floatstarstar;
};

__attribute__((global)) void mykernel(struct MyStruct myStruct) {
}
"""
    ll_code = test_common.cu_to_devicell_noopt(cu_code)
    print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
    cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernel8MyStruct', num_clmems=1)
    print('cl_code', cl_code)
def test_internal_struct(context, q, float_data, float_data_gpu):
    """Translate a kernel that builds a struct on the stack and passes its
    address to a device function that dereferences a member pointer; prints
    the device IR and generated OpenCL, succeeding if translation works."""
    cu_code = """
struct MyStruct {
    float afloat;
    int anint;
    float *floatpointer;
    // float **floatstarstart;
};

__attribute__((device)) void processStruct(MyStruct *myStruct) {
    myStruct->afloat = myStruct->floatpointer[0];
}

__attribute__((global)) void mykernel(float *data) {
    float afloat = data[0];
    float float2 = data[1];
    struct MyStruct myStruct = { afloat, 3, &float2 };
    processStruct(&myStruct);
    data[2] = myStruct.afloat;
}
"""
    ll_code = test_common.cu_to_devicell_noopt(cu_code)
    print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
    cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernelPf', num_clmems=1)
    print('cl_code', cl_code)
| 32.088083 | 103 | 0.676893 |
import numpy as np
import pyopencl as cl
import os
import subprocess
from test import test_common
from test.test_common import offset_type
def test_getelementptr_struct_local(context, q, float_data, float_data_gpu):
cu_source = """
struct MyStruct {
float* f0;
float* f1;
};
__global__ void foo(float *data) {
struct MyStruct astruct;
float *floats = astruct.f0;
}
"""
kernelName = test_common.mangle('foo', ['float *'])
cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)
ruct_global(context, q, float_data, float_data_gpu):
cu_source = """
struct MyStruct {
float* f0;
float* f1;
};
__global__ void foo(struct MyStruct mystruct) {
float *floats = mystruct.f0;
}
"""
kernelName = '_Z3foo8MyStruct'
cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=3)
print('cl_sourcecode', cl_sourcecode)
kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)
ykernel(float * %p1) {
ret void
}
"""
cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
assert len([l for l in cl_sourcecode.split('\n') if l.strip().startswith('global float* p1')]) == 1
def test_load_globalfloatstar(context, q, float_data, float_data_gpu):
ll_code = """define void @mykernel(float * %p1) {
%1 = load float, float* %p1
ret void
}
"""
cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
assert len([l for l in cl_sourcecode.split('\n') if l.strip() == 'float v2;']) == 1
def x_test_play(context, q, float_data, float_data_gpu):
cu_source = """
__device__ void process(float *data) {
*data = 5.0f;
}
__device__ float process2(float value) {
process(&value);
return value;
}
__global__ void mykernel(float *data) {
float v = data[0];
float *v1 = &v;
*v1 = 5.0f;
data[0] = v;
data[0] = process2(data[0]);
}
"""
kernelName = test_common.mangle('mykernel', ['float *'])
cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
# %1 = alloca float
# %2 = getelementptr float, float* %1, i64 0
# %3 = load float, float* %2
# %4 = getelementptr float, float* %1
# %5 = load float, float* %4
# ret void
# }
# """
def test_addr_of_float(context, q, float_data, float_data_gpu):
cu_code = """
__attribute__((global)) void mykernel(float *data) {
float v = data[0];
float *v1 = &v;
*v1 = 5.0f;
}
"""
ll_code = test_common.cu_to_devicell_noopt(cu_code)
print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernelPf', num_clmems=1)
print('cl_code', cl_code)
def test_struct_byval(context, q, float_data, float_data_gpu):
cu_code = """
struct MyStruct {
float afloat;
int anint;
float *floatpointer;
float **floatstarstar;
};
__attribute__((global)) void mykernel(struct MyStruct myStruct) {
}
"""
ll_code = test_common.cu_to_devicell_noopt(cu_code)
print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernel8MyStruct', num_clmems=1)
print('cl_code', cl_code)
def test_internal_struct(context, q, float_data, float_data_gpu):
cu_code = """
struct MyStruct {
float afloat;
int anint;
float *floatpointer;
// float **floatstarstart;
};
__attribute__((device)) void processStruct(MyStruct *myStruct) {
myStruct->afloat = myStruct->floatpointer[0];
}
__attribute__((global)) void mykernel(float *data) {
float afloat = data[0];
float float2 = data[1];
struct MyStruct myStruct = { afloat, 3, &float2 };
processStruct(&myStruct);
data[2] = myStruct.afloat;
}
"""
ll_code = test_common.cu_to_devicell_noopt(cu_code)
print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernelPf', num_clmems=1)
print('cl_code', cl_code)
| true | true |
f7157a1710b2e208b523118567fc8e95d752447c | 16,786 | py | Python | RL_TD3/src/pe_model.py | Crazy-Jack/RL4GRN | e683e17758eb468bd42e0ea0020e2246051c258c | [
"MIT"
] | null | null | null | RL_TD3/src/pe_model.py | Crazy-Jack/RL4GRN | e683e17758eb468bd42e0ea0020e2246051c258c | [
"MIT"
] | null | null | null | RL_TD3/src/pe_model.py | Crazy-Jack/RL4GRN | e683e17758eb468bd42e0ea0020e2246051c258c | [
"MIT"
] | 1 | 2020-12-14T09:32:36.000Z | 2020-12-14T09:32:36.000Z | '''
The probabilistic ensemble dynamics model
'''
# pylint: disable=C0103, R0902, R0913, W0201, E0401, E1120
import time
import itertools
import numpy as np
import tensorflow as tf
from tensorflow import keras
from collections import defaultdict
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
class PEModel(keras.Model):
    '''
    An individual Probabilistic Neural Network.
    Multiple Networks with identical structure form the Probabilistic Ensemble.

    Notice that each PEModel network predicts the mean and variance of
    reward, done, delta_state in order.
    Therefore, the output layer has (state_dim + 1 + 1) * 2 units:
    three hidden ReLU layers of 256 units each, then a linear head whose
    first half is the means and second half the raw (pre-smoothed)
    log-variances -- the split is done downstream in PE.get_output.
    '''
    def __init__(self, state_dim, action_dim):
        """Build the network for inputs of size state_dim + action_dim."""
        super().__init__()
        self.l1 = keras.layers.Dense(256, activation="relu")
        self.l2 = keras.layers.Dense(256, activation="relu")
        self.l3 = keras.layers.Dense(256, activation="relu")
        # mean and variance for reward, done, delta_state (in this order)
        # Note: we change done to not_done
        self.l4 = keras.layers.Dense((state_dim + 2) * 2)
        # this step to populate trainable_weights. Without this step,
        # PE.trainable_weights will be empty.
        self.forward(np.zeros((1, state_dim + action_dim)))

    def forward(self, net_input):
        '''
        Calls the network on a batch of inputs.
        net_input should have size (batch_size, state_dim+action_dim);
        returns a tensor of shape (batch_size, (state_dim + 2) * 2).
        '''
        out = self.l1(net_input)
        out = self.l2(out)
        out = self.l3(out)
        out = self.l4(out)
        return out
class PE():
    '''
    The probabilistic ensemble dynamics model class.
    Contains code to initialize, train and then predict with the ensemble.
    You will implement part of this class.

    Each member network predicts a Gaussian over (reward, not_done,
    delta_state); training selects the `num_elites` networks with the
    lowest holdout MSE, and inference samples uniformly among the elites.
    '''
    def __init__(
            self,
            state_dim,
            action_dim,
            num_networks = 7,
            num_elites = 5,
            learning_rate = 1e-3,
    ):
        """Create `num_networks` PEModel members sharing one Adam optimizer."""
        self.num_networks = num_networks
        self.num_elites = num_elites
        self.networks = [PEModel(state_dim, action_dim) for i in range(num_networks)]
        self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
        self.state_dim = state_dim
        self.action_dim = action_dim
        # per-network output width: reward + not_done + delta_state
        self.output_dim = state_dim + 2
        # For smoothing the log-variance output
        self.max_logvar = tf.convert_to_tensor(-3 * np.ones([1, self.state_dim + 2]), \
            dtype=tf.float32)
        self.min_logvar = tf.convert_to_tensor(-7 * np.ones([1, self.state_dim + 2]), \
            dtype=tf.float32)
        self.total_it = 0
        self._model_inds = list(range(self.num_networks))  # for choosing elite models in inference!

    def get_output(self, output, ret_logvar=False):
        """
        output: tf tensor, shape (batch_size, (state_dim+2) * 2)
        Given network outputs, returns mean and log variance tf tensors if ret_logvar = True.
        mean: shape (batch_size, state_dim + 2)
        logvar: shape (batch_size, state_dim + 2)
        Do not modify
        """
        mean = output[:, 0:self.output_dim]
        raw_v = output[:, self.output_dim:]
        # Log variance smoothing: softly clamp logvar into [min_logvar, max_logvar]
        logvar = self.max_logvar - tf.math.softplus(self.max_logvar - raw_v)
        logvar = self.min_logvar + tf.math.softplus(logvar - self.min_logvar)
        if ret_logvar:  # for training
            return mean, logvar
        return mean, tf.math.exp(logvar)  # for testing

    def _train_loss_one(self, network, train_in, train_targ):
        '''
        Compute the Gaussian MLE training loss for one member network.
        train_in: tf tensor, shape (batch_size, state_dim + action_dim)
        train_targ: tf tensor, shape (batch_size, state_dim + 2), target output
        Returns the negative log-likelihood summed across the entire batch
        plus a small regularizer on the logvar bounds.
        '''
        # raise NotImplementedError
        pred_mean, pred_var = self.get_output(network.forward(train_in), ret_logvar=True)
        # pred_var here is the (smoothed) LOG variance despite the name
        train_loss = (pred_mean - train_targ) ** 2 / tf.math.exp(pred_var) + pred_var  # [batch_size, state_dim + 2]
        train_loss = tf.math.reduce_sum(train_loss)
        # regularization step. populate train_loss with correct Gaussian MLE loss
        train_loss += 0.01 * (tf.math.reduce_sum(self.max_logvar) - \
            tf.math.reduce_sum(self.min_logvar))
        return train_loss

    def _MSE_loss(self, valid_in, valid_targ, final=False):
        """
        Computes the MSE loss for each Probabilistic Neural Network, for validation only.
        valid_in: tf tensor, shape (batch_size, state_dim + action_dim), validation input
        valid_targ: tf tensor, shape (batch_size, state_dim + 2), validation target
        If final=True, additionally returns per-component (reward, not_done,
        dynamics) MSE arrays.  Do not modify.
        """
        mse_losses = np.zeros(self.num_networks)
        rew_losses = np.zeros(self.num_networks)
        not_done_losses = np.zeros(self.num_networks)
        dynamics_losses = np.zeros(self.num_networks)
        for i, network in enumerate(self.networks):
            mean, _ = self.get_output(network.forward(valid_in), ret_logvar=True)
            if final:
                mse_loss = tf.reduce_mean(((mean - valid_targ) ** 2), 0)
                rew_loss = mse_loss[0]
                not_done_loss = mse_loss[1]
                dynamics_loss = tf.reduce_mean(mse_loss[2:], 0)
                mse_losses[i] = tf.reduce_mean(mse_loss, 0)
                rew_losses[i] = rew_loss
                not_done_losses[i] = not_done_loss
                dynamics_losses[i] = dynamics_loss
            else:
                mse_loss = tf.reduce_mean((mean - valid_targ) ** 2, 0)
                mse_losses[i] = tf.reduce_mean(mse_loss, 0)
        if final:
            return mse_losses, rew_losses, not_done_losses, dynamics_losses
        return mse_losses

    def _prepare_dataset(self, buffer):
        '''
        Given a replay buffer containing real environment transitions,
        prepare a dataset for training the PE of neural networks.
        The dataset contains ALL transitions in the replay buffer.
        Do not modify.
        inputs: tf tensor, shape (buffer_size, state_dim + action_dim)
        targets: tf tensor, shape (buffer_size, state_dim + 2)
        '''
        state, action, next_state, reward, not_done = buffer.sample_all()  # already shuffled
        delta_state = next_state - state
        inputs = tf.concat((state, action), -1)
        targets = tf.concat((reward, not_done, delta_state), -1)
        # Both TF tensors
        return inputs, targets

    def _start_train(self, max_epochs_since_update):
        '''
        Setup some internal bookkeeping variables to determine convergence.
        Do not modify.
        '''
        self._snapshots = np.array([1e10 for i in range(self.num_networks)])
        self._epochs_since_update = 0
        self._max_epochs_since_update = max_epochs_since_update

    def _end_train(self):
        '''
        Book keeping and console output: selects the elite networks (lowest
        holdout MSE) and logs summary statistics.  Do not modify.
        '''
        sorted_inds = np.argsort(self._snapshots)
        self._model_inds = sorted_inds[:self.num_elites].tolist()  # first elite models
        print('Final holdout_losses: ', self._snapshots)
        print('Model MSE', np.mean(self._snapshots[self._model_inds]))
        print('Rew MSE', np.mean(self._reward_mse[self._model_inds]))
        print('Not Done MSE', np.mean(self._not_done_mse[self._model_inds]))
        print('Dyn MSE', np.mean(self._dynamics_mse[self._model_inds]))

    def _save_best(self, epoch, holdout_losses):
        '''
        Determines the stopping condition for PE model training.
        The training is determined to have converged if for max_epochs_since_update epochs,
        no network in the ensemble has improved for more than 1%.
        Returns True when training should stop.  Do not modify.
        '''
        updated = False
        for i in range(len(holdout_losses)):
            current = holdout_losses[i]
            best = self._snapshots[i]
            improvement = (best - current) / best
            if improvement > 0.01:  # if decrease over 1%, save
                self._snapshots[i] = current
                #self._save_model(i)
                updated = True
                # improvement = (best - current) / best
                print('epoch {} | updated {} | improvement: {:.4f} | best: {:.4f} | current: {:.4f}'.format(\
                    epoch, i, improvement, best, current))

        if updated:
            self._epochs_since_update = 0
        else:
            self._epochs_since_update += 1

        if self._epochs_since_update > self._max_epochs_since_update:
            print('[ PE ] Breaking at epoch {}: {} epochs since update ({} max)'.format(epoch,
                self._epochs_since_update, self._max_epochs_since_update))
            return True
        else:
            return False

    def train(self, buffer, batch_size=256, holdout_ratio=0.2, max_logging=5000,
              max_grad_updates=None, max_t=None, max_epochs_since_update=5):
        '''
        For model training, uses all transitions in real buffer, and train to convergence
        in valid set. You will implement part of this training function.
        Returns the total number of gradient updates performed.
        '''
        self._start_train(max_epochs_since_update)
        inputs, targets = self._prepare_dataset(buffer)

        # Split into training and holdout sets
        num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)
        inputs, holdout_inputs = inputs[num_holdout:], inputs[:num_holdout]
        targets, holdout_targets = targets[num_holdout:], targets[:num_holdout]
        print('[ Euler PE ] Training {} | Target {} | Holdout: {}'.format(inputs.shape, targets.shape,
            holdout_inputs.shape))

        # sample-with-replacement index pool over the training set
        idxs = tf.convert_to_tensor(np.random.randint(inputs.shape[0], size=(inputs.shape[0],)))
        num_batch = int(np.ceil(idxs.shape[-1] / batch_size))

        # global counter
        t0 = time.time()
        grad_updates = 0
        for epoch in itertools.count():  # infinite loop; exits via break conditions below
            for batch_num in range(num_batch):
                batch_idxs = idxs[batch_num * batch_size:(batch_num + 1) * batch_size]
                # (N, <=B): will include the remainder batch even if out of bounds!
                train_in = tf.gather(inputs, batch_idxs)
                train_targ = tf.gather(targets, batch_idxs)
                # For each network, get loss, compute gradient of loss
                # And apply optimizer step.
                # raise NotImplementedError
                for network in self.networks:
                    with tf.GradientTape() as tape:
                        train_loss = self._train_loss_one(network, train_in, train_targ)
                    network_grad = tape.gradient(train_loss, network.trainable_variables)
                    self.optimizer.apply_gradients(zip(network_grad, network.trainable_variables))
                grad_updates += 1
            idxs = tf.random.shuffle(idxs)  # shuffle its dataset for each model
            # validate each model using same valid set
            holdout_losses = self._MSE_loss(holdout_inputs, holdout_targets)  # (N,)
            break_train = self._save_best(epoch, holdout_losses)
            print("[ PE ] holdout_losses: ", f"Epoch {epoch}", holdout_losses)  # write to log.txt
            t = time.time() - t0
            if break_train or (max_grad_updates and grad_updates > max_grad_updates):
                break
            if max_t and t > max_t:
                print('Breaking because of timeout: {}! (max: {})'.format(t, max_t))
                break

        # final per-component holdout MSE, then pick the elite networks
        self._snapshots, self._reward_mse, self._not_done_mse, self._dynamics_mse \
            = self._MSE_loss(holdout_inputs, holdout_targets, final=True)
        self._end_train()
        print(f"End of Model training {epoch} epochs and time {t:.0f}s")
        print('Model training epoch', epoch)
        print('Model training time', int(t))
        return grad_updates

    ### Rollout / Inference Code

    def _prepare_input(self, state, action):
        '''
        Prepares inputs for inference.
        state: tf tensor, size (batch_size, state_dim) or (state_dim, )
        action: tf tensor, size (batch_size, action_dim) or (action_dim, )
        inputs: tf tensor, size (batch_size, state_dim + action_dim)
        Do not modify.
        '''
        if state.ndim == 1:
            state = tf.expand_dims(state, 0)
        if action.ndim == 1:
            action = tf.expand_dims(action, 0) \
                if action.shape[0] == self.action_dim else tf.expand_dims(action, 1)
        inputs = tf.concat((state, action), -1)
        assert inputs.ndim == 2
        return inputs

    def _random_inds(self, batch_size):
        '''
        Uniformly randomly pick one *elite* model for each (state, action) in batch.
        This may help you implement predict.
        '''
        inds = np.random.choice(self._model_inds, size=batch_size)
        return inds

    def predict(self, state, action, deterministic=False):
        '''
        Predicts next states, rewards and not_done using the probabilistic ensemble
        For each (state, action) pair, pick a elite model uniformly at random, then
        use that elite model to predict next state, reward and not_done. The model
        can de different for each sample in the batch.
        If deterministic=True, then the prediction should simply be the predicted mean.
        If deterministic=False, then the prediction should be sampled from N(mean, var),
        where mean is the predicted mean and var is the predicted variance.
        state: tf tensor, shape (batch_size, state_dim) or (state_dim, )
        action: tf tensor, shape (batch_size, action_dim) or (action_dim, )
        samples (return value): np array, shape (batch_size, state_dim+2)
        samples[:, 0] should be the rewards, samples[:, 1] should be the not-done signals,
        and samples[:, 2:] should be the next states.

        NOTE(review): in the deterministic branch each samples[bi] is a rank-1
        tensor of length state_dim+2, so tf.squeeze(..., 1) below looks like it
        would fail there (it matches the stochastic branch's (1, state_dim+2)
        shape) -- TODO confirm the deterministic path is exercised anywhere.
        '''
        inputs = self._prepare_input(state, action)
        # raise NotImplementedError
        batch_size = state.shape[0] if len(state.shape) > 1 else 1
        inds = self._random_inds(batch_size)  # get random idx
        # group idx by network number -> network_number: list(random idx)
        network_2_batch_mapping = defaultdict(list)
        for batch_number, model_idx in enumerate(inds):
            network_2_batch_mapping[model_idx].append(batch_number)
        # model forward (for loop by network)
        samples = [0] * batch_size
        for model_idx, batch_numbers in network_2_batch_mapping.items():
            model_inputs = tf.gather_nd(inputs, [[i] for i in batch_numbers])
            pred_mean, pred_var = self.get_output(self.networks[model_idx].forward(model_inputs), ret_logvar=False)
            # network predicts delta_state; add current state back (reward and
            # not_done columns get zero padding so only the state part shifts)
            zeros_padding = tf.zeros([len(batch_numbers), 2])
            cur_state = tf.concat([zeros_padding, tf.gather_nd(state, [[i] for i in batch_numbers])], 1)
            pred_mean = pred_mean + cur_state
            if deterministic == True:
                for idx, bi in enumerate(batch_numbers):
                    samples[bi] = pred_mean[idx, :]
            else:
                for idx, bi in enumerate(batch_numbers):
                    samples[bi] = tf.random.normal(shape = (1, self.state_dim + 2), mean = pred_mean[idx,:], stddev = tf.sqrt(pred_var[idx,:]))

        samples = tf.squeeze(tf.convert_to_tensor(samples), 1)

        # zeros_padding = tf.zeros([batch_size, 2])
        # padded_state_only = tf.concat([zeros_padding, state], 1)
        # samples += padded_state_only
        return samples
# Sanity Check to test your PE model implementation.
if __name__ == '__main__':
    # Sanity check: fill a replay buffer with random-policy transitions from
    # a PyBullet pendulum environment, then train the ensemble on it.
    import pybullet_envs
    import gym
    import utils

    env = gym.make("InvertedPendulumBulletEnv-v0")
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.shape[0]
    replay_buffer = utils.ReplayBuffer(state_size, action_size, max_size=int(1e6))
    o = env.reset()
    total_steps = 25000  # one episode has 1000 steps
    step = 0
    while step < total_steps:
        a = env.action_space.sample()  # uniform random policy
        o2, r, d, info = env.step(a)
        step += 1
        replay_buffer.add(o, a, o2, r, float(d))
        o = o2
        if d:
            o = env.reset()
    model = PE(state_size, action_size)
    model.train(replay_buffer)
| 43.041026 | 143 | 0.614083 |
import time
import itertools
import numpy as np
import tensorflow as tf
from tensorflow import keras
from collections import defaultdict
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
class PEModel(keras.Model):
    """One member network of the probabilistic ensemble (PE).

    A 3x256 ReLU MLP whose final layer emits (state_dim + 2) * 2 values:
    a mean and a raw log-variance for each of the state_dim + 2 targets
    (reward, not-done flag and state delta; see PE._prepare_dataset).
    """
    def __init__(self, state_dim, action_dim):
        super().__init__()
        self.l1 = keras.layers.Dense(256, activation="relu")
        self.l2 = keras.layers.Dense(256, activation="relu")
        self.l3 = keras.layers.Dense(256, activation="relu")
        # Output head: concatenated [means, raw log-variances].
        self.l4 = keras.layers.Dense((state_dim + 2) * 2)
        # Dummy forward pass -- presumably to build the layer weights eagerly.
        self.forward(np.zeros((1, state_dim + action_dim)))
    def forward(self, net_input):
        """Plain forward pass; net_input is a batch of [state, action] rows."""
        out = self.l1(net_input)
        out = self.l2(out)
        out = self.l3(out)
        out = self.l4(out)
        return out
class PE():
    """Probabilistic Ensemble (PE) dynamics model.

    Trains ``num_networks`` independent PEModel networks; each predicts a
    diagonal Gaussian over the (state_dim + 2)-dim target
    [reward, not_done, next_state - state].  After training, the
    ``num_elites`` networks with the lowest holdout MSE form the pool
    that ``predict`` samples from.

    Fix in this revision: the ``def`` line of ``_prepare_input`` was
    missing, leaving its body stranded (unreachably) after the ``return``
    in ``train`` and making ``predict`` fail; the method header is
    restored below.
    """
    def __init__(
        self,
        state_dim,
        action_dim,
        num_networks = 7,
        num_elites = 5,
        learning_rate = 1e-3,
    ):
        self.num_networks = num_networks
        self.num_elites = num_elites
        self.networks = [PEModel(state_dim, action_dim) for _ in range(num_networks)]
        # One shared optimizer updates every member network.
        self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
        self.state_dim = state_dim
        self.action_dim = action_dim
        # Targets per transition: reward, not_done and the state delta.
        self.output_dim = state_dim + 2
        # Soft log-variance bounds used by get_output (constant tensors).
        self.max_logvar = tf.convert_to_tensor(-3 * np.ones([1, self.state_dim + 2]),
                                               dtype=tf.float32)
        self.min_logvar = tf.convert_to_tensor(-7 * np.ones([1, self.state_dim + 2]),
                                               dtype=tf.float32)
        self.total_it = 0
        # Elite indices; every network counts as elite until train() ranks them.
        self._model_inds = list(range(self.num_networks))

    def get_output(self, output, ret_logvar=False):
        """Split a raw network output into (mean, log-variance or variance).

        The raw log-variance is squashed into (min_logvar, max_logvar)
        with two softplus bounds; with ret_logvar=False the variance
        (exp of the bounded log-variance) is returned instead.
        """
        mean = output[:, 0:self.output_dim]
        raw_v = output[:, self.output_dim:]
        logvar = self.max_logvar - tf.math.softplus(self.max_logvar - raw_v)
        logvar = self.min_logvar + tf.math.softplus(logvar - self.min_logvar)
        if ret_logvar:
            return mean, logvar
        return mean, tf.math.exp(logvar)

    def _train_loss_one(self, network, train_in, train_targ):
        """Gaussian-NLL-style training loss for one network on one batch.

        The trailing bound term is inert here (max/min_logvar are constant
        tensors, so it contributes no gradient); kept for parity with the
        PETS formulation.
        """
        pred_mean, pred_var = self.get_output(network.forward(train_in), ret_logvar=True)
        train_loss = (pred_mean - train_targ) ** 2 / tf.math.exp(pred_var) + pred_var
        train_loss = tf.math.reduce_sum(train_loss)
        train_loss += 0.01 * (tf.math.reduce_sum(self.max_logvar) -
                              tf.math.reduce_sum(self.min_logvar))
        return train_loss

    def _MSE_loss(self, valid_in, valid_targ, final=False):
        """Per-network holdout MSE.

        With final=True also returns the reward (column 0), not-done
        (column 1) and dynamics (columns 2:) components separately.
        """
        mse_losses = np.zeros(self.num_networks)
        rew_losses = np.zeros(self.num_networks)
        not_done_losses = np.zeros(self.num_networks)
        dynamics_losses = np.zeros(self.num_networks)
        for i, network in enumerate(self.networks):
            mean, _ = self.get_output(network.forward(valid_in), ret_logvar=True)
            if final:
                mse_loss = tf.reduce_mean(((mean - valid_targ) ** 2), 0)
                rew_loss = mse_loss[0]
                not_done_loss = mse_loss[1]
                dynamics_loss = tf.reduce_mean(mse_loss[2:], 0)
                mse_losses[i] = tf.reduce_mean(mse_loss, 0)
                rew_losses[i] = rew_loss
                not_done_losses[i] = not_done_loss
                dynamics_losses[i] = dynamics_loss
            else:
                mse_loss = tf.reduce_mean((mean - valid_targ) ** 2, 0)
                mse_losses[i] = tf.reduce_mean(mse_loss, 0)
        if final:
            return mse_losses, rew_losses, not_done_losses, dynamics_losses
        return mse_losses

    def _prepare_dataset(self, buffer):
        """Turn the whole replay buffer into (inputs, targets) tensors:
        inputs = [state, action], targets = [reward, not_done, delta_state]."""
        state, action, next_state, reward, not_done = buffer.sample_all()
        delta_state = next_state - state
        inputs = tf.concat((state, action), -1)
        targets = tf.concat((reward, not_done, delta_state), -1)
        return inputs, targets

    def _start_train(self, max_epochs_since_update):
        """Reset the early-stopping bookkeeping before a training run."""
        self._snapshots = np.array([1e10 for i in range(self.num_networks)])
        self._epochs_since_update = 0
        self._max_epochs_since_update = max_epochs_since_update

    def _end_train(self):
        """Rank networks by final holdout loss and keep the elite indices."""
        sorted_inds = np.argsort(self._snapshots)
        self._model_inds = sorted_inds[:self.num_elites].tolist()
        print('Final holdout_losses: ', self._snapshots)
        print('Model MSE', np.mean(self._snapshots[self._model_inds]))
        print('Rew MSE', np.mean(self._reward_mse[self._model_inds]))
        print('Not Done MSE', np.mean(self._not_done_mse[self._model_inds]))
        print('Dyn MSE', np.mean(self._dynamics_mse[self._model_inds]))

    def _save_best(self, epoch, holdout_losses):
        """Record per-network improvements; return True when no network has
        improved by more than 1% for over ``_max_epochs_since_update`` epochs."""
        updated = False
        for i in range(len(holdout_losses)):
            current = holdout_losses[i]
            best = self._snapshots[i]
            improvement = (best - current) / best
            if improvement > 0.01:
                self._snapshots[i] = current
                updated = True
                print('epoch {} | updated {} | improvement: {:.4f} | best: {:.4f} | current: {:.4f}'.format(
                    epoch, i, improvement, best, current))
        if updated:
            self._epochs_since_update = 0
        else:
            self._epochs_since_update += 1
        if self._epochs_since_update > self._max_epochs_since_update:
            print('[ PE ] Breaking at epoch {}: {} epochs since update ({} max)'.format(epoch,
                self._epochs_since_update, self._max_epochs_since_update))
            return True
        else:
            return False

    def train(self, buffer, batch_size=256, holdout_ratio=0.2, max_logging=5000,
              max_grad_updates=None, max_t=None, max_epochs_since_update=5):
        """Fit every ensemble member on the buffer.

        The first ``num_holdout`` samples are held out for early stopping;
        training runs until no network improves for a while, or until the
        optional gradient-update / wall-clock budgets are exhausted.
        Returns the number of gradient updates performed.
        """
        self._start_train(max_epochs_since_update)
        inputs, targets = self._prepare_dataset(buffer)
        # Holdout split, capped at max_logging samples.
        num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)
        inputs, holdout_inputs = inputs[num_holdout:], inputs[:num_holdout]
        targets, holdout_targets = targets[num_holdout:], targets[:num_holdout]
        print('[ Euler PE ] Training {} | Target {} | Holdout: {}'.format(inputs.shape, targets.shape,
            holdout_inputs.shape))
        # Index resampling with replacement, reshuffled after every epoch.
        idxs = tf.convert_to_tensor(np.random.randint(inputs.shape[0], size=(inputs.shape[0],)))
        num_batch = int(np.ceil(idxs.shape[-1] / batch_size))
        t0 = time.time()
        grad_updates = 0
        for epoch in itertools.count():
            for batch_num in range(num_batch):
                batch_idxs = idxs[batch_num * batch_size:(batch_num + 1) * batch_size]
                train_in = tf.gather(inputs, batch_idxs)
                train_targ = tf.gather(targets, batch_idxs)
                # NOTE(review): every network sees the same batch; a true
                # bootstrap would draw per-network indices.
                for network in self.networks:
                    with tf.GradientTape() as tape:
                        train_loss = self._train_loss_one(network, train_in, train_targ)
                    network_grad = tape.gradient(train_loss, network.trainable_variables)
                    self.optimizer.apply_gradients(zip(network_grad, network.trainable_variables))
                    grad_updates += 1
            idxs = tf.random.shuffle(idxs)
            # Early-stopping check on the holdout set.
            holdout_losses = self._MSE_loss(holdout_inputs, holdout_targets)
            break_train = self._save_best(epoch, holdout_losses)
            print("[ PE ] holdout_losses: ", f"Epoch {epoch}", holdout_losses)
            t = time.time() - t0
            if break_train or (max_grad_updates and grad_updates > max_grad_updates):
                break
            if max_t and t > max_t:
                print('Breaking because of timeout: {}! (max: {})'.format(t, max_t))
                break
        # Final component-wise holdout losses, then elite selection.
        self._snapshots, self._reward_mse, self._not_done_mse, self._dynamics_mse \
            = self._MSE_loss(holdout_inputs, holdout_targets, final=True)
        self._end_train()
        print(f"End of Model training {epoch} epochs and time {t:.0f}s")
        print('Model training epoch', epoch)
        print('Model training time', int(t))
        return grad_updates

    def _prepare_input(self, state, action):
        """Normalise (state, action) into a single rank-2 [state, action] batch.

        (Restored: this ``def`` line was missing, which left the body
        below stranded after ``train`` and broke ``predict``.)
        """
        if state.ndim == 1:
            state = tf.expand_dims(state, 0)
        if action.ndim == 1:
            action = tf.expand_dims(action, 0) \
                if action.shape[0] == self.action_dim else tf.expand_dims(action, 1)
        inputs = tf.concat((state, action), -1)
        assert inputs.ndim == 2
        return inputs

    def _random_inds(self, batch_size):
        """Draw one elite-network index per batch row (with replacement)."""
        inds = np.random.choice(self._model_inds, size=batch_size)
        return inds

    def predict(self, state, action, deterministic=False):
        """Predict [reward, not_done, next_state] for a batch of (state, action).

        Each row is routed to a randomly chosen elite network.  Only the
        state part is residual: the predicted delta is added to the input
        state (reward/not_done columns are padded with zeros).  With
        deterministic=False, a sample from the predicted Gaussian is
        returned instead of its mean.
        """
        inputs = self._prepare_input(state, action)
        batch_size = state.shape[0] if len(state.shape) > 1 else 1
        inds = self._random_inds(batch_size)  # random elite per row
        # Group row indices by the network that will serve them.
        network_2_batch_mapping = defaultdict(list)
        for batch_number, model_idx in enumerate(inds):
            network_2_batch_mapping[model_idx].append(batch_number)
        # One forward pass per distinct network.
        samples = [0] * batch_size
        for model_idx, batch_numbers in network_2_batch_mapping.items():
            model_inputs = tf.gather_nd(inputs, [[i] for i in batch_numbers])
            pred_mean, pred_var = self.get_output(self.networks[model_idx].forward(model_inputs), ret_logvar=False)
            # Residual connection for the state columns only.
            zeros_padding = tf.zeros([len(batch_numbers), 2])
            cur_state = tf.concat([zeros_padding, tf.gather_nd(state, [[i] for i in batch_numbers])], 1)
            pred_mean = pred_mean + cur_state
            if deterministic:
                # NOTE(review): these rows are rank-1 while the stochastic
                # branch yields (1, dim); the squeeze below assumes rank-3
                # input -- verify the deterministic path before relying on it.
                for idx, bi in enumerate(batch_numbers):
                    samples[bi] = pred_mean[idx, :]
            else:
                for idx, bi in enumerate(batch_numbers):
                    samples[bi] = tf.random.normal(shape = (1, self.state_dim + 2), mean = pred_mean[idx,:], stddev = tf.sqrt(pred_var[idx,:]))
        samples = tf.squeeze(tf.convert_to_tensor(samples), 1)
        return samples
if __name__ == '__main__':
import pybullet_envs
import gym
import utils
env = gym.make("InvertedPendulumBulletEnv-v0")
state_size = env.observation_space.shape[0]
action_size = env.action_space.shape[0]
replay_buffer = utils.ReplayBuffer(state_size, action_size, max_size=int(1e6))
o = env.reset()
total_steps = 25000
step = 0
while step < total_steps:
a = env.action_space.sample()
o2, r, d, info = env.step(a)
step += 1
replay_buffer.add(o, a, o2, r, float(d))
o = o2
if d:
o = env.reset()
model = PE(state_size, action_size)
model.train(replay_buffer)
| true | true |
f7157a7507148ed0eab64630453d5382f6fcb0e0 | 264 | py | Python | project/api/migrations/0052_merge_0051_catalog_content_0051_video_resource_group.py | hlystovea/BBBS | 7164ef67615e45d750e965bf958af229b56d49e3 | [
"BSD-3-Clause"
] | null | null | null | project/api/migrations/0052_merge_0051_catalog_content_0051_video_resource_group.py | hlystovea/BBBS | 7164ef67615e45d750e965bf958af229b56d49e3 | [
"BSD-3-Clause"
] | 2 | 2021-06-07T14:06:05.000Z | 2021-06-18T16:27:29.000Z | project/api/migrations/0052_merge_0051_catalog_content_0051_video_resource_group.py | hlystovea/BBBS | 7164ef67615e45d750e965bf958af229b56d49e3 | [
"BSD-3-Clause"
] | 2 | 2021-07-27T20:40:18.000Z | 2021-09-12T16:48:19.000Z | # Generated by Django 3.2.3 on 2021-07-13 14:58
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration joining the two divergent 0051 branches of the
    `api` app; both parents are dependencies and no operations are run."""
    dependencies = [
        ('api', '0051_catalog_content'),
        ('api', '0051_video_resource_group'),
    ]
    operations = [
    ]
| 17.6 | 47 | 0.636364 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0051_catalog_content'),
('api', '0051_video_resource_group'),
]
operations = [
]
| true | true |
f7157aa07d402c4517f82d9775f1feb82ec86069 | 1,855 | py | Python | repos/system_upgrade/el7toel8/actors/checkbootavailspace/tests/unit_test.py | panovotn/leapp-repository | e80bdbf65393e68bc2e91b43b46fdd9b9b787878 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/checkbootavailspace/tests/unit_test.py | panovotn/leapp-repository | e80bdbf65393e68bc2e91b43b46fdd9b9b787878 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/checkbootavailspace/tests/unit_test.py | panovotn/leapp-repository | e80bdbf65393e68bc2e91b43b46fdd9b9b787878 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
from leapp.libraries.actor.library import (MIN_AVAIL_BYTES_FOR_BOOT,
check_avail_space_on_boot,
inhibit_upgrade)
from leapp import reporting
from leapp.libraries.common.testutils import create_report_mocked
class fake_get_avail_bytes_on_boot(object):
    """Callable test double for get_avail_bytes_on_boot.

    Always reports the amount of free /boot space given at construction,
    no matter what arguments it is called with.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, *ignored_args):
        return self.size
def test_not_enough_space_available(monkeypatch):
    """A report must be created for 0 B free and for just-below-minimum free."""
    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
    # 0 bytes free in /boot, then 0.1 MiB below the required minimum.
    for avail_bytes in (0, MIN_AVAIL_BYTES_FOR_BOOT - 0.1 * 2**20):
        check_avail_space_on_boot(fake_get_avail_bytes_on_boot(avail_bytes))
    assert reporting.create_report.called == 2
def test_enough_space_available(monkeypatch):
    """Exactly the required minimum free space must not trigger a report."""
    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
    fake_getter = fake_get_avail_bytes_on_boot(MIN_AVAIL_BYTES_FOR_BOOT)
    check_avail_space_on_boot(fake_getter)
    assert reporting.create_report.called == 0
def test_inhibit_upgrade(monkeypatch):
    """inhibit_upgrade must emit one inhibitor report naming the missing MiB."""
    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
    avail_bytes = 4.2 * 2**20  # 4.2 MiB available on /boot
    inhibit_upgrade(avail_bytes)
    assert reporting.create_report.called == 1
    assert 'inhibitor' in reporting.create_report.report_fields['flags']
    missing_mib = (MIN_AVAIL_BYTES_FOR_BOOT - avail_bytes) / 2**20
    assert "needs additional {0} MiB".format(missing_mib) in reporting.create_report.report_fields['summary']
| 35.673077 | 108 | 0.750404 | from __future__ import division
from leapp.libraries.actor.library import (MIN_AVAIL_BYTES_FOR_BOOT,
check_avail_space_on_boot,
inhibit_upgrade)
from leapp import reporting
from leapp.libraries.common.testutils import create_report_mocked
class fake_get_avail_bytes_on_boot(object):
def __init__(self, size):
self.size = size
def __call__(self, *args):
return self.size
def test_not_enough_space_available(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(0)
check_avail_space_on_boot(get_avail_bytes_on_boot)
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(MIN_AVAIL_BYTES_FOR_BOOT - 0.1 * 2**20)
check_avail_space_on_boot(get_avail_bytes_on_boot)
assert reporting.create_report.called == 2
def test_enough_space_available(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(MIN_AVAIL_BYTES_FOR_BOOT)
check_avail_space_on_boot(get_avail_bytes_on_boot)
assert reporting.create_report.called == 0
def test_inhibit_upgrade(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
bytes_available = 4.2 * 2**20
inhibit_upgrade(bytes_available)
assert reporting.create_report.called == 1
assert 'inhibitor' in reporting.create_report.report_fields['flags']
mib_needed = (MIN_AVAIL_BYTES_FOR_BOOT - bytes_available) / 2**20
assert "needs additional {0} MiB".format(mib_needed) in reporting.create_report.report_fields['summary']
| true | true |
f7157bf8af638e897f07970e2094a05bd644cb21 | 162 | py | Python | boa3_test/test_sc/native_test/contractmanagement/DestroyContractTooManyArguments.py | OnBlockIO/neo3-boa | cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/native_test/contractmanagement/DestroyContractTooManyArguments.py | OnBlockIO/neo3-boa | cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/native_test/contractmanagement/DestroyContractTooManyArguments.py | OnBlockIO/neo3-boa | cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from typing import Any
from boa3.builtin.nativecontract.contractmanagement import ContractManagement
def Main(arg0: Any):
    """Contract entry point for a negative compiler test: per the file name
    (DestroyContractTooManyArguments), ContractManagement.destroy takes no
    user arguments, so passing `arg0` should be rejected at compile time."""
    ContractManagement.destroy(arg0)
| 20.25 | 77 | 0.820988 | from typing import Any
from boa3.builtin.nativecontract.contractmanagement import ContractManagement
def Main(arg0: Any):
ContractManagement.destroy(arg0)
| true | true |
f7157c2e2f6a53fa18f4f1a00dcbb3a3da29ecfd | 15,984 | py | Python | conta/main/views.py | osso73/contabilidad | babdedfdb47b2b4fd01a09e2db9db5d21bbc88f0 | [
"MIT"
] | null | null | null | conta/main/views.py | osso73/contabilidad | babdedfdb47b2b4fd01a09e2db9db5d21bbc88f0 | [
"MIT"
] | 23 | 2021-12-29T21:41:37.000Z | 2022-03-31T10:01:54.000Z | conta/main/views.py | osso73/contabilidad | babdedfdb47b2b4fd01a09e2db9db5d21bbc88f0 | [
"MIT"
] | 1 | 2022-02-18T19:58:52.000Z | 2022-02-18T19:58:52.000Z | import datetime
from django.shortcuts import render
from django.views import View
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.db.models.deletion import ProtectedError
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from main.models import Etiqueta, Cuenta, Movimiento, FiltroMovimientos, FiltroCuentas
import main.functions as functions
class IndexView(View):
    """Landing page of the application (tab 'principal')."""
    def get(self, request, *args, **kwargs):
        return render(request, 'main/index.html', {'tab': 'principal'})
class CuentasView(LoginRequiredMixin, View):
    """Account list page: GET renders the filtered, sorted and paginated
    account list; POST creates a new account from the inline form."""
    def get(self, request, pag=1, *args, **kwargs):
        lista_cuentas = Cuenta.objects.all()
        lista_etiquetas = Etiqueta.objects.all().order_by('id')
        # Singleton filter record: create it with defaults on first use.
        filtro = FiltroCuentas.objects.all()
        if len(filtro) == 0:
            filtro = FiltroCuentas()
            filtro.save()
        else:
            filtro = filtro[0]
        # Apply the stored filter fields (empty fields are skipped).
        if filtro.num:
            lista_cuentas = lista_cuentas.filter(pk=filtro.num)
        if filtro.nombre:
            lista_cuentas = lista_cuentas.filter(nombre__contains=filtro.nombre)
        if filtro.etiqueta:
            lista_cuentas = lista_cuentas.filter(etiqueta=filtro.etiqueta)
        # Apply the stored sort order ('-' prefix = descending).
        orden = '-' if not filtro.ascendiente else ''
        lista_cuentas = lista_cuentas.order_by(orden+filtro.campo)
        # Pagination: 10 results per page.
        paginacion, num_cuentas, pag, lista_cuentas = functions.get_pagination(pag, lista_cuentas)
        context = {
            'tab': 'cuentas',
            'lista_cuentas': lista_cuentas,
            'lista_etiquetas': lista_etiquetas,
            'filtro': filtro,
            'paginacion': paginacion,
            'pagina_actual': pag,
            'num_cuentas': num_cuentas,
        }
        return render(request, 'main/cuentas.html', context)
    def post(self, request, *args, **kwargs):
        # Save first so the account has a pk before setting M2M tags.
        nueva_cuenta = Cuenta(
            num = request.POST['num'].strip(),
            nombre = request.POST['nombre']
        )
        nueva_cuenta.save()
        e = request.POST['etiqueta']
        if len(e):
            # Tags arrive as one 'a, b, c' string.  NOTE(review): unlike
            # ModificarCuentaView.post, these ids are not validated
            # against existing Etiqueta rows -- confirm this is intended.
            nombres_etiquetas = e.split(', ')
            nueva_cuenta.etiqueta.set(nombres_etiquetas)
        nueva_cuenta.save()
        return HttpResponseRedirect(reverse('main:cuentas'))
class AsientosView(LoginRequiredMixin, View):
    """Journal-entry (movimientos) list page: GET renders the filtered,
    sorted and paginated movements plus debit/credit totals; POST creates
    a new simple entry (one debit and one credit movement).
    """
    def get(self, request, pag=1, *args, **kwargs):
        lista_movimientos = Movimiento.objects.all().order_by('num')
        lista_cuentas = Cuenta.objects.all().order_by('num')
        # Singleton filter record: create it with defaults on first use.
        filtro = FiltroMovimientos.objects.all()
        if len(filtro) == 0:
            filtro = FiltroMovimientos()
            filtro.save()
        else:
            filtro = filtro[0]
        # Apply the stored filter; dates are stored as ISO strings.
        if filtro.fecha_inicial:
            fecha = datetime.date.fromisoformat(filtro.fecha_inicial)
            lista_movimientos = lista_movimientos.filter(fecha__gte=fecha)
        if filtro.fecha_final:
            fecha = datetime.date.fromisoformat(filtro.fecha_final)
            lista_movimientos = lista_movimientos.filter(fecha__lte=fecha)
        if filtro.cuenta:
            lista_movimientos = lista_movimientos.filter(cuenta=filtro.cuenta)
        if filtro.descripcion:
            lista_movimientos = lista_movimientos.filter(descripcion__contains=filtro.descripcion)
        if filtro.asiento:
            lista_movimientos = lista_movimientos.filter(num=int(filtro.asiento))
        # Debit/credit totals over the filtered set (before pagination).
        total_haber = total_debe = 0
        for m in lista_movimientos:
            total_debe += m.debe
            total_haber += m.haber
        total = total_haber - total_debe
        # Apply the stored sort order ('-' prefix = descending).
        orden = '-' if not filtro.ascendiente else ''
        lista_movimientos = lista_movimientos.order_by(orden+filtro.campo)
        # Pagination: 25 results per page.
        paginacion, num_movimientos, pag, lista_movimientos = functions.get_pagination(pag, lista_movimientos)
        context = {
            'tab': 'asientos',
            'lista_movimientos': lista_movimientos,
            'lista_cuentas': lista_cuentas,
            'filtro': filtro,
            'total_debe': total_debe,
            'total_haber': total_haber,
            'total': total,
            'paginacion': paginacion,
            'pagina_actual': pag,
            'num_movimientos': num_movimientos,
        }
        return render(request, 'main/asientos.html', context)
    def post(self, request, *args, **kwargs):
        # New entry gets the next number after the current maximum.
        num = functions.max_num_asiento()
        # Account selects appear to post 'pk:label'; only the pk is used.
        pk_debe = request.POST['debe'].split(':')[0]
        pk_haber = request.POST['haber'].split(':')[0]
        simple = {
            'num': num+1,
            'fecha': request.POST['fecha'],
            'descripcion': request.POST['descripcion'],
            'valor': request.POST['valor'],
            'debe': Cuenta.objects.get(pk=pk_debe),
            'haber': Cuenta.objects.get(pk=pk_haber)
        }
        functions.crea_asiento_simple(simple)
        return HttpResponseRedirect(reverse('main:asientos'))
class ModificarAsientoView(LoginRequiredMixin, View):
    """Edit page for one journal entry: all movements sharing `num`."""
    def get(self, request, num):
        lista_movimientos = [ a for a in Movimiento.objects.all() if a.num == num ]
        lista_cuentas = Cuenta.objects.all()
        # Reformat each date as 'YYYY-MM-DD' (ISO), presumably to pre-fill
        # a date input in the template.
        for movimiento in lista_movimientos:
            fecha_movimiento = f'{movimiento.fecha.year}-{movimiento.fecha.month:02d}-{movimiento.fecha.day:02d}'
            movimiento.fecha = fecha_movimiento
        context = {
            'tab': 'asientos',
            'num_asiento': num,
            'lista_movimientos': lista_movimientos,
            'lista_cuentas': lista_cuentas
        }
        return render(request, 'main/modificar_asiento.html', context)
    def post(self, request, *args, **kwargs):
        # The form posts 7 fields per movement (id, num, fecha, descripcion,
        # debe, haber, cuenta); the -1 presumably accounts for the CSRF token.
        num_items = int((len(request.POST) -1 )/ 7)
        for i in range(num_items):
            movimiento = Movimiento.objects.get(id=request.POST[f'id{i}'])
            movimiento.num = int(request.POST[f'num{i}'])
            movimiento.fecha = request.POST[f'fecha{i}']
            movimiento.descripcion = request.POST[f'descripcion{i}']
            movimiento.debe = float(request.POST[f'debe{i}'])
            movimiento.haber = float(request.POST[f'haber{i}'])
            # Account select appears to post 'num:label'; keep the number.
            num_cuenta = int(request.POST[f'cuenta{i}'].split(':')[0])
            cuenta = Cuenta.objects.get(num=num_cuenta)
            movimiento.cuenta = cuenta
            movimiento.save()
        return HttpResponseRedirect(reverse('main:asientos'))
class ModificarCuentaView(LoginRequiredMixin, View):
    """Edit page for one account: GET shows it, POST saves name and tags."""
    def get(self, request, num):
        context = {
            'tab': 'cuentas',
            'cuenta': Cuenta.objects.get(pk=num),
        }
        return render(request, 'main/modificar_cuenta.html', context)
    def post(self, request, *args, **kwargs):
        cuenta = Cuenta.objects.get(pk=request.POST['num'])
        cuenta.nombre = request.POST['nombre']
        etiquetas = request.POST['etiqueta'].split(', ')
        # Tag validation: keep only ids that exist in Etiqueta.
        lista_etiquetas = Etiqueta.objects.all()
        etiquetas_sin_error = list()
        for e in etiquetas:
            if lista_etiquetas.filter(id=e):
                etiquetas_sin_error.append(e)
        cuenta.etiqueta.set(etiquetas_sin_error)
        cuenta.save()
        return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_movimiento(request, pk, pagina, num_asiento=None):
    """Delete one movement, then return to `pagina` (passing the asiento
    number as URL argument when one was given)."""
    Movimiento.objects.get(pk=pk).delete()
    url_args = [num_asiento] if num_asiento else None
    return HttpResponseRedirect(reverse(f'main:{pagina}', args=url_args))
@login_required
def anadir_movimiento(request, num, fecha):
    """Append an empty movement to asiento `num` and reopen its edit page."""
    primera_cuenta = Cuenta.objects.all()[0]
    Movimiento(
        num = num,
        fecha = fecha,
        descripcion = '',
        debe = 0,
        haber = 0,
        cuenta = primera_cuenta,
    ).save()
    return HttpResponseRedirect(reverse('main:modificar_asiento', args=[num]))
@login_required
def borrar_cuenta(request, pk):
    """Delete one account; if related movements protect it, re-render the
    accounts page with a warning instead of deleting.

    Fix: the exception was bound to an unused name (`as e`); removed.
    """
    cuenta = Cuenta.objects.get(pk=pk)
    try:
        cuenta.delete()
    except ProtectedError:
        # Related movements reference this account, so it cannot go away.
        aviso = {
            'mensaje': "Esta cuenta no se puede borrar, porque tiene movimientos asociados.",
            'nuevo_url': reverse('main:cuentas'),
        }
        context = {
            'tab': 'cuentas',
            'aviso': aviso,
        }
        return render(request, 'main/cuentas.html', context)
    return HttpResponseRedirect(reverse('main:cuentas'))
class CargarCuentas(LoginRequiredMixin, View):
    """Bulk-load accounts from an uploaded file (POST); a plain GET just
    goes back to the accounts page."""
    def get(self, request, *args, **kwargs):
        return HttpResponseRedirect(reverse('main:cuentas'))
    def post(self, request, *args, **kwargs):
        sobreescribir = request.POST.get('sobreescribir', False)
        datos_excel = functions.extraer_cuentas(request.FILES['file'])
        anadidas, con_error = functions.crear_cuentas(datos_excel, sobreescribir)
        return render(request, 'main/cargar_cuentas.html', {
            'tab': 'cuentas',
            'cuentas_anadidas': anadidas,
            'cuentas_error': con_error,
        })
class CargarAsientos(LoginRequiredMixin, View):
    """Bulk-load journal entries from an uploaded file (POST); a plain GET
    just goes back to the movements page."""
    def get(self, request, *args, **kwargs):
        return HttpResponseRedirect(reverse('main:asientos'))
    def post(self, request, *args, **kwargs):
        simple, compleja = functions.extraer_asientos(request.FILES['file'])
        anadidos, err_simple, err_compleja = functions.crear_asientos(simple, compleja)
        return render(request, 'main/cargar_asientos.html', {
            'tab': 'asientos',
            'movimientos_anadidos': anadidos,
            'errores_simple': err_simple,
            'errores_compleja': err_compleja,
            'num_errores': len(err_simple) + len(err_compleja),
        })
@login_required
def filtro_cuentas(request):
    """Apply or clear the stored account-list filter, then go back to it."""
    if request.method == 'POST':
        filtro = FiltroCuentas.objects.all()[0]
        accion = request.POST['accion_filtro']
        if accion == 'aplicar':
            filtro.num = request.POST['f_num']
            filtro.nombre = request.POST['f_nombre']
            filtro.etiqueta = request.POST['f_etiqueta']
            filtro.save()
        elif accion == 'borrar':
            filtro.num = filtro.nombre = filtro.etiqueta = ''
            filtro.save()
    return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def filtro_asientos(request):
    """Apply or clear the stored movements filter, then return to the list.

    Mirrors filtro_cuentas; the singleton lookup duplicated in both
    branches of the original is now shared (only fetched for the two
    known actions, preserving the original's behaviour on unknown input).
    """
    if request.method == 'POST':
        accion = request.POST['accion_filtro']
        if accion in ('aplicar', 'borrar'):
            filtro = FiltroMovimientos.objects.all()[0]
            if accion == 'aplicar':
                filtro.fecha_inicial = request.POST['f_fecha_inicial']
                filtro.fecha_final = request.POST['f_fecha_final']
                filtro.descripcion = request.POST['f_descripcion']
                # Account select appears to post 'pk:label'; store the pk only.
                filtro.cuenta = request.POST['f_cuenta'].split(':')[0]
                filtro.asiento = request.POST['f_asiento']
            else:  # 'borrar': reset every field to its empty default
                filtro.fecha_inicial = ''
                filtro.fecha_final = ''
                filtro.descripcion = ''
                filtro.cuenta = ''
                filtro.asiento = ''
            filtro.save()
    return HttpResponseRedirect(reverse('main:asientos'))
@login_required
def cambiar_orden(request, tipo, campo):
    """Change the sort column of the 'asientos' or 'cuentas' list.

    Re-selecting the current column flips the direction; a new column is
    selected ascending.  An unknown `tipo` returns to the index page.
    """
    modelos = {'asientos': FiltroMovimientos, 'cuentas': FiltroCuentas}
    if tipo not in modelos:
        return HttpResponseRedirect(reverse('main:index'))
    filtro = modelos[tipo].objects.all()[0]
    campo = campo.lower()
    if filtro.campo == campo:
        filtro.ascendiente = not filtro.ascendiente
    else:
        filtro.campo = campo
        filtro.ascendiente = True
    filtro.save()
    return HttpResponseRedirect(reverse('main:'+tipo))
@login_required
def gestionar_etiqueta(request):
    """Handle the add/delete tag form embedded in the accounts view.

    Only POST does anything; the user is always sent back to the accounts
    page.  Fixes: the local no longer shadows the builtin `id`, and the
    delete branch uses QuerySet.first() instead of len()/indexing.
    """
    if request.method == 'POST':
        accion = request.POST['accion_etiqueta']
        etiqueta_id = request.POST['e_id']
        nombre = request.POST['e_nombre']
        if accion == 'anadir':
            Etiqueta.objects.create(
                id = etiqueta_id,
                nombre = nombre,
            )
        elif accion == 'borrar':
            etiqueta = Etiqueta.objects.filter(id=etiqueta_id).first()
            if etiqueta is not None:
                etiqueta.delete()
    return HttpResponseRedirect(reverse('main:cuentas'))
class InformesView(LoginRequiredMixin, View):
    """Reports page: GET shows the empty report form; POST builds the
    requested report (table and chart) from the filtered movements.
    (Original docstring said 'Página principal' -- a copy/paste leftover.)
    """
    def get(self, request, *args, **kwargs):
        lista_cuentas = Cuenta.objects.all().order_by('num')
        lista_etiquetas = Etiqueta.objects.all().order_by('id')
        context = {
            'tab': 'informes',
            'lista_cuentas': lista_cuentas,
            'lista_etiquetas': lista_etiquetas,
            # Presumably read by the template as "no report generated yet".
            'df': {'empty': True },
        }
        return render(request, 'main/informes.html', context)
    def post(self, request):
        lista_cuentas = Cuenta.objects.all().order_by('num')
        lista_etiquetas = Etiqueta.objects.all().order_by('id')
        # Narrow the movements with the posted form, then build the report.
        movimientos = Movimiento.objects.all()
        movimientos = functions.filtra_movimientos(request.POST, movimientos)
        df = functions.genera_informe(request.POST['f_tipo'], movimientos)
        titulo, subtitulo = functions.titulo_informe(request.POST)
        graph = functions.grafico_informe(df)
        context = {
            'tab': 'informes',
            'lista_cuentas': lista_cuentas,
            'lista_etiquetas': lista_etiquetas,
            'titulo': titulo,
            'subtitulo': subtitulo,
            'df': df,
            'filtro': request.POST,
            'graph': graph,
        }
        return render(request, 'main/informes.html', context)
@login_required
def borrar_multiples_cuentas(request):
    """Delete every account whose 'check...' checkbox was posted.

    Protected accounts (those with related movements) are collected and
    reported back on the accounts page instead of aborting the batch.
    Fixes: unused `as e` exception binding removed; the warning context
    is only built when there is something to warn about.
    """
    if request.method == 'POST':
        protegidas = []
        for checked in request.POST.keys():
            if not checked.startswith('check'):
                continue
            cuenta = Cuenta.objects.get(pk=request.POST[checked])
            try:
                cuenta.delete()
            except ProtectedError:
                # Keep the account so it can be named in the warning.
                protegidas.append(cuenta)
        if protegidas:
            nombres = ", ".join(c.nombre for c in protegidas)
            aviso = {
                'mensaje': f"La(s) siguiente(s) cuentas no se pueden borrar, porque tienen movimientos asociados: {nombres}.",
                'nuevo_url': reverse('main:cuentas'),
            }
            context = { 'tab': 'cuentas', 'aviso': aviso }
            return render(request, 'main/cuentas.html', context)
    return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_multiples_movimientos(request):
    """Delete every movement whose 'check...' checkbox was posted, then
    return to the movements list.

    Fix: removed the `errors` list that the original created but never
    used (movements, unlike accounts, are deleted without a try/except).
    """
    if request.method == 'POST':
        for checked in request.POST.keys():
            if not checked.startswith('check'):
                continue
            movimiento = Movimiento.objects.get(pk=request.POST[checked])
            movimiento.delete()
    return HttpResponseRedirect(reverse('main:asientos'))
| 34.081023 | 126 | 0.611424 | import datetime
from django.shortcuts import render
from django.views import View
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.db.models.deletion import ProtectedError
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from main.models import Etiqueta, Cuenta, Movimiento, FiltroMovimientos, FiltroCuentas
import main.functions as functions
class IndexView(View):
def get(self, request, *args, **kwargs):
context = { 'tab': 'principal' }
return render(request, 'main/index.html', context)
class CuentasView(LoginRequiredMixin, View):
def get(self, request, pag=1, *args, **kwargs):
lista_cuentas = Cuenta.objects.all()
lista_etiquetas = Etiqueta.objects.all().order_by('id')
filtro = FiltroCuentas.objects.all()
if len(filtro) == 0:
filtro = FiltroCuentas()
filtro.save()
else:
filtro = filtro[0]
if filtro.num:
lista_cuentas = lista_cuentas.filter(pk=filtro.num)
if filtro.nombre:
lista_cuentas = lista_cuentas.filter(nombre__contains=filtro.nombre)
if filtro.etiqueta:
lista_cuentas = lista_cuentas.filter(etiqueta=filtro.etiqueta)
orden = '-' if not filtro.ascendiente else ''
lista_cuentas = lista_cuentas.order_by(orden+filtro.campo)
paginacion, num_cuentas, pag, lista_cuentas = functions.get_pagination(pag, lista_cuentas)
context = {
'tab': 'cuentas',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'filtro': filtro,
'paginacion': paginacion,
'pagina_actual': pag,
'num_cuentas': num_cuentas,
}
return render(request, 'main/cuentas.html', context)
def post(self, request, *args, **kwargs):
nueva_cuenta = Cuenta(
num = request.POST['num'].strip(),
nombre = request.POST['nombre']
)
nueva_cuenta.save()
e = request.POST['etiqueta']
if len(e):
nombres_etiquetas = e.split(', ')
nueva_cuenta.etiqueta.set(nombres_etiquetas)
nueva_cuenta.save()
return HttpResponseRedirect(reverse('main:cuentas'))
class AsientosView(LoginRequiredMixin, View):
def get(self, request, pag=1, *args, **kwargs):
lista_movimientos = Movimiento.objects.all().order_by('num')
lista_cuentas = Cuenta.objects.all().order_by('num')
filtro = FiltroMovimientos.objects.all()
if len(filtro) == 0:
filtro = FiltroMovimientos()
filtro.save()
else:
filtro = filtro[0]
if filtro.fecha_inicial:
fecha = datetime.date.fromisoformat(filtro.fecha_inicial)
lista_movimientos = lista_movimientos.filter(fecha__gte=fecha)
if filtro.fecha_final:
fecha = datetime.date.fromisoformat(filtro.fecha_final)
lista_movimientos = lista_movimientos.filter(fecha__lte=fecha)
if filtro.cuenta:
lista_movimientos = lista_movimientos.filter(cuenta=filtro.cuenta)
if filtro.descripcion:
lista_movimientos = lista_movimientos.filter(descripcion__contains=filtro.descripcion)
if filtro.asiento:
lista_movimientos = lista_movimientos.filter(num=int(filtro.asiento))
total_haber = total_debe = 0
for m in lista_movimientos:
total_debe += m.debe
total_haber += m.haber
total = total_haber - total_debe
orden = '-' if not filtro.ascendiente else ''
lista_movimientos = lista_movimientos.order_by(orden+filtro.campo)
paginacion, num_movimientos, pag, lista_movimientos = functions.get_pagination(pag, lista_movimientos)
context = {
'tab': 'asientos',
'lista_movimientos': lista_movimientos,
'lista_cuentas': lista_cuentas,
'filtro': filtro,
'total_debe': total_debe,
'total_haber': total_haber,
'total': total,
'paginacion': paginacion,
'pagina_actual': pag,
'num_movimientos': num_movimientos,
}
return render(request, 'main/asientos.html', context)
def post(self, request, *args, **kwargs):
num = functions.max_num_asiento()
pk_debe = request.POST['debe'].split(':')[0]
pk_haber = request.POST['haber'].split(':')[0]
simple = {
'num': num+1,
'fecha': request.POST['fecha'],
'descripcion': request.POST['descripcion'],
'valor': request.POST['valor'],
'debe': Cuenta.objects.get(pk=pk_debe),
'haber': Cuenta.objects.get(pk=pk_haber)
}
functions.crea_asiento_simple(simple)
return HttpResponseRedirect(reverse('main:asientos'))
class ModificarAsientoView(LoginRequiredMixin, View):
    """Edit every movement (line) of a single journal entry."""

    def get(self, request, num):
        """Render the edit form for journal entry ``num``."""
        # Filter in the database instead of loading the whole Movimiento
        # table into Python (the original list comprehension was O(table)).
        lista_movimientos = list(Movimiento.objects.filter(num=num))
        lista_cuentas = Cuenta.objects.all()
        for movimiento in lista_movimientos:
            # The template's <input type="date"> needs ISO "YYYY-MM-DD" text.
            movimiento.fecha = (
                f'{movimiento.fecha.year}-{movimiento.fecha.month:02d}'
                f'-{movimiento.fecha.day:02d}'
            )
        context = {
            'tab': 'asientos',
            'num_asiento': num,
            'lista_movimientos': lista_movimientos,
            'lista_cuentas': lista_cuentas,
        }
        return render(request, 'main/modificar_asiento.html', context)

    def post(self, request, *args, **kwargs):
        """Persist the edited movements posted by the form."""
        # Each movement contributes 7 form fields; the one extra field is
        # the CSRF token, hence the "- 1".  Integer division, not float.
        num_items = (len(request.POST) - 1) // 7
        for i in range(num_items):
            movimiento = Movimiento.objects.get(id=request.POST[f'id{i}'])
            movimiento.num = int(request.POST[f'num{i}'])
            movimiento.fecha = request.POST[f'fecha{i}']
            movimiento.descripcion = request.POST[f'descripcion{i}']
            movimiento.debe = float(request.POST[f'debe{i}'])
            movimiento.haber = float(request.POST[f'haber{i}'])
            # The account select posts "<num>: <name>"; resolve it to a Cuenta.
            num_cuenta = int(request.POST[f'cuenta{i}'].split(':')[0])
            movimiento.cuenta = Cuenta.objects.get(num=num_cuenta)
            movimiento.save()
        return HttpResponseRedirect(reverse('main:asientos'))
class ModificarCuentaView(LoginRequiredMixin, View):
    """Edit the name and labels of a single account."""

    def get(self, request, num):
        """Render the edit form for account ``num``."""
        context = {
            'tab': 'cuentas',
            'cuenta': Cuenta.objects.get(pk=num),
        }
        return render(request, 'main/modificar_cuenta.html', context)

    def post(self, request, *args, **kwargs):
        """Save the posted name and the subset of labels that actually exist."""
        cuenta = Cuenta.objects.get(pk=request.POST['num'])
        cuenta.nombre = request.POST['nombre']
        lista_etiquetas = Etiqueta.objects.all()
        # Keep only label ids that exist, silently dropping unknown ones.
        etiquetas_sin_error = [
            e for e in request.POST['etiqueta'].split(', ')
            if lista_etiquetas.filter(id=e)
        ]
        cuenta.etiqueta.set(etiquetas_sin_error)
        cuenta.save()
        return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_movimiento(request, pk, pagina, num_asiento=None):
    """Delete one movement and bounce back to the page it came from.

    When ``num_asiento`` is given, the target URL pattern of ``pagina``
    expects it as an argument (e.g. the single-entry edit page).
    """
    Movimiento.objects.get(pk=pk).delete()
    url = (reverse(f'main:{pagina}', args=[num_asiento]) if num_asiento
           else reverse(f'main:{pagina}'))
    return HttpResponseRedirect(url)
@login_required
def anadir_movimiento(request, num, fecha):
    """Append a blank movement to journal entry ``num`` and reopen its editor."""
    # A fresh line starts zeroed; an arbitrary existing account is assigned
    # as a placeholder until the user picks the real one in the edit form.
    Movimiento(
        num=num,
        fecha=fecha,
        descripcion='',
        debe=0,
        haber=0,
        cuenta=Cuenta.objects.all()[0],
    ).save()
    return HttpResponseRedirect(reverse('main:modificar_asiento', args=[num]))
@login_required
def borrar_cuenta(request, pk):
    """Delete an account; if it still has movements, show a warning instead."""
    cuenta = Cuenta.objects.get(pk=pk)
    try:
        cuenta.delete()
    except ProtectedError:
        # The FK from Movimiento protects the account: refuse and explain.
        context = {
            'tab': 'cuentas',
            'aviso': {
                'mensaje': "Esta cuenta no se puede borrar, porque tiene movimientos asociados.",
                'nuevo_url': reverse('main:cuentas'),
            },
        }
        return render(request, 'main/cuentas.html', context)
    return HttpResponseRedirect(reverse('main:cuentas'))
class CargarCuentas(LoginRequiredMixin, View):
    """Bulk-load accounts from an uploaded spreadsheet."""

    def get(self, request, *args, **kwargs):
        # Nothing to show on GET; the upload form lives on the accounts page.
        return HttpResponseRedirect(reverse('main:cuentas'))

    def post(self, request, *args, **kwargs):
        """Parse the uploaded file and report created / rejected accounts."""
        datos_excel = functions.extraer_cuentas(request.FILES['file'])
        sobreescribir = request.POST.get('sobreescribir', False)
        creadas, rechazadas = functions.crear_cuentas(datos_excel, sobreescribir)
        return render(request, 'main/cargar_cuentas.html', {
            'tab': 'cuentas',
            'cuentas_anadidas': creadas,
            'cuentas_error': rechazadas,
        })
class CargarAsientos(LoginRequiredMixin, View):
    """Bulk-load journal entries from an uploaded spreadsheet."""

    def get(self, request, *args, **kwargs):
        # Nothing to show on GET; redirect to the journal page.
        return HttpResponseRedirect(reverse('main:asientos'))

    def post(self, request, *args, **kwargs):
        """Create simple and complex entries from the file, reporting errors."""
        simple, compleja = functions.extraer_asientos(request.FILES['file'])
        anadidos, err_simple, err_compleja = functions.crear_asientos(simple, compleja)
        return render(request, 'main/cargar_asientos.html', {
            'tab': 'asientos',
            'movimientos_anadidos': anadidos,
            'errores_simple': err_simple,
            'errores_compleja': err_compleja,
            'num_errores': len(err_simple) + len(err_compleja),
        })
@login_required
def filtro_cuentas(request):
    """Apply or clear the persistent account-list filter.

    POST field ``accion_filtro`` selects the action: ``aplicar`` stores the
    submitted criteria, ``borrar`` resets them; anything else is ignored.
    """
    if request.method == 'POST':
        # Create the singleton filter row lazily instead of crashing with
        # an IndexError on an empty table (same pattern the journal listing
        # uses for FiltroMovimientos).
        filtros = FiltroCuentas.objects.all()
        filtro = filtros[0] if filtros else FiltroCuentas()
        accion = request.POST['accion_filtro']
        if accion == 'aplicar':
            filtro.num = request.POST['f_num']
            filtro.nombre = request.POST['f_nombre']
            filtro.etiqueta = request.POST['f_etiqueta']
            filtro.save()
        elif accion == 'borrar':
            filtro.num = ''
            filtro.nombre = ''
            filtro.etiqueta = ''
            filtro.save()
    return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def filtro_asientos(request):
    """Apply or clear the persistent journal filter.

    POST field ``accion_filtro`` selects the action: ``aplicar`` stores the
    submitted criteria, ``borrar`` resets them; anything else is ignored.
    """
    if request.method == 'POST':
        # Fetch the singleton once (it was duplicated in both branches) and
        # create it lazily instead of raising IndexError on an empty table.
        filtros = FiltroMovimientos.objects.all()
        filtro = filtros[0] if filtros else FiltroMovimientos()
        accion = request.POST['accion_filtro']
        if accion == 'aplicar':
            filtro.fecha_inicial = request.POST['f_fecha_inicial']
            filtro.fecha_final = request.POST['f_fecha_final']
            filtro.descripcion = request.POST['f_descripcion']
            # The account select posts "<num>: <name>"; store only the number.
            filtro.cuenta = request.POST['f_cuenta'].split(':')[0]
            filtro.asiento = request.POST['f_asiento']
            filtro.save()
        elif accion == 'borrar':
            filtro.fecha_inicial = ''
            filtro.fecha_final = ''
            filtro.descripcion = ''
            filtro.cuenta = ''
            filtro.asiento = ''
            filtro.save()
    return HttpResponseRedirect(reverse('main:asientos'))
@login_required
def cambiar_orden(request, tipo, campo):
    """Toggle or switch the sort column of the accounts / journal listing."""
    filtros_por_tipo = {
        'asientos': FiltroMovimientos,
        'cuentas': FiltroCuentas,
    }
    modelo = filtros_por_tipo.get(tipo)
    if modelo is None:
        return HttpResponseRedirect(reverse('main:index'))
    filtro = modelo.objects.all()[0]
    campo = campo.lower()
    if filtro.campo == campo:
        # Same column clicked again: flip the direction.
        filtro.ascendiente = not filtro.ascendiente
    else:
        # New column: sort by it, ascending first.
        filtro.campo = campo
        filtro.ascendiente = True
    filtro.save()
    return HttpResponseRedirect(reverse('main:' + tipo))
@login_required
def gestionar_etiqueta(request):
    """Create or delete a label (etiqueta) from the accounts page.

    POST field ``accion_etiqueta`` selects the action: ``anadir`` creates a
    label with the posted id/name, ``borrar`` deletes it if it exists;
    anything else is ignored.
    """
    if request.method == 'POST':
        accion = request.POST['accion_etiqueta']
        # Renamed from ``id`` so the builtin is no longer shadowed.
        etiqueta_id = request.POST['e_id']
        nombre = request.POST['e_nombre']
        if accion == 'anadir':
            Etiqueta.objects.create(id=etiqueta_id, nombre=nombre)
        elif accion == 'borrar':
            # .first() replaces the len()/[0] dance and issues one query.
            etiqueta = Etiqueta.objects.filter(id=etiqueta_id).first()
            if etiqueta is not None:
                etiqueta.delete()
    return HttpResponseRedirect(reverse('main:cuentas'))
class InformesView(LoginRequiredMixin, View):
    """Report page: shows a filtered, aggregated view of the movements."""

    def get(self, request, *args, **kwargs):
        """Render the empty report form."""
        context = self._base_context()
        context['df'] = {'empty': True}
        return render(request, 'main/informes.html', context)

    def post(self, request):
        """Build the report from the posted filter and render it."""
        movimientos = functions.filtra_movimientos(
            request.POST, Movimiento.objects.all())
        df = functions.genera_informe(request.POST['f_tipo'], movimientos)
        titulo, subtitulo = functions.titulo_informe(request.POST)
        context = self._base_context()
        context.update({
            'titulo': titulo,
            'subtitulo': subtitulo,
            'df': df,
            'filtro': request.POST,
            'graph': functions.grafico_informe(df),
        })
        return render(request, 'main/informes.html', context)

    def _base_context(self):
        # Data shared by the empty form and the rendered report.
        return {
            'tab': 'informes',
            'lista_cuentas': Cuenta.objects.all().order_by('num'),
            'lista_etiquetas': Etiqueta.objects.all().order_by('id'),
        }
@login_required
def borrar_multiples_cuentas(request):
    """Delete every account whose checkbox was ticked; report protected ones."""
    if request.method == 'POST':
        protegidas = []
        # Checkbox inputs are named "check<n>" and hold the account pk.
        for key in request.POST.keys():
            if not key.startswith('check'):
                continue
            cuenta = Cuenta.objects.get(pk=request.POST[key])
            try:
                cuenta.delete()
            except ProtectedError:
                # PROTECT foreign keys from Movimiento block the delete.
                protegidas.append(cuenta)
        if protegidas:
            nombres = ", ".join(c.nombre for c in protegidas)
            context = {
                'tab': 'cuentas',
                'aviso': {
                    'mensaje': f"La(s) siguiente(s) cuentas no se pueden borrar, porque tienen movimientos asociados: {nombres}.",
                    'nuevo_url': reverse('main:cuentas'),
                },
            }
            return render(request, 'main/cuentas.html', context)
    return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_multiples_movimientos(request):
    """Delete every movement whose checkbox was ticked.

    Unlike accounts, movements have no protecting relations, so a delete
    cannot fail and no error report is needed (the unused ``errors`` list
    of the original was dead code and has been removed).
    """
    if request.method == 'POST':
        # Checkbox inputs are named "check<n>" and hold the movement pk.
        for key in request.POST.keys():
            if not key.startswith('check'):
                continue
            Movimiento.objects.get(pk=request.POST[key]).delete()
    return HttpResponseRedirect(reverse('main:asientos'))
| true | true |
f7157cc2826df7834aa60b3fb11396d26a4e5f5b | 2,465 | py | Python | day_15/second.py | Mizux/adventofcode | 8bca6b5db1b9f2e64b4038d32680d07766d14e2d | [
"Apache-2.0"
] | 1 | 2021-12-11T19:41:25.000Z | 2021-12-11T19:41:25.000Z | day_15/second.py | Mizux/adventofcode | 8bca6b5db1b9f2e64b4038d32680d07766d14e2d | [
"Apache-2.0"
] | null | null | null | day_15/second.py | Mizux/adventofcode | 8bca6b5db1b9f2e64b4038d32680d07766d14e2d | [
"Apache-2.0"
] | 1 | 2021-12-06T12:09:44.000Z | 2021-12-06T12:09:44.000Z | #!/usr/bin/env python3
from collections import deque
FILE='test.txt' # sol: 40
FILE='input.txt' # sol: 824
def print_board(board):
    """Print the grid, one row per line, digits concatenated without spaces."""
    for row in board:
        print(''.join(map(str, row)))
def parse_input(file, repeat):
    """Read the risk grid and tile it ``repeat`` times in both directions.

    Rows are duplicated by repeating the whole file block vertically and
    each row horizontally; compute_board() later adjusts the risk values of
    every tile except the top-left one.  The original reopened and
    re-parsed the file ``repeat`` times; this version reads it once.
    """
    with open(file, 'r') as f:
        base_rows = [[int(c) for c in line.strip()] * repeat for line in f]
    board = []
    for _ in range(repeat):
        # Each vertical copy needs its own row lists: compute_board mutates
        # rows in place, so sharing references would corrupt the grid.
        board.extend([row[:] for row in base_rows])
    return board
def compute_board(board, repeat):
    """Fix up the risk values of the tiled board in place.

    ``board`` starts as ``repeat x repeat`` copies of the base grid (see
    parse_input).  The tile at grid position (r, c) must have every risk
    increased by r + c, wrapping 9 back to 1.  Since risks are 1..9 this is
    ``((v - 1 + r + c) % 9) + 1``, which replaces the original cascade of
    "copy the previous tile and add one" passes with a single direct pass.
    """
    height = len(board) // repeat
    width = len(board[0]) // repeat
    for row_repeat in range(repeat):
        for col_repeat in range(repeat):
            shift = row_repeat + col_repeat
            if shift == 0:
                continue  # top-left tile keeps the original values
            for row in range(height):
                target_row = board[height * row_repeat + row]
                base_row = board[row]  # tile (0, 0) is never modified
                for col in range(width):
                    target_row[width * col_repeat + col] = (
                        (base_row[col] - 1 + shift) % 9 + 1
                    )
def get_neighbour(board, pos):
    """Return the in-bounds orthogonal neighbours of ``pos`` as (row, col).

    Order is fixed: up, down, left, right (out-of-bounds ones skipped).
    """
    row, col = pos
    last_row = len(board) - 1
    last_col = len(board[0]) - 1
    candidates = (
        (row - 1, col),
        (row + 1, col),
        (row, col - 1),
        (row, col + 1),
    )
    return [(r, c) for r, c in candidates
            if 0 <= r <= last_row and 0 <= c <= last_col]
def dijkstra(board, start):
    """Shortest total-risk from ``start`` to every reachable cell.

    The cost of a step is the risk value of the cell being entered; the
    start cell itself costs nothing.  The original used a deque-based
    label-correcting search (SPFA) despite the name; this is true Dijkstra
    with a binary heap, so each cell is settled once — O(V log V) instead
    of a worst case of O(V * E).  Results are identical because all risks
    are non-negative.

    Returns a dict mapping (row, col) -> minimal total risk.
    """
    import heapq  # local import: the file only imports collections.deque
    distance = {start: 0}
    heap = [(0, start)]
    while heap:
        dist, cur = heapq.heappop(heap)
        if dist > distance.get(cur, float('inf')):
            continue  # stale entry; a shorter path was already recorded
        for point in get_neighbour(board, cur):
            candidate = dist + board[point[0]][point[1]]
            if point not in distance or candidate < distance[point]:
                distance[point] = candidate
                heapq.heappush(heap, (candidate, point))
    return distance
# Part 2: the cave is the input tiled 5x5 with per-tile risk shifts.
repeat = 5
board = parse_input(FILE, repeat)
compute_board(board, repeat)
#print_board(board)
# Minimal total risk from the top-left corner to the bottom-right corner.
distance = dijkstra(board, (0,0))
end = (len(board)-1, len(board[0])-1)
print(f'result {distance[end]}')
| 31.602564 | 135 | 0.55213 |
from collections import deque
FILE='test.txt'
FILE='input.txt'
def print_board(board):
for row in board:
print(''.join([str(i) for i in row]))
def parse_input(file, repeat):
board = []
for i in range(repeat):
with open(file, 'r') as f:
for line in f:
board.append([int(c) for c in line.strip()] * repeat)
return board
def compute_board(board, repeat):
height = len(board) // repeat
width = len(board[0]) // repeat
for row_repeat in range(repeat):
if row_repeat != 0:
# update first grid column
for row in range(height):
for col in range(width):
if board[height*(row_repeat-1)+row][col] < 9:
board[height*row_repeat+row][col] = board[height*(row_repeat-1)+row][col] + 1
else:
board[height*row_repeat+row][col] = 1
# update remaining grid columns
for col_repeat in range(1, repeat):
for row in range(height):
for col in range(width):
if board[height*row_repeat+row][width*(col_repeat-1)+col] < 9:
board[height*row_repeat+row][width*col_repeat+col] = board[height*row_repeat+row][width*(col_repeat-1)+col] + 1
else:
board[height*row_repeat+row][width*col_repeat+col] = 1
def get_neighbour(board, pos):
out = []
if pos[0] > 0:
out.append((pos[0]-1, pos[1]))
if pos[0] < len(board) - 1:
out.append((pos[0]+1, pos[1]))
if pos[1] > 0:
out.append((pos[0], pos[1] - 1))
if pos[1] < len(board[0]) - 1:
out.append((pos[0], pos[1] + 1))
return out
def dijkstra(board, start):
queue = deque([start])
distance = {start: 0}
while queue:
cur = queue.popleft()
for point in get_neighbour(board, cur):
dst = distance[cur] + board[point[0]][point[1]]
if (point not in distance or dst < distance[point]):
distance[point] = dst
queue.append(point)
return distance
repeat = 5
board = parse_input(FILE, repeat)
compute_board(board, repeat)
#print_board(board)
distance = dijkstra(board, (0,0))
end = (len(board)-1, len(board[0])-1)
print(f'result {distance[end]}')
| true | true |
f7157d354d86263b22ff896993d75bae3d71e43b | 21,644 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/_subnets_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/_subnets_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/_subnets_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations(object):
"""SubnetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subnet, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.Subnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(subnet_parameters, 'Subnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Subnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Subnet"]
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2017_06_01.models.Subnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Subnet or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.Subnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SubnetListResult"]
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubnetListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.SubnetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubnetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SubnetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'} # type: ignore
| 48.747748 | 220 | 0.660968 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations(object):
    """REST operations for subnets of an Azure virtual network.

    Issues HTTP requests against the ARM endpoints under
    ``.../Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets``
    (api-version 2017-06-01) and wraps the long-running delete and
    create-or-update calls in :class:`LROPoller`.  The layout follows the
    Azure-SDK generated operations-class pattern; instances are normally
    obtained from the service client rather than constructed directly.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Keep references to the pipeline client, (de)serializers and the
        # client configuration used by every operation below.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _delete_initial(
        self,
        resource_group_name,
        virtual_network_name,
        subnet_name,
        **kwargs
    ):
        """Send the initial DELETE request of the delete long-running operation.

        Accepts HTTP 200/202/204; any other status raises
        :class:`HttpResponseError`.  Returns the result of the optional
        ``cls`` response callback, otherwise ``None``.
        """
        # 'cls' is an optional callback used to transform the raw response.
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"
        # Construct URL from the metadata template and serialized path args.
        url = self._delete_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # No headers are needed for the DELETE request.
        header_parameters = {}
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
    def begin_delete(
        self,
        resource_group_name,
        virtual_network_name,
        subnet_name,
        **kwargs
    ):
        """Delete the specified subnet as a long-running operation.

        Returns an :class:`LROPoller`.  ``polling=False`` disables polling,
        a polling object can be passed directly, and
        ``continuation_token=...`` resumes a previously started poll
        without re-sending the initial request.
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: fire the initial DELETE request now.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_network_name=virtual_network_name,
                subnet_name=subnet_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so they
        # are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Map the final pipeline response through the optional ``cls``.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
    def get(
        self,
        resource_group_name,
        virtual_network_name,
        subnet_name,
        expand=None,
        **kwargs
    ):
        """Fetch a single subnet and return the deserialized ``Subnet`` model.

        ``expand`` is forwarded as the ``$expand`` query parameter when not
        ``None``.  Any status other than 200 raises
        :class:`HttpResponseError`.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"
        accept = "application/json, text/json"
        # Construct URL.
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers.
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Subnet', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
    def _create_or_update_initial(
        self,
        resource_group_name,
        virtual_network_name,
        subnet_name,
        subnet_parameters,
        **kwargs
    ):
        """Send the initial PUT of the create-or-update long-running operation.

        Serializes ``subnet_parameters`` as a ``Subnet`` body.  Accepts HTTP
        200/201, deserializing the returned ``Subnet`` in both cases; any
        other status raises :class:`HttpResponseError`.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"
        # Construct URL.
        url = self._create_or_update_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers.
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body.
        body_content_kwargs = {}
        body_content = self._serialize.body(subnet_parameters, 'Subnet')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('Subnet', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('Subnet', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
    def begin_create_or_update(
        self,
        resource_group_name,
        virtual_network_name,
        subnet_name,
        subnet_parameters,
        **kwargs
    ):
        """Create or update a subnet as a long-running operation.

        Returns an :class:`LROPoller` whose final result is the
        deserialized ``Subnet``.  Supports the same ``polling`` and
        ``continuation_token`` kwargs as :meth:`begin_delete`.
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: fire the initial PUT request now.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                virtual_network_name=virtual_network_name,
                subnet_name=subnet_name,
                subnet_parameters=subnet_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; don't forward to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response into a Subnet model.
            deserialized = self._deserialize('Subnet', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
    def list(
        self,
        resource_group_name,
        virtual_network_name,
        **kwargs
    ):
        """Return an :class:`ItemPaged` over all subnets of the virtual network.

        Pages are fetched lazily: each page GET deserializes a
        ``SubnetListResult`` and yields its ``value`` items; ``next_link``
        drives subsequent page requests.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01"
        accept = "application/json, text/json"
        def prepare_request(next_link=None):
            # Build the GET request for the first page, or for ``next_link``
            # (which already carries its own query string).
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page; return (next_link, iterator of items).
            deserialized = self._deserialize('SubnetListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a page; fail fast on any status other than 200.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'}
| true | true |
f7157de80611bef9e79b6363562de1eb0d53d409 | 14,816 | py | Python | assignment-01/assignment-01.py | ehumss/itu-blu537e-data-analysis-and-visualisation | a401b0d8580d2021a9f634607339d074327276cd | [
"MIT"
] | 1 | 2020-01-28T12:48:53.000Z | 2020-01-28T12:48:53.000Z | assignment-01/assignment-01.py | ehumss/itu-blu537e-data-analysis-and-visualisation | a401b0d8580d2021a9f634607339d074327276cd | [
"MIT"
] | null | null | null | assignment-01/assignment-01.py | ehumss/itu-blu537e-data-analysis-and-visualisation | a401b0d8580d2021a9f634607339d074327276cd | [
"MIT"
] | null | null | null | ####################################################################################################
#
# ISTANBUL TECHNICAL UNIVERSITY
# BLU 537E - Data Analysis & Visualization
# Assignment 01
#
####################################################################################################
#
# PROBLEM 1
#
# A store charges $12 per item if you buy less than 10 items.
#
# If you buy between 10 and 99 items, the cost is $10 per item.
#
# If you buy 100 or more items, the cost is $7 per item.
#
# Write a program that takes how many items are bought as an input and prints the total cost.
#
####################################################################################################
# Pricing tiers: the per-item price depends on how many items are bought.
NUMBER_OF_ITEMS_TYPE_1 = 10
NUMBER_OF_ITEMS_TYPE_2 = 100
CHARGE_TYPE_1 = 12
CHARGE_TYPE_2 = 10
CHARGE_TYPE_3 = 7

def problem1(number_of_items):
    """Print the total cost of buying ``number_of_items`` items.

    Pricing: $12/item for fewer than 10 items, $10/item for 10-99 items,
    and $7/item for 100 items or more.

    :param number_of_items: how many items are bought.
    :returns: None; the result is printed.
    """
    # Pick the unit price for the quantity tier; the elif/else chain makes
    # the tiers mutually exclusive without repeating boundary checks.
    if number_of_items < NUMBER_OF_ITEMS_TYPE_1:
        unit_price = CHARGE_TYPE_1
    elif number_of_items < NUMBER_OF_ITEMS_TYPE_2:
        unit_price = CHARGE_TYPE_2
    else:
        unit_price = CHARGE_TYPE_3
    cost = number_of_items * unit_price
    print("{} items are bought, the total cost is: {}.".format(number_of_items, cost))
####################################################################################################
#
# PROBLEM 2
#
# Write a program that generates a list of 20 random numbers between 1 and 100.
#
# (a) Print the list.
# (b) Print the average of the elements in the list.
# (c) Print the largest and smallest values in the list.
# (d) Print the second largest and second smallest entries in the list
# (e) Print how many even numbers are in the list.
#
####################################################################################################
import random;
def problem2():
    """Generate 20 random ints in [1, 100] and print statistics about them.

    Prints (a) the list, (b) the average, (c) the largest/smallest values,
    (d) the second largest/smallest values, and (e) the count of evens.

    Fixes over the original:
      * ``range(1, 20)`` produced only 19 numbers while the average was
        divided by 20; the list now really holds 20 numbers.
      * the second largest/smallest were seeded with ``list[0]``, which
        could itself be the max/min and never get replaced; they are now
        computed from the values strictly different from the extremes
        (falling back to the extreme itself if all values are equal).
    """
    numbers = [random.randint(1, 100) for _ in range(20)]
    # PART (a): the list itself.
    print("Part (a): A list of 20 random numbers between 1 and 100 is generated.\n")
    print(numbers)
    print("\n***********************************************************\n")
    # PART (b): the average.
    print("Part (b): The average of the elements in the list is evaluated.\n")
    print("The Average: {}".format(sum(numbers) / 20))
    print("\n***********************************************************\n")
    # PART (c): extremes.
    print("Part (c): The largest and the smallest values in the list are found.\n")
    largest = max(numbers)
    smallest = min(numbers)
    print("The largest value is: {}".format(largest))
    print("The smallest value is: {}".format(smallest))
    print("\n***********************************************************\n")
    # PART (d): second extremes (duplicates of the extreme are excluded,
    # matching the original's `i != max` intent).
    print("Part (d): Second largest and the second smallest entries are found.\n")
    below_max = [x for x in numbers if x != largest]
    above_min = [x for x in numbers if x != smallest]
    second_largest = max(below_max) if below_max else largest
    second_smallest = min(above_min) if above_min else smallest
    print("The second largest value is: {}".format(second_largest))
    print("The second smallest value is: {}".format(second_smallest))
    print("\n***********************************************************\n")
    # PART (e): count of even numbers.
    print("Part (e): Total number of even numbers in the list is evaluated.\n")
    even_count = sum(1 for x in numbers if x % 2 == 0)
    print("Count of Even Numbers: {}".format(even_count))
    print("\n***********************************************************\n")
####################################################################################################
#
# PROBLEM 3
#
# You are given a file named “blood-pressure.csv” which contains blood pressure measurement for some patients.
#
# The first column is for patient id and the second column is for blood pressure measurement in the format of mean[min-max] values.
#
# Write a function that takes this file as an input and do the folowing tasks:
#
# (a) Prints the lowest and highest blood pressure measurements amongs the patients. The output should be 108 and 180.
# (b) Prints the average of the mean values.
#
####################################################################################################
# Sentinels kept for backward compatibility with the original module API.
MAX_INTEGER = 65535
MIN_INTEGER = -65535
import csv
import re

def problem3(file):
    """Report blood-pressure statistics from ``file`` (CSV).

    Expects a CSV with an ``id`` column and a
    ``Blood pressure systolic (mmHg) mean[min-max]`` column whose values
    look like ``135[113-166]``.  Prints (a) the lowest and highest
    measurements over all rows and (b) the average of the mean values.

    Fix over the original: the running maximum was updated with an
    ``if/elif`` that set it from the row's *min* value and then skipped
    the row's *max* value entirely; both bounds are now considered.

    :param file: path to the CSV file to read.
    :returns: None; results are printed.
    """
    highest = MIN_INTEGER
    lowest = MAX_INTEGER
    total = 0
    number_of_rows = 0
    # 'with' guarantees the file is closed even if a row fails to parse.
    with open(file, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        for row in csv_reader:
            # values = [mean, min, max] extracted from e.g. '135[113-166]'.
            values = [int(x) for x in re.findall(r'\d+', row['Blood pressure systolic (mmHg) mean[min-max]'])]
            # Consider BOTH bounds of the row when tracking the extremes.
            highest = max(highest, values[1], values[2])
            lowest = min(lowest, values[1], values[2])
            total += values[0]
            number_of_rows += 1
    print("PART (A): Print the lowest and the highest blood pressure measurements among the patients.\n")
    print("The Lowest Blood Pressure is : {}".format(lowest))
    print("The Highest Blood Pressure is: {}".format(highest))
    print("\n***********************************************************\n")
    print("PART (B): Print the average of the mean values.\n")
    print("The Average is: {}".format(total / number_of_rows))
    print("\n***********************************************************\n")
####################################################################################################
#
# PROBLEM 4
#
# You are given a csv (gdp_per_capita.csv) file for GDP per capita taken from World Bank.
#
# The file holds data from 1960 to 2017. Note that some data for certain years are missing.
#
# Write a function that takes this file as an input and do the following tasks for Turkey:
#
# (a) Calculate the yearly percentage increase compared to previous year and the find the year that has highest increase in terms of percentage.
# (b) Find the years that GDP per capita decreased compared to the previous year.
#
####################################################################################################
import collections
import csv
import re
def problem4(file):
    """Analyze Turkey's GDP-per-capita series from ``file`` (';'-delimited CSV).

    Expects a header row starting with ``Country Name`` followed by year
    columns.  Prints (a) the year with the highest year-over-year
    percentage increase and (b) every year in which GDP per capita
    decreased versus the previous year.

    Fixes over the original:
      * the percentage change versus the previous year divided by the
        *current* year's value; it now divides by the *previous* year's
        value, the correct denominator for "increase compared to the
        previous year".
      * the input file is now closed via a context manager.

    NOTE(review): like the original, empty cells for missing years would
    make ``float('')`` raise -- presumably the Turkey row is complete in
    the shipped data; confirm before reusing on other countries.

    :param file: path to the CSV file to read.
    :returns: None; results are printed.
    """
    increase_in_terms_of_percentage = {}
    with open(file, mode='r') as csv_file:
        reader = csv.DictReader(csv_file, delimiter=';')
        years = reader.fieldnames
        for row in reader:
            if row['Country Name'] != 'Turkey':
                continue
            # Percentages exist for years[2:] only -- the first year column
            # (e.g. 1960) has no predecessor.
            for i in range(2, len(years)):
                current = float(row[years[i]])
                previous = float(row[years[i - 1]])
                increase_in_terms_of_percentage[years[i]] = (current - previous) * 100 / previous
    print("PART (A): Find the year that has highest increase in terms of percentage.\n")
    # Counter.most_common(1) yields [(year, change)] for the biggest change.
    print("The YEAR with the HIGHEST INCREASE: {}".format(
        collections.Counter(increase_in_terms_of_percentage).most_common(1)))
    print("\n***********************************************************\n")
    print("PART (B): Find the years that GDP per capita decreased compared to the previous year.\n")
    print("GDP percentage decreased in the following YEARS: \n")
    for year, change in increase_in_terms_of_percentage.items():
        if change < 0:
            print(year, end=' ')
    print("\n\n***********************************************************\n")
####################################################################################################
#
# PROBLEM 5
#
# Norway_new_car_sales_by_model.csv file contains information of the new car sales in Norway between the years 2007-2017.
#
# The dataset was obtained from www.kaggle.com web site. The dataset comprises of monthly car sale quantity for various manufacturers and models.
#
# Make columns shows the manufacturer and Pct column shows the percent share in monlty total sales.
#
# Using this dataset do the following tasks:
#
# (a) Print the number of unique manufacturers in this dataset.
# (b) Find the manufacturer that has the highest car sales in 2010?
#
####################################################################################################
import collections
import csv
def problem5(file):
    """Analyze Norwegian new-car sales from ``file`` (CSV).

    Expects columns ``Year``, ``Month``, ``Make``, ``Model``, ``Quantity``
    and ``Pct``.  Prints (a) the number of unique manufacturers and
    (b) the manufacturer with the highest total sales in 2010.

    Improvements over the original: the input file is now closed via a
    context manager, and manufacturers are counted directly in a Counter
    instead of building an intermediate list of every row's Make.

    :param file: path to the CSV file to read.
    :returns: None; results are printed.
    """
    manufacturers = collections.Counter()
    quantity_of_car_sales = collections.Counter()
    # errors='ignore' keeps the original tolerance of bad byte sequences.
    with open(file, mode='r', encoding="utf8", errors='ignore') as csv_file:
        reader = csv.DictReader(csv_file, delimiter=',')
        for row in reader:
            manufacturers[row['Make']] += 1
            # Only 2010 sales contribute to the "highest sales" ranking.
            if row['Year'] == '2010':
                quantity_of_car_sales[row['Make']] += int(row['Quantity'])
    print("PART (A): Print the number of unique manufacturers in this dataset.\n")
    # 'Manifacturers' typo kept on purpose: it is part of the printed output.
    print("The Number of Unique Manifacturers is: {}".format(len(manufacturers.keys())))
    print("\n***********************************************************\n")
    print("PART (B): Find the manufacturer that has the highest car sales in 2010.\n")
    print("The MANUFACTURER with HIGHEST CAR SALES in 2010: {}".format(quantity_of_car_sales.most_common(1)))
    print("\n***********************************************************\n")
####################################################################################################
# TEST CODE
####################################################################################################
# Ad-hoc driver: runs every exercise in sequence when the module is executed.
# NOTE(review): problems 3-5 read CSV files by relative path, so they must
# exist in the current working directory -- presumably shipped alongside
# this script; confirm before running from elsewhere.
# PROBLEM ONE
problem1(1);
problem1(10);
problem1(100);
# PROBLEM TWO
problem2();
# PROBLEM THREE
problem3("blood_pressure.csv");
# PROBLEM FOUR
problem4("gdp_per_capita.csv");
# PROBLEM FIVE
problem5("norway_new_car_sales_by_model.csv");
####################################################################################################
| 43.83432 | 189 | 0.532667 | true | true | |
f7157fcc233e7ad5174d2ffad33f0e7b24b80a15 | 1,120 | py | Python | sagemaker_studio/containers/dashboard/src/app.py | NihalHarish/sagemaker-explaining-credit-decisions | e5965902d8901819a60f8c56517a82ddd17c1f95 | [
"Apache-2.0"
] | 80 | 2020-04-15T09:35:11.000Z | 2022-03-23T01:56:12.000Z | sagemaker_studio/containers/dashboard/src/app.py | IronOnet/sagemaker-explaining-credit-decisions | dbb8ea1a685412033c774c2a79cc1e5794438cf9 | [
"Apache-2.0"
] | 8 | 2020-04-16T16:53:09.000Z | 2022-02-06T17:07:02.000Z | sagemaker_studio/containers/dashboard/src/app.py | IronOnet/sagemaker-explaining-credit-decisions | dbb8ea1a685412033c774c2a79cc1e5794438cf9 | [
"Apache-2.0"
] | 28 | 2020-05-25T09:26:41.000Z | 2022-01-25T22:23:54.000Z | from pathlib import Path
import streamlit as st
from package import utils
from pages import local_page, global_page
from shared import list_explanation_groups
def explanation_group_selectbox():
    """Render a sidebar selectbox over the available explanation groups.

    Options are labelled with the second-to-last path component; the full
    path of the chosen group is returned.
    """
    group_paths = list_explanation_groups()
    return st.sidebar.selectbox(
        label='Select explanation group:',
        options=group_paths,
        format_func=lambda group_path: group_path.split('/')[-2],
    )
def explanation_scope_selectbox():
    """Render a sidebar selectbox choosing the explanation scope.

    Returns ``'local'`` (shown as "Individual") or ``'global'`` (shown as
    "Group"); the default selection is ``'global'`` (index=1).
    """
    labels = {'local': 'Individual', 'global': 'Group'}
    return st.sidebar.selectbox(
        label='Select explanation scope:',
        options=["local", "global"],
        index=1,
        format_func=lambda scope: labels[scope],
    )
if __name__ == "__main__":
    # Streamlit entry point: build the sidebar controls, then render the
    # page module matching the chosen explanation scope.
    current_folder = utils.get_current_folder(globals())
    st.sidebar.markdown('# Explanations Dashboard')
    group_path = explanation_group_selectbox()
    scope = explanation_scope_selectbox()
    pages = {"local": local_page, "global": global_page}
    page = pages.get(scope)
    if page is not None:
        page.show(group_path)
| 28.717949 | 75 | 0.707143 | from pathlib import Path
import streamlit as st
from package import utils
from pages import local_page, global_page
from shared import list_explanation_groups
def explanation_group_selectbox():
paths = list_explanation_groups()
path = st.sidebar.selectbox(
label='Select explanation group:',
options=paths,
format_func=lambda e: e.split('/')[-2]
)
return path
def explanation_scope_selectbox():
explanation_scope = st.sidebar.selectbox(
label='Select explanation scope:',
options=["local", "global"],
index=1,
format_func=lambda e: {'local': 'Individual', 'global': 'Group'}[e]
)
return explanation_scope
if __name__ == "__main__":
current_folder = utils.get_current_folder(globals())
st.sidebar.markdown('# Explanations Dashboard')
explanation_group_path = explanation_group_selectbox()
explanation_scope = explanation_scope_selectbox()
if explanation_scope == "local":
local_page.show(explanation_group_path)
elif explanation_scope == "global":
global_page.show(explanation_group_path)
| true | true |
f715800c50b2c0c85b8363141732d1ea4e6cedf4 | 11,896 | py | Python | opflexagent/rpc.py | shyam81295/python-opflex-agent | 3b564c93d62734354eea059afec7dce713225872 | [
"Apache-2.0"
] | null | null | null | opflexagent/rpc.py | shyam81295/python-opflex-agent | 3b564c93d62734354eea059afec7dce713225872 | [
"Apache-2.0"
] | null | null | null | opflexagent/rpc.py | shyam81295/python-opflex-agent | 3b564c93d62734354eea059afec7dce713225872 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import rpc as n_rpc
from neutron.common import topics
from oslo_log import helpers as log
from oslo_log import log as logging
import oslo_messaging
LOG = logging.getLogger(__name__)
TOPIC_OPFLEX = 'opflex'
ENDPOINT = 'endpoint'
VRF = 'vrf'
NOTIFY_VRF = 'notify-vrf'
class AgentNotifierApi(object):
    """RPC API used by the server side to fan notifications out to agents.

    Version history:
        1.3 - added opflex_notify_vrf
    """

    BASE_RPC_API_VERSION = '1.3'

    def __init__(self, topic):
        target = oslo_messaging.Target(
            topic=topic, version=self.BASE_RPC_API_VERSION)
        self.client = n_rpc.get_client(target)
        # Pre-compute every per-event topic name this notifier casts on.
        self.topic_port_update = topics.get_topic_name(topic, topics.PORT,
                                                       topics.UPDATE)
        self.topic_port_delete = topics.get_topic_name(topic, topics.PORT,
                                                       topics.DELETE)
        self.topic_subnet_update = topics.get_topic_name(topic, topics.SUBNET,
                                                         topics.UPDATE)
        self.topic_opflex_notify_vrf = topics.get_topic_name(
            topic, TOPIC_OPFLEX, NOTIFY_VRF, topics.UPDATE)
        self.topic_opflex_endpoint_update = topics.get_topic_name(
            topic, TOPIC_OPFLEX, ENDPOINT, topics.UPDATE)
        self.topic_opflex_vrf_update = topics.get_topic_name(
            topic, TOPIC_OPFLEX, VRF, topics.UPDATE)

    def port_update(self, context, port):
        """Notify the agent on the port's bound host that the port changed."""
        host = port.get('binding:host_id')
        if not host:
            # Unbound port: there is no agent host to notify.
            return
        ctxt = self.client.prepare(
            server=host, topic=self.topic_port_update, version='1.1')
        ctxt.cast(context, 'port_update', port=port)

    def port_delete(self, context, port):
        """Broadcast a port deletion to all agents (fanout)."""
        ctxt = self.client.prepare(
            fanout=True, topic=self.topic_port_delete, version='1.1')
        ctxt.cast(context, 'port_delete', port=port)

    def subnet_update(self, context, subnet):
        """Broadcast a subnet update to all agents (fanout)."""
        ctxt = self.client.prepare(
            fanout=True, topic=self.topic_subnet_update, version='1.1')
        ctxt.cast(context, 'subnet_update', subnet=subnet)

    def opflex_notify_vrf(self, context, vrf):
        """Broadcast a VRF change notification to all agents (RPC 1.3)."""
        ctxt = self.client.prepare(
            fanout=True, topic=self.topic_opflex_notify_vrf, version='1.3')
        ctxt.cast(context, 'opflex_notify_vrf', vrf=vrf)

    def opflex_endpoint_update(self, context, details, host=None):
        """Send endpoint details to a single agent host (RPC 1.2)."""
        ctxt = self.client.prepare(
            topic=self.topic_opflex_endpoint_update, server=host,
            version='1.2')
        ctxt.cast(context, 'opflex_endpoint_update', details=details)

    def opflex_vrf_update(self, context, details):
        """Broadcast VRF details to all agents (RPC 1.2, fanout)."""
        ctxt = self.client.prepare(
            fanout=True, topic=self.topic_opflex_vrf_update, version='1.2')
        ctxt.cast(context, 'opflex_vrf_update', details=details)
class GBPServerRpcApi(object):
"""Agent-side RPC (stub) for agent-to-plugin interaction.
Version 1.1: add async request_* APIs
"""
GBP_RPC_VERSION = "1.1"
def __init__(self, topic):
target = oslo_messaging.Target(
topic=topic, version=self.GBP_RPC_VERSION)
self.client = n_rpc.get_client(target)
@log.log_method_call
def get_gbp_details(self, context, agent_id, device=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_gbp_details', agent_id=agent_id,
device=device, host=host)
@log.log_method_call
def get_gbp_details_list(self, context, agent_id, devices=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_gbp_details_list', agent_id=agent_id,
devices=devices, host=host)
@log.log_method_call
def get_vrf_details(self, context, agent_id, vrf_id=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_vrf_details', agent_id=agent_id,
vrf_id=vrf_id, host=host)
@log.log_method_call
def get_vrf_details_list(self, context, agent_id, vrf_ids=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_vrf_details_list', agent_id=agent_id,
vrf_ids=vrf_ids, host=host)
@log.log_method_call
def request_endpoint_details(self, context, agent_id, request=None,
host=None):
# Request is a tuple with the device_id as first element, and the
# request ID as second element
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_endpoint_details', agent_id=agent_id,
request=request, host=host)
@log.log_method_call
def request_endpoint_details_list(self, context, agent_id, requests=None,
host=None):
# Requests is a list of tuples with the device_id as first element,
# and the request ID as second element
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_endpoint_details_list',
agent_id=agent_id, requests=requests, host=host)
@log.log_method_call
def request_vrf_details(self, context, agent_id, request=None, host=None):
# Request is a tuple with the vrf_id as first element, and the
# request ID as second element
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_vrf_details', agent_id=agent_id,
request=request, host=host)
@log.log_method_call
def request_vrf_details_list(self, context, agent_id, requests=None,
host=None):
# Requests is a list of tuples with the vrf_id as first element,
# and the request ID as second element
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_vrf_details_list',
agent_id=agent_id, requests=requests, host=host)
@log.log_method_call
def ip_address_owner_update(self, context, agent_id, ip_owner_info,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'ip_address_owner_update', agent_id=agent_id,
ip_owner_info=ip_owner_info, host=host)
class GBPServerRpcCallback(object):
"""Plugin-side RPC (implementation) for agent-to-plugin interaction."""
# History
# 1.0 Initial version
# 1.1 Async request_* APIs
RPC_API_VERSION = "1.1"
target = oslo_messaging.Target(version=RPC_API_VERSION)
def __init__(self, gbp_driver, agent_notifier=None):
self.gbp_driver = gbp_driver
self.agent_notifier = agent_notifier
def get_gbp_details(self, context, **kwargs):
return self.gbp_driver.get_gbp_details(context, **kwargs)
def get_gbp_details_list(self, context, **kwargs):
return [
self.get_gbp_details(
context,
device=device,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def get_vrf_details(self, context, **kwargs):
return self.gbp_driver.get_vrf_details(context, **kwargs)
def get_vrf_details_list(self, context, **kwargs):
return [
self.get_vrf_details(
context,
vrf_id=vrf_id,
**kwargs
)
for vrf_id in kwargs.pop('vrf_ids', [])
]
def request_endpoint_details(self, context, **kwargs):
result = [self.gbp_driver.request_endpoint_details(context, **kwargs)]
# Notify the agent back once the answer is calculated
if result[0]:
self.agent_notifier.opflex_endpoint_update(
context, result, host=kwargs.get('host'))
def request_endpoint_details_list(self, context, **kwargs):
result = []
for request in kwargs.pop('requests', []):
details = self.gbp_driver.request_endpoint_details(
context, request=request, **kwargs)
if details:
result.append(details)
# Notify the agent back once the answer is calculated
# Exclude empty answers as an error as occurred and the agent might
# want to retry
if result:
self.agent_notifier.opflex_endpoint_update(
context, result, host=kwargs.get('host'))
def request_vrf_details(self, context, **kwargs):
result = [self.gbp_driver.request_vrf_details(context, **kwargs)]
# Notify the agent back once the answer is calculated
if result[0]:
self.agent_notifier.opflex_vrf_update(context, result,
host=kwargs.get('host'))
def request_vrf_details_list(self, context, **kwargs):
result = []
for request in kwargs.pop('requests', []):
details = self.gbp_driver.request_vrf_details(
context, request=request, **kwargs)
if details:
result.append(details)
# Notify the agent back once the answer is calculated
# Exclude empty answers as an error as occurred and the agent might
# want to retry
if result:
self.agent_notifier.opflex_vrf_update(
context, [x for x in result if x], host=kwargs.get('host'))
def ip_address_owner_update(self, context, **kwargs):
self.gbp_driver.ip_address_owner_update(context, **kwargs)
class OpenstackRpcMixin(object):
"""A mix-in that enable Opflex agent
support in agent implementations.
"""
target = oslo_messaging.Target(version='1.3')
def subnet_update(self, context, subnet):
self.updated_vrf.add(subnet['tenant_id'])
LOG.debug("subnet_update message processed for subnet %s",
subnet['id'])
def opflex_notify_vrf(self, context, vrf):
self.updated_vrf.add(vrf)
LOG.debug("opflex_notify_vrf message processed for vrf %s", vrf)
def port_update(self, context, **kwargs):
port = kwargs.get('port')
# Put the port identifier in the updated_ports set.
# Even if full port details might be provided to this call,
# they are not used since there is no guarantee the notifications
# are processed in the same order as the relevant API requests
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def opflex_endpoint_update(self, context, details):
self._opflex_endpoint_update(context, details)
def opflex_vrf_update(self, context, details):
self._opflex_vrf_update(self, context, details)
| 40.879725 | 79 | 0.634247 |
from neutron.common import rpc as n_rpc
from neutron.common import topics
from oslo_log import helpers as log
from oslo_log import log as logging
import oslo_messaging
LOG = logging.getLogger(__name__)
TOPIC_OPFLEX = 'opflex'
ENDPOINT = 'endpoint'
VRF = 'vrf'
NOTIFY_VRF = 'notify-vrf'
class AgentNotifierApi(object):
BASE_RPC_API_VERSION = '1.3'
def __init__(self, topic):
target = oslo_messaging.Target(
topic=topic, version=self.BASE_RPC_API_VERSION)
self.client = n_rpc.get_client(target)
self.topic_port_update = topics.get_topic_name(topic, topics.PORT,
topics.UPDATE)
self.topic_port_delete = topics.get_topic_name(topic, topics.PORT,
topics.DELETE)
self.topic_subnet_update = topics.get_topic_name(topic, topics.SUBNET,
topics.UPDATE)
self.topic_opflex_notify_vrf = topics.get_topic_name(
topic, TOPIC_OPFLEX, NOTIFY_VRF, topics.UPDATE)
self.topic_opflex_endpoint_update = topics.get_topic_name(
topic, TOPIC_OPFLEX, ENDPOINT, topics.UPDATE)
self.topic_opflex_vrf_update = topics.get_topic_name(
topic, TOPIC_OPFLEX, VRF, topics.UPDATE)
def port_update(self, context, port):
host = port.get('binding:host_id')
if host:
cctxt = self.client.prepare(
server=host, topic=self.topic_port_update, version='1.1')
cctxt.cast(context, 'port_update', port=port)
def port_delete(self, context, port):
cctxt = self.client.prepare(fanout=True, topic=self.topic_port_delete,
version='1.1')
cctxt.cast(context, 'port_delete', port=port)
def subnet_update(self, context, subnet):
cctxt = self.client.prepare(fanout=True,
topic=self.topic_subnet_update,
version='1.1')
cctxt.cast(context, 'subnet_update', subnet=subnet)
def opflex_notify_vrf(self, context, vrf):
cctxt = self.client.prepare(fanout=True,
topic=self.topic_opflex_notify_vrf,
version='1.3')
cctxt.cast(context, 'opflex_notify_vrf', vrf=vrf)
def opflex_endpoint_update(self, context, details, host=None):
cctxt = self.client.prepare(
topic=self.topic_opflex_endpoint_update, server=host,
version='1.2')
cctxt.cast(context, 'opflex_endpoint_update', details=details)
def opflex_vrf_update(self, context, details):
cctxt = self.client.prepare(fanout=True,
topic=self.topic_opflex_vrf_update,
version='1.2')
cctxt.cast(context, 'opflex_vrf_update', details=details)
class GBPServerRpcApi(object):
GBP_RPC_VERSION = "1.1"
def __init__(self, topic):
target = oslo_messaging.Target(
topic=topic, version=self.GBP_RPC_VERSION)
self.client = n_rpc.get_client(target)
@log.log_method_call
def get_gbp_details(self, context, agent_id, device=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_gbp_details', agent_id=agent_id,
device=device, host=host)
@log.log_method_call
def get_gbp_details_list(self, context, agent_id, devices=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_gbp_details_list', agent_id=agent_id,
devices=devices, host=host)
@log.log_method_call
def get_vrf_details(self, context, agent_id, vrf_id=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_vrf_details', agent_id=agent_id,
vrf_id=vrf_id, host=host)
@log.log_method_call
def get_vrf_details_list(self, context, agent_id, vrf_ids=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_vrf_details_list', agent_id=agent_id,
vrf_ids=vrf_ids, host=host)
@log.log_method_call
def request_endpoint_details(self, context, agent_id, request=None,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_endpoint_details', agent_id=agent_id,
request=request, host=host)
@log.log_method_call
def request_endpoint_details_list(self, context, agent_id, requests=None,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_endpoint_details_list',
agent_id=agent_id, requests=requests, host=host)
@log.log_method_call
def request_vrf_details(self, context, agent_id, request=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_vrf_details', agent_id=agent_id,
request=request, host=host)
@log.log_method_call
def request_vrf_details_list(self, context, agent_id, requests=None,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_vrf_details_list',
agent_id=agent_id, requests=requests, host=host)
@log.log_method_call
def ip_address_owner_update(self, context, agent_id, ip_owner_info,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'ip_address_owner_update', agent_id=agent_id,
ip_owner_info=ip_owner_info, host=host)
class GBPServerRpcCallback(object):
RPC_API_VERSION = "1.1"
target = oslo_messaging.Target(version=RPC_API_VERSION)
def __init__(self, gbp_driver, agent_notifier=None):
self.gbp_driver = gbp_driver
self.agent_notifier = agent_notifier
def get_gbp_details(self, context, **kwargs):
return self.gbp_driver.get_gbp_details(context, **kwargs)
def get_gbp_details_list(self, context, **kwargs):
return [
self.get_gbp_details(
context,
device=device,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def get_vrf_details(self, context, **kwargs):
return self.gbp_driver.get_vrf_details(context, **kwargs)
def get_vrf_details_list(self, context, **kwargs):
return [
self.get_vrf_details(
context,
vrf_id=vrf_id,
**kwargs
)
for vrf_id in kwargs.pop('vrf_ids', [])
]
def request_endpoint_details(self, context, **kwargs):
result = [self.gbp_driver.request_endpoint_details(context, **kwargs)]
if result[0]:
self.agent_notifier.opflex_endpoint_update(
context, result, host=kwargs.get('host'))
def request_endpoint_details_list(self, context, **kwargs):
result = []
for request in kwargs.pop('requests', []):
details = self.gbp_driver.request_endpoint_details(
context, request=request, **kwargs)
if details:
result.append(details)
if result:
self.agent_notifier.opflex_endpoint_update(
context, result, host=kwargs.get('host'))
def request_vrf_details(self, context, **kwargs):
result = [self.gbp_driver.request_vrf_details(context, **kwargs)]
if result[0]:
self.agent_notifier.opflex_vrf_update(context, result,
host=kwargs.get('host'))
def request_vrf_details_list(self, context, **kwargs):
result = []
for request in kwargs.pop('requests', []):
details = self.gbp_driver.request_vrf_details(
context, request=request, **kwargs)
if details:
result.append(details)
if result:
self.agent_notifier.opflex_vrf_update(
context, [x for x in result if x], host=kwargs.get('host'))
def ip_address_owner_update(self, context, **kwargs):
self.gbp_driver.ip_address_owner_update(context, **kwargs)
class OpenstackRpcMixin(object):
target = oslo_messaging.Target(version='1.3')
def subnet_update(self, context, subnet):
self.updated_vrf.add(subnet['tenant_id'])
LOG.debug("subnet_update message processed for subnet %s",
subnet['id'])
def opflex_notify_vrf(self, context, vrf):
self.updated_vrf.add(vrf)
LOG.debug("opflex_notify_vrf message processed for vrf %s", vrf)
def port_update(self, context, **kwargs):
port = kwargs.get('port')
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def opflex_endpoint_update(self, context, details):
self._opflex_endpoint_update(context, details)
def opflex_vrf_update(self, context, details):
self._opflex_vrf_update(self, context, details)
| true | true |
f715802eeda042cbb9bf7a01b8eb94abfede69c2 | 2,706 | py | Python | modules/cmderr.py | patataofcourse/styleventer-archive | dc4b82f2903f91990fa9236cb67a9dd92e3e1a2f | [
"MIT"
] | 1 | 2021-01-28T16:22:32.000Z | 2021-01-28T16:22:32.000Z | modules/cmderr.py | alexdevteam/styleventer-archive | 303f280049d480b21c6e804e236c90fe3475a074 | [
"MIT"
] | 1 | 2021-01-16T22:14:36.000Z | 2021-01-16T22:14:36.000Z | modules/cmderr.py | patataofcourse/styleventer-archive | dc4b82f2903f91990fa9236cb67a9dd92e3e1a2f | [
"MIT"
] | 1 | 2021-01-16T22:01:59.000Z | 2021-01-16T22:01:59.000Z | from discord.ext import commands
import discord, sys, os
import traceback
import datetime
from libs import settings
async def oncmderror(ctx: discord.ext.commands.Context, error):
if type(error) == commands.CommandOnCooldown:
if int(error.retry_after) == 0:
await ctx.send("Wait a few seconds before using this command again!")
else:
await ctx.send("Wait at least {} more seconds to use this command again!".format(int(error.retry_after)))
elif type(error) == commands.CommandNotFound:
setting = settings.get_setting("prefix_response_channels", [ctx.message.guild.id])
if setting is None:
await ctx.send("Command `{}` doesn't exist!".format(ctx.message.content.split()[0]))
elif str(ctx.message.channel.id) in setting:
await ctx.send("Command `{}` doesn't exist!".format(ctx.message.content.split()[0]))
elif type(error) == commands.errors.NotOwner:
await ctx.send("That command is only usable by aleok.")
elif type(error) == commands.errors.MissingRequiredArgument:
cmdname = ctx.message.content.split()[0].lstrip(ctx.bot.command_prefix)
command = next(filter(lambda cmd: cmdname in cmd.aliases or cmdname == cmd.name, ctx.bot.commands))
await ctx.send(f"Syntax: `'{command.name} {command.usage}`")
elif type(error) == commands.errors.BadArgument:
await ctx.send(f"Wrong syntax ({str(error)}). Try using `'help command`")
elif type(error) == commands.errors.ExpectedClosingQuoteError:
await ctx.send("Expected a closing quote (\")")
elif type(error) == commands.errors.UnexpectedQuoteError:
await ctx.send("Unexpected quote mark (\") in non-quoted argument")
else:
error_str = "\n".join(traceback.format_exception(type(error), error, error.__traceback__))
timenow = datetime.datetime.now()
errorcode = f"{timenow.year}{timenow.month:02}{timenow.day:02}{timenow.hour:02}{timenow.minute:02}{timenow.second:02}"
errorcode = format(int(errorcode), "X")
await ctx.send(
"There was an unknown error! Please send the following error code to aleok: `{}`".format(errorcode))
try:
owner = ctx.bot.get_user(ctx.bot.owner_id)
if owner is None:
await ctx.send(f"Error `{errorcode}`:```python\n{error_str[:1700]}```")
else:
await owner.send(f"Error `{errorcode}`:```python\n{error_str[:1700]}```")
except Exception as e:
await ctx.send(f"Error IN sending error, yay! (internal cmderr error: {e})")
print(error_str)
def setup(bot, **kwargs):
bot.on_command_error = oncmderror
| 52.038462 | 126 | 0.65558 | from discord.ext import commands
import discord, sys, os
import traceback
import datetime
from libs import settings
async def oncmderror(ctx: discord.ext.commands.Context, error):
if type(error) == commands.CommandOnCooldown:
if int(error.retry_after) == 0:
await ctx.send("Wait a few seconds before using this command again!")
else:
await ctx.send("Wait at least {} more seconds to use this command again!".format(int(error.retry_after)))
elif type(error) == commands.CommandNotFound:
setting = settings.get_setting("prefix_response_channels", [ctx.message.guild.id])
if setting is None:
await ctx.send("Command `{}` doesn't exist!".format(ctx.message.content.split()[0]))
elif str(ctx.message.channel.id) in setting:
await ctx.send("Command `{}` doesn't exist!".format(ctx.message.content.split()[0]))
elif type(error) == commands.errors.NotOwner:
await ctx.send("That command is only usable by aleok.")
elif type(error) == commands.errors.MissingRequiredArgument:
cmdname = ctx.message.content.split()[0].lstrip(ctx.bot.command_prefix)
command = next(filter(lambda cmd: cmdname in cmd.aliases or cmdname == cmd.name, ctx.bot.commands))
await ctx.send(f"Syntax: `'{command.name} {command.usage}`")
elif type(error) == commands.errors.BadArgument:
await ctx.send(f"Wrong syntax ({str(error)}). Try using `'help command`")
elif type(error) == commands.errors.ExpectedClosingQuoteError:
await ctx.send("Expected a closing quote (\")")
elif type(error) == commands.errors.UnexpectedQuoteError:
await ctx.send("Unexpected quote mark (\") in non-quoted argument")
else:
error_str = "\n".join(traceback.format_exception(type(error), error, error.__traceback__))
timenow = datetime.datetime.now()
errorcode = f"{timenow.year}{timenow.month:02}{timenow.day:02}{timenow.hour:02}{timenow.minute:02}{timenow.second:02}"
errorcode = format(int(errorcode), "X")
await ctx.send(
"There was an unknown error! Please send the following error code to aleok: `{}`".format(errorcode))
try:
owner = ctx.bot.get_user(ctx.bot.owner_id)
if owner is None:
await ctx.send(f"Error `{errorcode}`:```python\n{error_str[:1700]}```")
else:
await owner.send(f"Error `{errorcode}`:```python\n{error_str[:1700]}```")
except Exception as e:
await ctx.send(f"Error IN sending error, yay! (internal cmderr error: {e})")
print(error_str)
def setup(bot, **kwargs):
bot.on_command_error = oncmderror
| true | true |
f71581382f809688e495e7651dfc11918e82e216 | 884 | py | Python | awwardsApp/urls.py | umunadine/Awwards | 1a862ef64c195e6ab9b38c8e1faf35f224354dbb | [
"MIT"
] | null | null | null | awwardsApp/urls.py | umunadine/Awwards | 1a862ef64c195e6ab9b38c8e1faf35f224354dbb | [
"MIT"
] | null | null | null | awwardsApp/urls.py | umunadine/Awwards | 1a862ef64c195e6ab9b38c8e1faf35f224354dbb | [
"MIT"
] | null | null | null | from django.conf.urls import url,include
from django.conf import settings
from . import views
from django.conf.urls.static import static
urlpatterns = [
url(r'^$',views.index,name='index'),
url(r'^accounts/profile/', views.my_profile, name='my_profile'),
url(r'register/',views.register, name='register'),
url(r'project/(\d+)',views.rate_project,name='rate-project'),
url(r'profile/(\d+)',views.profile,name='profile'),
url(r'my_profile',views.my_profile,name='my_profile'),
url(r'^new/project$', views.new_project, name='new_project'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^ratings/', include('star_ratings.urls', namespace='ratings')),
url(r'^accounts/', include('registration.backends.simple.urls')),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 38.434783 | 81 | 0.707014 | from django.conf.urls import url,include
from django.conf import settings
from . import views
from django.conf.urls.static import static
urlpatterns = [
url(r'^$',views.index,name='index'),
url(r'^accounts/profile/', views.my_profile, name='my_profile'),
url(r'register/',views.register, name='register'),
url(r'project/(\d+)',views.rate_project,name='rate-project'),
url(r'profile/(\d+)',views.profile,name='profile'),
url(r'my_profile',views.my_profile,name='my_profile'),
url(r'^new/project$', views.new_project, name='new_project'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^ratings/', include('star_ratings.urls', namespace='ratings')),
url(r'^accounts/', include('registration.backends.simple.urls')),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| true | true |
f71581754c1df790c4b96c28981d61f8e5506370 | 89 | py | Python | samples/helloworld.py | neumond/minpiler | 2e37a9e0854383d3974af38e1cb2da0ecb8e2108 | [
"MIT"
] | 23 | 2020-12-20T03:39:30.000Z | 2022-03-23T15:47:10.000Z | samples/helloworld.py | neumond/minpiler | 2e37a9e0854383d3974af38e1cb2da0ecb8e2108 | [
"MIT"
] | 15 | 2020-12-21T01:12:22.000Z | 2021-04-19T10:40:11.000Z | samples/helloworld.py | neumond/minpiler | 2e37a9e0854383d3974af38e1cb2da0ecb8e2108 | [
"MIT"
] | 2 | 2022-02-12T19:19:50.000Z | 2022-02-12T21:33:35.000Z | from minpiler.typeshed import M, message1
M.print('Hello world!')
message1.printFlush()
| 17.8 | 41 | 0.775281 | from minpiler.typeshed import M, message1
M.print('Hello world!')
message1.printFlush()
| true | true |
f715818477d40bfbaf00925d174a3b2a99345b43 | 853 | py | Python | reviewboard/reviews/evolutions/file_attachments.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | [
"MIT"
] | 921 | 2015-01-01T15:26:28.000Z | 2022-03-29T11:30:38.000Z | reviewboard/reviews/evolutions/file_attachments.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | [
"MIT"
] | 5 | 2015-03-17T18:57:47.000Z | 2020-10-02T13:24:31.000Z | reviewboard/reviews/evolutions/file_attachments.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | [
"MIT"
] | 285 | 2015-01-12T06:24:36.000Z | 2022-03-29T11:03:50.000Z | from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('ReviewRequest', 'file_attachments', models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('ReviewRequest', 'inactive_file_attachments',
models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('Review', 'file_attachment_comments', models.ManyToManyField,
related_model='reviews.FileAttachmentComment'),
AddField('ReviewRequestDraft', 'file_attachments', models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('ReviewRequestDraft', 'inactive_file_attachments',
models.ManyToManyField,
related_model='attachments.FileAttachment')
]
| 40.619048 | 78 | 0.731536 | from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('ReviewRequest', 'file_attachments', models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('ReviewRequest', 'inactive_file_attachments',
models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('Review', 'file_attachment_comments', models.ManyToManyField,
related_model='reviews.FileAttachmentComment'),
AddField('ReviewRequestDraft', 'file_attachments', models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('ReviewRequestDraft', 'inactive_file_attachments',
models.ManyToManyField,
related_model='attachments.FileAttachment')
]
| true | true |
f71581f934458fc27232e1abba28dfc2d9fb50c7 | 2,639 | py | Python | trustpayments/models/transaction_comment_create.py | TrustPayments/python-sdk | 6fde6eb8cfce270c3612a2903a845c13018c3bb9 | [
"Apache-2.0"
] | 2 | 2020-01-16T13:24:06.000Z | 2020-11-21T17:40:17.000Z | postfinancecheckout/models/transaction_comment_create.py | pfpayments/python-sdk | b8ef159ea3c843a8d0361d1e0b122a9958adbcb4 | [
"Apache-2.0"
] | 4 | 2019-10-14T17:33:23.000Z | 2021-10-01T14:49:11.000Z | postfinancecheckout/models/transaction_comment_create.py | pfpayments/python-sdk | b8ef159ea3c843a8d0361d1e0b122a9958adbcb4 | [
"Apache-2.0"
] | 2 | 2019-10-15T14:17:10.000Z | 2021-09-17T13:07:09.000Z | # coding: utf-8
import pprint
import six
from enum import Enum
from . import AbstractTransactionCommentActive
class TransactionCommentCreate(AbstractTransactionCommentActive):
swagger_types = {
'transaction': 'int',
}
attribute_map = {
'transaction': 'transaction',
}
_transaction = None
def __init__(self, **kwargs):
self.discriminator = None
self.transaction = kwargs.get('transaction')
super().__init__(**kwargs)
self.swagger_types.update(super().swagger_types)
self.attribute_map.update(super().attribute_map)
@property
def transaction(self):
"""Gets the transaction of this TransactionCommentCreate.
:return: The transaction of this TransactionCommentCreate.
:rtype: int
"""
return self._transaction
@transaction.setter
def transaction(self, transaction):
"""Sets the transaction of this TransactionCommentCreate.
:param transaction: The transaction of this TransactionCommentCreate.
:type: int
"""
if transaction is None:
raise ValueError("Invalid value for `transaction`, must not be `None`")
self._transaction = transaction
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, Enum):
result[attr] = value.value
else:
result[attr] = value
if issubclass(TransactionCommentCreate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, TransactionCommentCreate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 26.39 | 83 | 0.56726 |
import pprint
import six
from enum import Enum
from . import AbstractTransactionCommentActive
class TransactionCommentCreate(AbstractTransactionCommentActive):
swagger_types = {
'transaction': 'int',
}
attribute_map = {
'transaction': 'transaction',
}
_transaction = None
def __init__(self, **kwargs):
self.discriminator = None
self.transaction = kwargs.get('transaction')
super().__init__(**kwargs)
self.swagger_types.update(super().swagger_types)
self.attribute_map.update(super().attribute_map)
@property
def transaction(self):
return self._transaction
@transaction.setter
def transaction(self, transaction):
if transaction is None:
raise ValueError("Invalid value for `transaction`, must not be `None`")
self._transaction = transaction
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, Enum):
result[attr] = value.value
else:
result[attr] = value
if issubclass(TransactionCommentCreate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, TransactionCommentCreate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f715842fcaf7266d175e63d68638aed9f2e32e69 | 7,994 | py | Python | tagger_ui/ui_model/annotated_images_manager.py | RobertMcCarter/animal-finder | 5ac839a65df62ab312e440ce43416727492e84d8 | [
"MIT"
] | null | null | null | tagger_ui/ui_model/annotated_images_manager.py | RobertMcCarter/animal-finder | 5ac839a65df62ab312e440ce43416727492e84d8 | [
"MIT"
] | null | null | null | tagger_ui/ui_model/annotated_images_manager.py | RobertMcCarter/animal-finder | 5ac839a65df62ab312e440ce43416727492e84d8 | [
"MIT"
] | null | null | null | """
The business model core of the application.
"""
from typing import List, Union
from PIL import Image
from .annotated_image import AnnotatedImage
from .scaled_region2d import ScaledRegion2d
from .timer import Timer
from src.model import Size2d, Region2d
def clearImagesOutsideRange(
annotatedImages: List[AnnotatedImage],
currentIndex: int,
keepPrevious: int = 10,
keepNext: int = 10,
) -> None:
"""Clear out of memory any loaded images that are outside the given
range (so that we don't continue to collect in-memory images and
consume the user's entire RAM.
"""
# First, figure out our "keep" images in memory range
startIndex = max(0, currentIndex - keepPrevious)
endIndex = min(currentIndex + keepNext, len(annotatedImages) - 1)
# Clear out images outside our range
for i in range(0, startIndex):
annotatedImages[i].image = None
for i in range(endIndex + 1, len(annotatedImages)):
annotatedImages[i].image = None
class AnnotatedImagesManager:
    """Maps the various regions on an annotated image to the screen rectangles
    being displayed
    """

    def __init__(self, annotatedImages: List[AnnotatedImage]):
        assert annotatedImages
        self._currentIndex = 0
        self.maxViewed = 0
        self._annotatedImages = annotatedImages

    # ##############################################################################################
    # region Properties
    # ##############################################################################################
    @property
    def current(self) -> AnnotatedImage:
        """The currently selected/viewed annotated image"""
        return self._annotatedImages[self._currentIndex]

    @property
    def currentIndex(self) -> int:
        """The current index within the ordered list of images"""
        return self._currentIndex

    @property
    def images(self) -> List[AnnotatedImage]:
        """The ordered list of annotated images"""
        return self._annotatedImages

    # The current rectangle the user is actively drawing on the screen
    # (which could be different from the image coordinates due to a small screen or window size)
    # BUGFIX: this attribute used to be declared twice; the duplicate was removed.
    activeRegion: Union[ScaledRegion2d, None] = None

    def __len__(self):
        """The number of annotated images"""
        return len(self._annotatedImages)

    @property
    def windowSize(self) -> Size2d:
        """The current size of the window where the image is displayed"""
        return self._windowSize

    @property
    def scale(self) -> float:
        """The current scale factor to go from the original image to the scaled (likely down) image"""
        return self.current.scale

    @property
    def regions(self) -> List[ScaledRegion2d]:
        """The ordered collection of region view-models of interest for this image"""
        return self.current.regions

    # The maximum index within the sorted list of annotated images that the user
    # has viewed (and presumably processed)
    maxViewed: int

    # The directory of images this annotated image manager collection represents
    saveFileName: str

    # endregion
    # ##############################################################################################
    # region Methods
    # ##############################################################################################
    def isValidIndex(self, index: int) -> bool:
        """Test if the given index is valid"""
        return 0 <= index < len(self._annotatedImages)

    def addActiveRegion(self) -> None:
        """Adds the in-progress active region to the current image and clears it"""
        if self.activeRegion is None:
            return
        self.activeRegion.canvasRectId = (
            0  # It no longer belongs to that canvas rectangle
        )
        self.current.addRegion(self.activeRegion)
        # User has "used up" the current active region
        self.activeRegion = None

    def updateActiveScreenRegion(self, screenRegion: Region2d) -> ScaledRegion2d:
        """The view should call this when the active region is changed
        (likely the user dragging the mouse).
        Returns the active scaled region.
        """
        if self.activeRegion is None:
            self.activeRegion = ScaledRegion2d(screenRegion)
        else:
            self.activeRegion.screenRegion = screenRegion
        # Now re-scale the screen region to get the "true" image region
        self.activeRegion.updateImageFromScreen(self.scale)
        return self.activeRegion

    def onWindowResized(self, newWindowSize: Size2d) -> Union[float, None]:
        """Update our current image to have the correct scale for the new canvas size
        Scale the image according to our current canvas size
        Returns the scale factor used to shrink the image to the size of the window,
        or None if the image did not change
        """
        # Save the new window size
        self._windowSize = newWindowSize
        # Scale the current image to this size
        scale = self.current.scaleImageForSize(newWindowSize)
        if scale:
            # We need to resize our Tk wrapper image
            self.current.wrapImageForTk()
            # We changed the scaling factor, so we need to re-scale the active region too
            if self.activeRegion:
                self.activeRegion.updateScreenFromImage(scale)
        # BUGFIX: the docstring promises the scale factor but the method
        # previously returned nothing; return it so callers can use it.
        return scale if scale else None

    def scanForTaggedIndex(self, direction: int) -> int | None:
        """Scan through starting at the current image index for the next
        image that is tagged.
        direction is either +1 or -1 to control direction.
        Returns the index of the next tagged image, or None if there is none.
        """
        # BUGFIX: the previous implementation advanced the index *after* the
        # bounds check, so it could index one past the end (IndexError) or
        # index -1 (silently wrapping around to the last image).
        i = self.currentIndex + direction
        while 0 <= i < len(self._annotatedImages):
            if self._annotatedImages[i].isTagged:
                return i
            i += direction
        return None

    def moveToImage(self, index: int):
        """Open the image with the given index
        (into our ordered collection of annotated images that we received from the model layer)
        """
        assert self.isValidIndex(index)
        # Store the index that we're looking at
        self._currentIndex = index
        # Update our max viewed index
        self.maxViewed = max(self.maxViewed, self._currentIndex)
        # Ensure the image is loaded
        if self.current.image is None:
            self.current.image = Image.open(self.current.filePath)
            self.current.image.load()
        # Scale the image so it fits while retaining the correct aspect ratio
        # Only scale if we haven't already previously scaled the image (which is slow)
        # Store it back in our domain logic layer for faster access
        self.current.scaleImageForSize(self._windowSize)
        # Resize the image for the UI layer, and wrap it for Tk
        # NOTE(review): loadImage()/scaleImageForSize() below appear to repeat
        # the loading/scaling done just above -- confirm whether one of the two
        # code paths can be removed.
        self.current.loadImage()
        self.current.scaleImageForSize(self.windowSize)
        self.current.wrapImageForTk()
        # Clear images outside of our "keep" window so we don't keep growing our memory footprint!
        clearImagesOutsideRange(self._annotatedImages, index, 10, 10)

    # endregion
    # ##############################################################################################
    # region Private data members
    # ##############################################################################################
    # The collection of annotated images we need to process for our test set
    _annotatedImages: List[AnnotatedImage]

    # The index into the _annotatedImages array,
    # So effectively, which annotated image are we currently looking at?
    _currentIndex: int = 0

    # The size of the window that is displaying our images
    _windowSize: Size2d = Size2d(500, 500)
    # endregion
| 37.35514 | 102 | 0.621091 | from typing import List, Union
from PIL import Image
from .annotated_image import AnnotatedImage
from .scaled_region2d import ScaledRegion2d
from .timer import Timer
from src.model import Size2d, Region2d
def clearImagesOutsideRange(
annotatedImages: List[AnnotatedImage],
currentIndex: int,
keepPrevious: int = 10,
keepNext: int = 10,
) -> None:
startIndex = max(0, currentIndex - keepPrevious)
endIndex = min(currentIndex + keepNext, len(annotatedImages) - 1)
for i in range(0, startIndex):
annotatedImages[i].image = None
for i in range(endIndex + 1, len(annotatedImages)):
annotatedImages[i].image = None
class AnnotatedImagesManager:
def __init__(self, annotatedImages: List[AnnotatedImage]):
assert annotatedImages
self._currentIndex = 0
self.maxViewed = 0
self._annotatedImages = annotatedImages
| true | true |
f715861adc117fbbd75adf3f5e6a1228542c06dc | 97,810 | py | Python | src/sos/step_executor.py | pgcudahy/sos | ee902841003c7630db501101038f370650955ef9 | [
"BSD-3-Clause"
] | null | null | null | src/sos/step_executor.py | pgcudahy/sos | ee902841003c7630db501101038f370650955ef9 | [
"BSD-3-Clause"
] | null | null | null | src/sos/step_executor.py | pgcudahy/sos | ee902841003c7630db501101038f370650955ef9 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import ast
import copy
import os
import subprocess
import sys
import time
from collections import defaultdict
from collections.abc import Mapping, Sequence
from typing import List
import zmq
from .controller import close_socket, create_socket, send_message_to_controller
from .messages import encode_msg, decode_msg
from .eval import SoS_eval, SoS_exec, accessed_vars, KeepOnlyImportAndDefine
from .executor_utils import (
__named_output__,
__null_func__,
__output_from__,
__traced__,
clear_output,
create_task,
get_traceback_msg,
reevaluate_output,
statementMD5,
validate_step_sig,
verify_input,
ExecuteError,
)
from .syntax import (
SOS_DEPENDS_OPTIONS,
SOS_INPUT_OPTIONS,
SOS_OUTPUT_OPTIONS,
SOS_TARGETS_OPTIONS,
)
from .targets import (
RemovedTarget,
RuntimeInfo,
UnavailableLock,
sos_variable,
UnknownTarget,
dynamic,
file_target,
sos_step,
sos_targets,
invalid_target
)
from .tasks import MasterTaskParams, TaskFile
from .utils import (
ArgumentError,
StopInputGroup,
TerminateExecution,
env,
get_traceback,
short_repr,
ProcessKilled,
get_localhost_ip,
textMD5,
)
__all__: List = []
class TaskManager:
    # manage tasks created by the step
    """Accumulate tasks created by substeps of a step and group them into
    (master) task jobs of ``trunk_size`` tasks each, executed by
    ``trunk_workers`` workers.

    Tasks are collected into fixed-size "slots"; a slot is flushed to the
    submission queues once it is full (or, for the last slot, once it reaches
    the size of the remainder group).
    """

    def __init__(self, num_tasks, trunk_size, trunk_workers):
        super(TaskManager, self).__init__()
        # total number of substeps/tasks expected for this step
        self.num_tasks = num_tasks
        import math
        # one slot per group of trunk_size tasks
        self._slots = [[] for x in range(math.ceil(num_tasks / trunk_size))]
        # size of the final (possibly partial) slot
        self._last_slot_size = (
            trunk_size if (num_tasks % trunk_size == 0) else (num_tasks % trunk_size)
        )
        self.trunk_size = trunk_size
        self.trunk_workers = trunk_workers
        self._submitted_tasks = []
        # entire groups
        self._unsubmitted_slots = []
        # collection of partial groups if some tasks are completed
        self._unsubmitted_tasks = []
        # derived from _unsubmitted_slots
        self._all_ids = []
        self._all_output = []
        #
        self._terminate = False
        #
        self._tags = {}

    def set(self, idx, task_def):
        """Record the task definition (or None for a skipped substep) for
        substep *idx*; flush the containing slot when it becomes full."""
        slot = idx // self.trunk_size
        #
        # slot [
        #    [idx, None]     <- for empty
        #    [idx, taskdef]  <- for non empty
        # ]
        self._slots[slot].append([idx, task_def])
        # the slot is full
        if len(self._slots[slot]) == self.trunk_size or (
            slot == len(self._slots) - 1
            and len(self._slots[slot]) == self._last_slot_size
        ):
            # if there are valida tasks
            if not all([x[1] is None for x in self._slots[slot]]):
                # remove empty tasks and sort by id
                if self.trunk_size == 1 or any(x[1] is None for x in self._slots[slot]):
                    # if partial, sent to partial list
                    self._unsubmitted_tasks.extend(
                        [x[1] for x in self._slots[slot] if x[1] is not None]
                    )
                else:
                    self._unsubmitted_slots.append(
                        sorted(self._slots[slot], key=lambda x: x[0])
                    )
            # clear skit
            self._slots[slot] = []
        if not task_def:
            return
        # task_def is a (task_id, taskdef, output) tuple
        if isinstance(task_def[2], Sequence):
            self._all_output.extend(task_def[2])
        self._all_ids.append(task_def[0])
        self._tags[task_def[0]] = task_def[1].tags

    def tags(self, task_id):
        """Return the tags recorded for *task_id* (empty list if unknown)."""
        return self._tags.get(task_id, [])

    def index_of(self, task_id):
        """Return the position of *task_id* among recorded tasks, or -1."""
        if task_id in self._all_ids:
            return self._all_ids.index(task_id)
        else:
            return -1

    def has_output(self, output):
        """Check if any target in *output* is already produced by a pending task."""
        if not isinstance(output, Sequence) or not self._unsubmitted_slots:
            return False
        return any(x in self._all_output for x in output)

    def get_job(self, all_tasks=False):
        """Package unsubmitted tasks into jobs and return their ids, or None.

        Complete slots become master tasks; leftover tasks are submitted
        individually (trunk_size == 1) or batched into master tasks of
        trunk_size, keeping any incomplete remainder unless *all_tasks*.
        """
        # single tasks
        ids = []
        # submit all tasks without trunk, easy
        for slot in self._unsubmitted_slots:
            # create a master task
            master = MasterTaskParams(self.trunk_workers)
            for _, (task_id, taskdef, _) in slot:
                master.push(task_id, taskdef)
            ids.append(master.ID)
            TaskFile(master.ID).save(master.finalize())
            send_message_to_controller(
                [
                    "workflow_sig",
                    "task",
                    master.ID,
                    f"{{'creation_time': {time.time()}}}",
                ]
            )
        self._unsubmitted_slots = []
        # individual tasks...
        if self.trunk_size == 1 or all_tasks:
            to_be_submitted = self._unsubmitted_tasks
            [
                to_be_submitted.extend([x[1] for x in slot if x[1] is not None])
                for slot in self._slots
                if slot
            ]
            self._unsubmitted_tasks = []
        else:
            # save complete blocks
            num_tasks = (
                len(self._unsubmitted_tasks) // self.trunk_size * self.trunk_size
            )
            to_be_submitted = self._unsubmitted_tasks[:num_tasks]
            self._unsubmitted_tasks = self._unsubmitted_tasks[num_tasks:]
        if self.trunk_size == 1 or (all_tasks and len(self._unsubmitted_tasks) == 1):
            for task_id, taskdef, _ in to_be_submitted:
                # if the task file, perhaps it is already running, we do not change
                # the task file. Otherwise we are changing the status of the task
                TaskFile(task_id).save(taskdef)
                send_message_to_controller(
                    [
                        "workflow_sig",
                        "task",
                        task_id,
                        f"{{'creation_time': {time.time()}}}",
                    ]
                )
                ids.append(task_id)
        else:
            master = None
            for task_id, taskdef, _ in to_be_submitted:
                if master is not None and master.num_tasks() == self.trunk_size:
                    ids.append(master.ID)
                    TaskFile(master.ID).save(master)
                    send_message_to_controller(
                        [
                            "workflow_sig",
                            "task",
                            master.ID,
                            f"{{'creation_time': {time.time()}}}",
                        ]
                    )
                    master = None
                if master is None:
                    master = MasterTaskParams(self.trunk_workers)
                master.push(task_id, taskdef)
            # the last piece
            if master is not None:
                TaskFile(master.ID).save(master.finalize())
                send_message_to_controller(
                    [
                        "workflow_sig",
                        "task",
                        master.ID,
                        f"{{'creation_time': {time.time()}}}",
                    ]
                )
                ids.append(master.ID)
        if not ids:
            return None
        self._submitted_tasks.extend(ids)
        return ids

    def clear_submitted(self):
        """Forget the ids of already-submitted tasks."""
        self._submitted_tasks = []
def expand_input_files(*args, **kwargs):
    """Resolve the arguments of an ``input:`` statement into a sos_targets
    object, defaulting to the output of the previous step (``step_input``)
    when nothing is specified."""

    def _resolved(val):
        # dynamic targets are resolved at execution time
        return val.resolve() if isinstance(val, dynamic) else val

    args = [_resolved(arg) for arg in args]
    kwargs = {key: _resolved(val) for key, val in kwargs.items()}

    verify = env.config["error_mode"] != "ignore"
    if not args:
        # nothing specified at all: use the default step input as-is
        if not kwargs:
            return env.sos_dict["step_input"]
        # only grouping/pairing options: apply them to the default input
        if all(key in SOS_TARGETS_OPTIONS for key in kwargs):
            return sos_targets(
                env.sos_dict["step_input"],
                _verify_existence=verify,
                **kwargs,
            )
    return sos_targets(
        *args,
        **kwargs,
        _verify_existence=verify,
        _undetermined=False,
        _source=env.sos_dict["step_name"],
    )
def expand_depends_files(*args, **kwargs):
    """handle directive depends"""
    resolved_args = []
    for item in args:
        # dynamic targets are resolved at execution time
        resolved_args.append(item.resolve() if isinstance(item, dynamic) else item)
    resolved_kwargs = {}
    for key, val in kwargs.items():
        resolved_kwargs[key] = val.resolve() if isinstance(val, dynamic) else val
    # dependencies must exist, and are never undetermined
    return sos_targets(
        *resolved_args,
        **resolved_kwargs,
        _verify_existence=True,
        _undetermined=False,
        _source=env.sos_dict["step_name"],
    )
def expand_output_files(value, *args, **kwargs):
    """Process output files (perhaps a pattern) to determine input files."""
    # if any argument is dynamic, the output cannot be determined until
    # the step has actually run
    if any(isinstance(item, dynamic) for item in (*args, *kwargs.values())):
        return sos_targets(_undetermined=value)
    return sos_targets(
        *args, **kwargs, _undetermined=False, _source=env.sos_dict["step_name"]
    )
def parse_shared_vars(option):
    """Collect the variable names referenced by a step ``shared`` option.

    The option may be a single name (str), a mapping from name to
    expression, or a sequence mixing both forms; anything else yields an
    empty set.
    """
    collected = set()
    if not option:
        return collected
    if isinstance(option, str):
        collected.add(option)
        return collected
    if isinstance(option, Mapping):
        for expr in option.values():
            collected |= accessed_vars(expr, mode="eval")
        return collected
    if isinstance(option, Sequence):
        for entry in option:
            if isinstance(entry, str):
                collected.add(entry)
            elif isinstance(entry, Mapping):
                for expr in entry.values():
                    collected |= accessed_vars(expr, mode="eval")
    return collected
def evaluate_shared(vars, option):
    # handle option shared and store variables in a "__shared_vars" variable
    """Evaluate the ``shared`` option of a step against the per-substep
    variable dictionaries *vars* and return a dict of shared name -> value.

    Side effect: updates env.sos_dict with the last substep's variables and
    creates step-level ``step_<key>`` lists from the per-substep values.
    Raises RuntimeError for missing variables, failed evaluations, or an
    option of unsupported type.
    """
    shared_vars = {}
    env.sos_dict.quick_update(vars[-1])
    for key in vars[-1].keys():
        try:
            if key in ("output", "depends", "input"):
                env.logger.warning(
                    f"Cannot overwrite variable step_{key} from substep variable {key}"
                )
            else:
                # step_<key> aggregates the value of <key> across substeps
                env.sos_dict.set("step_" + key, [x[key] for x in vars])
        except Exception as e:
            env.logger.warning(f"Failed to create step level variable step_{key}: {e}")
    if isinstance(option, str):
        if option in env.sos_dict:
            shared_vars[option] = env.sos_dict[option]
        else:
            raise RuntimeError(f"shared variable does not exist: {option}")
    elif isinstance(option, Mapping):
        for var, val in option.items():
            try:
                if var == val:
                    shared_vars[var] = env.sos_dict[var]
                else:
                    shared_vars[var] = SoS_eval(val)
            except Exception as e:
                raise RuntimeError(
                    f"Failed to evaluate shared variable {var} from expression {val}: {e}"
                )
    # if there are dictionaries in the sequence, e.g.
    # shared=['A', 'B', {'C':'D"}]
    elif isinstance(option, Sequence):
        for item in option:
            if isinstance(item, str):
                if item in env.sos_dict:
                    shared_vars[item] = env.sos_dict[item]
                else:
                    raise RuntimeError(f"shared variable does not exist: {option}")
            elif isinstance(item, Mapping):
                for var, val in item.items():
                    try:
                        if var == val:
                            continue
                        else:
                            shared_vars[var] = SoS_eval(val)
                    except Exception as e:
                        raise RuntimeError(
                            f"Failed to evaluate shared variable {var} from expression {val}: {e}"
                        )
            else:
                raise RuntimeError(
                    f"Unacceptable shared option. Only str or mapping are accepted in sequence: {option}"
                )
    else:
        raise RuntimeError(
            f"Unacceptable shared option. Only str, sequence, or mapping are accepted in sequence: {option}"
        )
    return shared_vars
def get_value_of_param(name, param_list, extra_dict=None):
    """Extract the value of keyword parameter *name* from the comma-separated
    parameter text *param_list* (e.g. the text of a ``task:`` statement).

    Returns ``[value]`` if the parameter is present, ``[]`` otherwise.
    Literal values are evaluated with ast.literal_eval; any other expression
    is compiled and evaluated with *extra_dict* as its global namespace.
    """
    # BUGFIX: avoid a shared mutable default argument -- eval() inserts
    # __builtins__ into the mapping passed as globals, which would have
    # polluted (and leaked through) the module-level default dict.
    if extra_dict is None:
        extra_dict = {}
    tree = ast.parse(f"__null_func__({param_list})")
    # x.func can be an attribute (e.g. a.b()) and do not have id
    kwargs = [
        x for x in ast.walk(tree) if isinstance(x, ast.keyword) and x.arg == name
    ]
    if not kwargs:
        return []
    try:
        return [ast.literal_eval(kwargs[0].value)]
    except Exception:
        # not a literal: evaluate as an expression in the caller-provided scope
        return [
            eval(
                compile(
                    ast.Expression(body=kwargs[0].value),
                    filename="<string>",
                    mode="eval",
                ),
                extra_dict,
            )
        ]
def is_sos_run_the_only_last_stmt(stmt):
    """Return True if the last statement of *stmt* is a bare ``sos_run(...)``
    call and no other ``sos_run`` call appears anywhere in the code."""
    tree = ast.parse(stmt)
    if not tree.body:
        return False
    last = tree.body[-1]
    # the last statement must be an expression that is a direct sos_run() call
    last_is_sos_run = (
        isinstance(last, ast.Expr)
        and isinstance(last.value, ast.Call)
        and getattr(last.value.func, "id", None) == "sos_run"
    )
    if not last_is_sos_run:
        return False
    # and it must be the only sos_run() call in the whole statement
    n_calls = sum(
        1
        for node in ast.walk(tree)
        if isinstance(node, ast.Call) and getattr(node.func, "id", None) == "sos_run"
    )
    return n_calls == 1
class Base_Step_Executor:
    # This base class defines how steps are executed. The derived classes will reimplement
    # some function to behave differently in different modes.
    #
    def __init__(self, step):
        # the step (SoS_Step) this executor runs
        self.step = step
        # created lazily on the first submitted task (see submit_task)
        self.task_manager = None
        # accumulates substep/task errors so they can be reported together
        self.exec_error = ExecuteError(self.step.step_name())
    #
    # Functions that should be redefined in derived class
    #
    def submit_tasks(self, tasks):
        # abstract: derived executors submit the given task ids to their queue
        raise RuntimeError("Undefined base function submit_tasks")
    def wait_for_tasks(self, tasks, all_submitted):
        # this will be redefined in subclasses
        raise RuntimeError("Undefined base function wait_for_tasks")
    def wait_for_subworkflows(self, allow_pending=0):
        # abstract: derived executors wait for nested workflows to complete
        raise RuntimeError("Undefined base function wait_for_subworkflows")
    def handle_unknown_target(self, e):
        # abstract: derived executors decide how to resolve a missing target
        raise RuntimeError("Undefined base function handle_unknown_target")
    def init_input_output_vars(self):
        """Initialize step_input/_input, step_output/_output and
        step_depends/_depends in env.sos_dict before the step executes."""
        # if there is __step_output__ from previous step, use it as default input
        # otherwise, reset to empty
        if (
            "__step_output__" not in env.sos_dict
            or env.sos_dict["__step_output__"].unspecified()
        ):
            env.sos_dict.set("step_input", sos_targets([]))
        else:
            env.sos_dict.set("step_input", env.sos_dict["__step_output__"])
        # input can be Undetermined from undetermined output from last step
        env.sos_dict.set("_input", copy.deepcopy(env.sos_dict["step_input"]))
        # if there is default output for auxiliary steps, use it as step_output and _output
        # otherwise reset to unspecified.
        if "__default_output__" in env.sos_dict:
            # if step is triggered by sos_step, it should not be considered as
            # output of the step. #981
            env.sos_dict.set(
                "__default_output__",
                sos_targets(
                    [
                        x
                        for x in env.sos_dict["__default_output__"]._targets
                        if not isinstance(x, sos_step)
                    ]
                ),
            )
            env.sos_dict.set(
                "step_output", copy.deepcopy(env.sos_dict["__default_output__"])
            )
            env.sos_dict.set(
                "_output", copy.deepcopy(env.sos_dict["__default_output__"])
            )
        else:
            env.sos_dict.set("step_output", sos_targets([]))
            # output is said to be unspecified until output: is used
            env.sos_dict.set("_output", sos_targets(_undetermined=True))
        env.sos_dict.set("step_depends", sos_targets([]))
        env.sos_dict.set("_depends", sos_targets([]))
#
# Common functions
#
    def verify_output(self):
        """Verify that all declared step outputs exist after the step has run.

        In dryrun mode missing targets are created as placeholders. With
        error_mode == 'ignore' missing targets are collected and returned as
        a sos_targets; otherwise a RuntimeError is raised.
        Note: returns None (not an empty sos_targets) when step_output is None.
        """
        missing = sos_targets([])
        if env.sos_dict["step_output"] is None:
            return
        if not env.sos_dict["step_output"].valid():
            raise RuntimeError(
                "Output of a completed step cannot be undetermined or unspecified."
            )
        for target in env.sos_dict["step_output"]:
            if isinstance(target, (sos_step, invalid_target)):
                continue
            if isinstance(target, str):
                if not file_target(target).target_exists("any"):
                    if env.config["run_mode"] == "dryrun":
                        # in dryrun mode, we just create these targets
                        file_target(target).create_placeholder()
                    else:
                        # latency wait for 2 seconds because the file system might be slow
                        if env.config["run_mode"] == "run":
                            time.sleep(2)
                        if not file_target(target).target_exists("any"):
                            if env.config["error_mode"] == "ignore":
                                missing.extend(target)
                            else:
                                raise RuntimeError(
                                    f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]} (curdir={os.getcwd()})'
                                )
            elif not target.target_exists("any"):
                if env.config["run_mode"] == "dryrun":
                    target.create_placeholder()
                else:
                    # latency wait because the file system might be slow
                    if env.config["run_mode"] == "run":
                        time.sleep(2)
                    if not target.target_exists("any"):
                        if env.config["error_mode"] == "ignore":
                            missing.extend(target)
                        else:
                            raise RuntimeError(
                                f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]}'
                            )
        return missing
# directive input
    def process_input_args(self, ifiles: sos_targets, **kwargs):
        """This function handles directive input and all its parameters.
        It
        determines and set __step_input__
        determines and set pattern variables if needed
        returns
        _groups
        _vars
        which are groups of _input and related _vars

        This is a generator: dynamic-input verification is delegated to
        verify_dynamic_targets(), whose yields are forwarded to the caller.
        """
        if ifiles.unspecified():
            env.sos_dict.set("step_input", sos_targets([]))
            env.sos_dict.set("_input", sos_targets([]))
            env.sos_dict.set("step_output", sos_targets())
            return [sos_targets([])], [{}]
        assert isinstance(ifiles, sos_targets)
        if env.sos_dict.get("__dynamic_input__", False):
            # forward the sub-generator's yields/sends to our caller
            runner = self.verify_dynamic_targets(
                [x for x in ifiles if isinstance(x, file_target)]
            )
            try:
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
        # input file is the filtered files
        env.sos_dict.set("step_input", ifiles)
        env.sos_dict.set("_input", ifiles)
        if ifiles._num_groups() == 0:
            ifiles._group("all")
        #
        return ifiles.groups
    def verify_dynamic_targets(self, target):
        """Generator stub: subclasses override this to verify dynamically
        determined targets; the base implementation accepts anything."""
        yield None
        return True
    def process_depends_args(self, dfiles: sos_targets, **kwargs):
        """Handle the ``depends:`` directive: validate options and set
        _depends/step_depends. Generator: dynamic-depends verification is
        delegated to verify_dynamic_targets()."""
        for k in kwargs.keys():
            if k not in SOS_DEPENDS_OPTIONS:
                raise RuntimeError(f"Unrecognized depends option {k}")
        if dfiles.undetermined():
            raise ValueError(r"Depends needs to handle undetermined")
        if env.sos_dict.get("__dynamic_depends__", False):
            # forward the sub-generator's yields/sends to our caller
            runner = self.verify_dynamic_targets(
                [x for x in dfiles if isinstance(x, file_target)]
            )
            try:
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
        env.sos_dict.set("_depends", dfiles)
        env.sos_dict.set("step_depends", dfiles)
    def process_output_args(self, ofiles: sos_targets, **kwargs):
        """Handle the ``output:`` directive for the current substep: select
        this substep's output group, create parent directories, set
        _output/step_output, and guard against overlapping outputs."""
        for k in kwargs.keys():
            if k not in SOS_OUTPUT_OPTIONS:
                raise RuntimeError(f"Unrecognized output option {k}")
        if ofiles._num_groups() > 0:
            if ofiles._num_groups() == 1:
                ofiles = ofiles._get_group(0)
            elif ofiles._num_groups() != len(self._substeps):
                raise RuntimeError(
                    f"Inconsistent number of output ({ofiles._num_groups()}) and input ({len(self._substeps)}) groups."
                )
            else:
                ofiles = ofiles._get_group(env.sos_dict["_index"])
        # create directory
        if ofiles.valid():
            parents = set(
                [
                    os.path.abspath(os.path.join(ofile, os.pardir))
                    for ofile in ofiles
                    if isinstance(ofile, file_target)
                ]
            )
            for parent_dir in parents:
                if parent_dir and not os.path.isdir(parent_dir):
                    os.makedirs(parent_dir, exist_ok=True)
        # set variables
        env.sos_dict.set("_output", ofiles)
        env.sos_dict.set("step_output", ofiles)
        #
        # each output target may be claimed by only one substep
        for ofile in ofiles:
            oname = ofile.target_name()
            if oname in self._all_outputs:
                raise ValueError(
                    f'Output {ofile} from substep {env.sos_dict["_index"]} of {env.sos_dict["__num_groups__"]} substeps overlaps with output from a previous substep.'
                )
            self._all_outputs.add(oname)
    def submit_task(self, task_info):
        """Register one substep's task with the (lazily created) TaskManager
        and submit any jobs that become ready; returns the task id (or None
        for a skipped substep)."""
        if self.task_manager is None:
            # first task: determine batching options from the task: statement
            if self.step.task_params:
                for key in ("trunk_size", "trunk_workers", "queue"):
                    val = get_value_of_param(
                        key, self.step.task_params, extra_dict=env.sos_dict.dict()
                    )
                    if val:
                        env.sos_dict["_runtime"][key] = val[0]
            if "trunk_size" in env.sos_dict["_runtime"]:
                trunk_size = env.sos_dict["_runtime"]["trunk_size"]
                if trunk_size is None or trunk_size <= 0:
                    # None / non-positive means "all substeps in one task"
                    trunk_size = env.sos_dict["__num_groups__"]
                if not isinstance(trunk_size, int):
                    raise ValueError(
                        f'An integer value or None is expected for runtime option trunk_size, "{trunk_size}" provided'
                    )
            else:
                trunk_size = 1
            if "trunk_workers" in env.sos_dict["_runtime"]:
                if "nodes" in env.sos_dict["_runtime"]:
                    raise ValueError(
                        'Option "trunk_workers" that specifies number of nodes and processes for the execution '
                        'of single-node jobs and option "nodes" that specifies number of nodes for single multi-node '
                        "jobs cannot be used at the same time."
                    )
                trunk_workers = env.sos_dict["_runtime"]["trunk_workers"]
            else:
                trunk_workers = None
            # if 'queue' in env.sos_dict['_runtime'] and env.sos_dict['_runtime']['queue']:
            #     host = env.sos_dict['_runtime']['queue']
            # else:
            #     # otherwise, use workflow default
            #     host = '__default__'
            self.task_manager = TaskManager(
                env.sos_dict["__num_groups__"], trunk_size, trunk_workers
            )
        task_id = task_info["task_id"]
        task_index = task_info["index"]
        if task_id is None:
            # substep was skipped; record an empty slot entry
            self.task_manager.set(task_index, None)
            return None
        taskdef = task_info["task_def"]
        task_vars = task_info["task_vars"]
        # 618
        # it is possible that identical tasks are executed (with different underlying random numbers)
        # we should either give a warning or produce different ids...
        if self.task_manager.index_of(task_id) >= 0:
            raise RuntimeError(
                f'Task {task_id} generated for (_index={env.sos_dict["_index"]}) is identical to a previous one (_index={self.task_manager.index_of(task_id)}).'
            )
        elif self.task_manager.has_output(task_vars["_output"]):
            raise RuntimeError(
                f'Task produces output files {", ".join(task_vars["_output"])} that are output of other tasks.'
            )
        # if no trunk_size, the job will be submitted immediately
        # otherwise tasks will be accumulated and submitted in batch
        self.task_manager.set(task_index, (task_id, taskdef, task_vars["_output"]))
        tasks = self.task_manager.get_job()
        if tasks:
            self.submit_tasks(tasks)
        return task_id
    def wait_for_results(self, all_submitted):
        # this is a generator function because wait_for_tasks is a generator
        # function and needs to yield to the caller
        """Wait for concurrent substeps and submitted tasks, match task
        results back to substep slots in self.proc_results, update progress
        counters, and record shared variables from task results."""
        if self.concurrent_substep:
            # drain remaining concurrent substep results first
            try:
                runner = self.wait_for_substep()
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
        if self.task_manager is None:
            return {}
        #
        # report task
        # what we should do here is to get the alias of the Host
        # because it can be different (e.g. not localhost
        queue = env.sos_dict["_runtime"]["queue"]
        # submit the last batch of tasks
        tasks = self.task_manager.get_job(all_tasks=True)
        if tasks:
            self.submit_tasks(tasks)
        # waiting for results of specified IDs
        try:
            # 1218
            runner = self.wait_for_tasks(
                self.task_manager._submitted_tasks, all_submitted
            )
            yreq = next(runner)
            while True:
                yres = yield yreq
                yreq = runner.send(yres)
        except StopIteration as e:
            results = e.value
        for id, result in results.items():
            # turn to string to avoid naming lookup issue
            rep_result = {
                x: (y if isinstance(y, (int, bool, float, str)) else short_repr(y))
                for x, y in result.items()
            }
            rep_result["tags"] = " ".join(self.task_manager.tags(id))
            rep_result["queue"] = queue
            send_message_to_controller(["workflow_sig", "task", id, repr(rep_result)])
        self.task_manager.clear_submitted()
        # if in dryrun mode, we display the output of the dryrun task
        if env.config["run_mode"] == "dryrun":
            tid = list(results.keys())[0]
            tf = TaskFile(tid)
            if tf.has_stdout():
                print(TaskFile(tid).stdout)
        # replace task-id placeholders in proc_results with actual results
        for idx, task in self.proc_results.items():
            # if it is done
            if isinstance(task, dict):
                continue
            if task in results:
                self.proc_results[idx] = results[task]
            else:
                # can be a subtask
                for _, mres in results.items():
                    if "subtasks" in mres and task in mres["subtasks"]:
                        self.proc_results[idx] = mres["subtasks"][task]
                    # elif 'exception' in mres:
                    #     self.proc_results[idx] = mres
        #
        # check if all have results?
        if any(isinstance(x, str) for x in self.proc_results.values()):
            raise RuntimeError(
                f'Failed to get results for tasks {", ".join(x for x in self.proc_results.values() if isinstance(x, str))}'
            )
        #
        for idx, res in self.proc_results.items():
            if "skipped" in res and res["skipped"]:
                self.completed["__task_skipped__"] += 1
                # complete case: task skipped
                send_message_to_controller(
                    ["progress", "substep_completed", env.sos_dict["step_id"]]
                )
            else:
                # complete case: task completed
                send_message_to_controller(
                    ["progress", "substep_ignored", env.sos_dict["step_id"]]
                )
                self.completed["__task_completed__"] += 1
            if "shared" in res:
                self.shared_vars[idx].update(res["shared"])
def log(self, stage=None, msg=""):
if stage == "start":
env.logger.info(
f'{"Checking" if env.config["run_mode"] == "dryrun" else "Running"} ``{self.step.step_name(True)}``: {self.step.comment.strip()}'
)
elif stage == "input statement":
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("STEP", f"Handling input statement {msg}")
elif stage == "_input":
if env.sos_dict["_input"] is not None and len(env.sos_dict["_input"]) > 0:
env.logger.debug(
f'_input: ``{short_repr(env.sos_dict["_input"])}``{msg}'
)
elif stage == "_depends":
if env.sos_dict["_depends"] is not None:
env.logger.debug(
f'_depends: ``{short_repr(env.sos_dict["_depends"])}``{msg}'
)
elif stage == "input":
if env.sos_dict["step_input"] is not None:
env.logger.info(
f'input: ``{short_repr(env.sos_dict["step_input"])}``{msg}'
)
elif stage == "output":
if (
env.sos_dict["step_output"] is not None
and len(env.sos_dict["step_output"]) > 0
):
env.logger.info(
f'``{self.step.step_name(True)}`` output: ``{short_repr(env.sos_dict["step_output"])}``{msg}'
)
    def execute(self, stmt, return_result=False):
        """Execute a statement with SoS_exec, keeping the result in
        self.last_res; control-flow exceptions pass through unchanged while
        other failures are re-raised as RuntimeError with a traceback."""
        try:
            self.last_res = SoS_exec(
                stmt,
                return_result=return_result or env.config["run_mode"] == "interactive",
            )
            if return_result:
                return self.last_res
        except (StopInputGroup, TerminateExecution, UnavailableLock):
            # these control the step's own flow and are handled upstream
            raise
        except subprocess.CalledProcessError as e:
            raise RuntimeError(e.stderr)
        except ArgumentError:
            raise
        except ProcessKilled:
            raise
        except KeyboardInterrupt as e:
            raise RuntimeError(get_traceback_msg(e))
        except Exception as e:
            raise RuntimeError(get_traceback_msg(e))
    def prepare_substep(self):
        # socket to collect result
        # create a PULL socket on a random port and advertise its address so
        # substep workers can push their results back to this executor
        self.result_pull_socket = create_socket(
            env.zmq_context, zmq.PULL, "substep result collector"
        )
        local_ip = get_localhost_ip()
        port = self.result_pull_socket.bind_to_random_port(f"tcp://{local_ip}")
        env.config["sockets"]["result_push_socket"] = f"tcp://{local_ip}:{port}"
    def submit_substep(self, param):
        # hand the substep definition to the controller for concurrent execution
        send_message_to_controller(["substep", param])
def process_returned_substep_result(self, till=None, wait=True):
while True:
if not wait:
# 1213
cur_index = env.sos_dict["_index"]
pending_substeps = cur_index - self._completed_concurrent_substeps + 1
if pending_substeps < (
100
if isinstance(self.concurrent_substep, bool)
else self.concurrent_substep
):
if not self.result_pull_socket.poll(0):
return
elif (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
# if there are more than 100 pending substeps
# we wait indefinitely for the results
env.log_to_file(
"STEP",
f"Wait for more substeps to be done before submitting. (index={cur_index}, processed={self._completed_concurrent_substeps})",
)
elif self._completed_concurrent_substeps == till:
return
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
if isinstance(res["exception"], ProcessKilled):
raise res["exception"]
elif isinstance(res["exception"], RemovedTarget):
pass
elif env.config["error_mode"] == "ignore":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.warning(
f"""Ignoring error from ``{self.step.step_name(True)}`` {idx_msg}: {res["exception"]}."""
)
res["output"] = sos_targets(invalid_target())
elif env.config["error_mode"] == "abort":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
# try to stop everything but wait till for submitted tasks to
# complete
self._completed_concurrent_substeps + 1
waiting = till - 1 - self._completed_concurrent_substeps
env.logger.warning(
f'``{self.step.step_name(True)}`` {idx_msg} returns an error.{f" Terminating step after completing {waiting} submitted substeps." if waiting else " Terminating now."}'
)
for i in range(waiting):
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
self.exec_error.append(
f'index={res["index"]}', res["exception"]
)
raise self.exec_error
else:
# default or unspecified
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
#
if "index" not in res:
raise RuntimeError(
"Result received from substep does not have key index"
)
if "task_id" in res:
task = self.submit_task(res)
# if substep returns tasks, ...
if res["task_id"]:
self.proc_results[res["index"]] = task
else:
# if there is no task_id, the substep must have
# been skipped.
self.proc_results[res["index"]] = res
else:
self.proc_results[res["index"]] = res
self._completed_concurrent_substeps += 1
    def wait_for_substep(self):
        """Generator: block until every entry in self.proc_results has been
        filled in by process_returned_substep_result(), forwarding its
        yields to the caller."""
        while self._completed_concurrent_substeps < len(self.proc_results):
            try:
                runner = self.process_returned_substep_result(
                    till=len(self.proc_results), wait=True
                )
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
def collect_result(self):
# only results will be sent back to the master process
#
# __step_input__: input of this step
# __steo_output__: output of this step
# __step_depends__: dependent files of this step
result = {
"__step_input__": env.sos_dict["step_input"],
"__step_output__": env.sos_dict["step_output"],
"__step_depends__": env.sos_dict["step_depends"],
"__step_name__": env.sos_dict["step_name"],
"__completed__": self.completed,
}
result["__last_res__"] = self.last_res
result["__shared__"] = {}
if "shared" in self.step.options:
result["__shared__"] = self.shared_vars
for x in result["__step_output__"].targets:
if isinstance(x, sos_variable):
result["__shared__"][x.target_name()] = env.sos_dict[x.target_name()]
send_message_to_controller(
[
"progress",
"step_completed",
-1
if "sos_run" in env.sos_dict["__signature_vars__"]
else self.completed["__step_completed__"],
env.sos_dict["step_name"],
env.sos_dict["step_output"],
]
)
return result
def set_task_queue_from_task_params(self):
if self.step.task_params:
try:
task_queue = get_value_of_param(
"queue", self.step.task_params, extra_dict=env.sos_dict.dict()
)
if task_queue:
env.sos_dict["_runtime"]["queue"] = task_queue[0]
except Exception as e:
raise ValueError(
f"Failed to determine value of parameter queue of {self.step.task_params}: {e}"
)
# # check concurrent #1134
# try:
# task_concurrency = get_value_of_param(
# 'concurrent',
# self.step.task_params,
# extra_dict=env.sos_dict.dict())
# if task_concurrency:
# env.sos_dict['_runtime']['concurrent'] = task_concurrency[0]
# except Exception as e:
# raise ValueError(
# f'Failed to determine value of parameter queue of {self.step.task_params}: {e}'
# )
# if -q is unspecified and option queue is unspecified,
# or queue=None is specified, disregard the task keyword
if (
env.config["default_queue"] is None
and "queue" not in env.sos_dict["_runtime"]
) or (
"queue" in env.sos_dict["_runtime"]
and env.sos_dict["_runtime"]["queue"] is None
):
# remove task statement
if len(self.step.statements) >= 1 and self.step.statements[-1][0] == "!":
self.step.statements[-1][1] += "\n" + self.step.task
else:
self.step.statements.append(["!", self.step.task])
self.step.task = None
# is queue is unspecified, it take value from command line
# in this case -q should have been specified
elif "queue" not in env.sos_dict["_runtime"]:
env.sos_dict["_runtime"]["queue"] = env.config["default_queue"]
def local_exec_without_signature(self, statement):
idx = env.sos_dict["_index"]
env.log_to_file(
"STEP", f'Execute substep {env.sos_dict["step_name"]} without signature'
)
try:
if self.is_input_verified:
verify_input()
self.is_input_verified = False
if env.sos_dict.get("__concurrent_subworkflow__", False):
self._subworkflow_results.append(
self.execute(statement[1], return_result=True)
)
else:
self.execute(statement[1])
if not self.step.task and env.config["run_mode"] != "interactive":
env.logger.info(
f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
)
finally:
if not self.step.task:
# if no task, this step is __completed
# complete case: local skip without task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
if "shared" in self.step.options:
try:
self.shared_vars[env.sos_dict["_index"]].update(
{
x: env.sos_dict[x]
for x in self.vars_to_be_shared
if x in env.sos_dict
}
)
except Exception as e:
raise ValueError(f"Missing shared variable {e}.")
    def local_exec_with_signature(self, statement, sig):
        """Execute a substep locally under signature tracking.

        Parameters:
            statement: a ('!', code) statement pair whose code is executed.
            sig: a pre-built RuntimeInfo signature, or None to build one here.

        Returns True if the substep was skipped because its signature matched
        (outputs already up to date), False otherwise.
        """
        idx = env.sos_dict["_index"]
        # signature might be built outside of the function
        # not in a debug mode delayed to now
        if sig is None:
            sig = RuntimeInfo(
                statementMD5([statement[1], self.step.task]),
                env.sos_dict["_input"],
                env.sos_dict["_output"],
                env.sos_dict["_depends"],
                env.sos_dict["__signature_vars__"],
                shared_vars=self.vars_to_be_shared,
            )
        # if signature match, we skip the substep even if
        # there are tasks.
        matched = validate_step_sig(sig)
        if matched:
            if env.sos_dict["step_output"].undetermined():
                self.output_groups[idx] = matched["output"]
            if "vars" in matched:
                self.shared_vars[idx].update(matched["vars"])
            return True
        env.log_to_file(
            "STEP",
            f'Execute substep {env.sos_dict["step_name"]} with signature {sig.sig_id}',
        )
        # lock the signature so no other process executes the same substep
        sig.lock()
        try:
            # verify the step input only once per input group
            if self.is_input_verified:
                verify_input()
                self.is_input_verified = False
            if env.sos_dict.get("__concurrent_subworkflow__", False):
                self._subworkflow_results.append(
                    self.execute(statement[1], return_result=True)
                )
            else:
                self.execute(statement[1])
            if not self.step.task and env.config["run_mode"] != "interactive":
                env.logger.info(
                    f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
                )
            if "shared" in self.step.options:
                try:
                    self.shared_vars[env.sos_dict["_index"]].update(
                        {
                            x: env.sos_dict[x]
                            for x in self.vars_to_be_shared
                            if x in env.sos_dict
                        }
                    )
                except Exception as e:
                    raise ValueError(f"Missing shared variable {e}.")
        finally:
            # if this is the end of substep, save the signature
            # otherwise we need to wait for the completion
            # of the task.
            if not self.step.task:
                if env.sos_dict["step_output"].undetermined():
                    output = reevaluate_output()
                    self.output_groups[env.sos_dict["_index"]] = output
                    sig.set_output(output)
                sig.write()
                # complete case : local execution without task
                send_message_to_controller(
                    ["progress", "substep_completed", env.sos_dict["step_id"]]
                )
            else:
                # the signature is written only after the task completes
                self.pending_signatures[idx] = sig
            sig.release()
        return False
def skip_substep(self):
idx = env.sos_dict["_index"]
# if concurrent substep, there might be later steps that needs to be rerun
# and we need to mark some steps has been completed.
if self.concurrent_substep:
self._completed_concurrent_substeps += 1
self.proc_results[idx] = {
"index": idx,
"ret_code": 0,
"output": copy.deepcopy(env.sos_dict["_output"]),
}
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
    def concurrent_exec(self, statement, sig=None):
        """Submit the current substep to a worker for concurrent execution.

        Packages the statement, task, and the variables the substep needs into
        a message sent through ``submit_substep``; results are collected later
        by ``process_returned_substep_result``.

        Parameters:
            statement: a ('!', code) statement pair to execute.
            sig: an optional pre-built RuntimeInfo signature.
        """
        idx = env.sos_dict["_index"]
        env.log_to_file(
            "STEP",
            f'Execute substep {env.sos_dict["step_name"]} {idx} concurrently with {self._completed_concurrent_substeps} completed',
        )
        # the signatures are supposed to be written by substep worker, however
        # the substep worker might send tasks back to the step worker and
        # we should write the signatures after the tasks are completed
        if (
            env.config["sig_mode"] != "ignore"
            and not env.sos_dict["_output"].unspecified()
            and self.step.task
        ):
            self.pending_signatures[idx] = (
                sig
                if sig
                else RuntimeInfo(
                    statementMD5([statement[1], self.step.task]),
                    env.sos_dict["_input"],
                    env.sos_dict["_output"],
                    env.sos_dict["_depends"],
                    env.sos_dict["__signature_vars__"],
                    shared_vars=self.vars_to_be_shared,
                )
            )
        #
        # variables forwarded to the substep worker:
        # step_output: needed only when it is undetermined
        # step_input: not needed
        # _input, _output, _depends, _index: needed
        # step_name: for debug scripts
        # step_id, workflow_id: for reporting to controller
        # '__signature_vars__' to be used for signature creation
        #
        # __step_context__ is not needed because substep
        # executor does not support nested workflow
        proc_vars = (
            env.sos_dict["__signature_vars__"]
            | env.sos_dict["__environ_vars__"]
            | {
                "_input",
                "_output",
                "_depends",
                "_index",
                "step_output",
                "step_name",
                "_runtime",
                "step_id",
                "workflow_id",
                "__num_groups__",
                "__signature_vars__",
            }
        )
        # placeholder entry: filled in when the worker reports back
        self.proc_results[env.sos_dict["_index"]] = {}
        self.submit_substep(
            dict(
                stmt=statement[1],
                global_def=self.step.global_def,
                # 1225: the step might contain large variables from global section, but
                # we do not have to send them if they are not used in substeps.
                cwd=os.getcwd(),
                global_vars={
                    x: y
                    for x, y in self.step.global_vars.items()
                    if x in env.sos_dict["__signature_vars__"]
                },
                task=self.step.task,
                task_params=self.step.task_params,
                proc_vars=env.sos_dict.clone_selected_vars(proc_vars),
                shared_vars=self.vars_to_be_shared,
                config=env.config,
            )
        )
def check_task_sig(self):
idx = env.sos_dict["_index"]
sig = RuntimeInfo(
statementMD5([self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
env.log_to_file(
"STEP",
f'Check task-only step {env.sos_dict["step_name"]} with signature {sig.sig_id}',
)
matched = validate_step_sig(sig)
skip_index = bool(matched)
if matched:
if env.sos_dict["step_output"].undetermined():
self.output_groups[env.sos_dict["_index"]] = matched["output"]
self.shared_vars[env.sos_dict["_index"]].update(matched["vars"])
# complete case: step with task ignored
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
self.pending_signatures[idx] = sig
return skip_index
# def is_task_active(self):
# active = env.sos_dict['_runtime']['active']
# env.logger.error(active)
# if active is True:
# return True
# elif active is False:
# return False
# elif isinstance(active, int):
# if active >= 0 and env.sos_dict['_index'] != active:
# return False
# if active < 0 and env.sos_dict[
# '_index'] != active + env.sos_dict['__num_groups__']:
# return False
# return True
# elif isinstance(active, Sequence):
# allowed_index = list([
# x if x >= 0 else env.sos_dict['__num_groups__'] + x
# for x in active
# ])
# return env.sos_dict['_index'] in allowed_index
# elif isinstance(active, slice):
# allowed_index = list(range(env.sos_dict['__num_groups__']))[active]
# return env.sos_dict['_index'] in allowed_index
# else:
# raise RuntimeError(
# f'Unacceptable value for option active: {active}')
    def check_results(self):
        """Examine results from all substeps: relay captured stdout/stderr,
        write pending signatures for successful substeps, clear output of
        failed ones, and accumulate their errors into ``self.exec_error``,
        which is raised at the end if non-empty.
        """
        # successful substeps first: relay their captured stdout/stderr
        for proc_result in [
            x for x in self.proc_results.values() if x["ret_code"] == 0
        ]:
            if "stdout" in proc_result and proc_result["stdout"]:
                sys.stdout.write(proc_result["stdout"])
            if "stderr" in proc_result and proc_result["stderr"]:
                sys.stderr.write(proc_result["stderr"])
        # now that output is settled, we can write remaining signatures
        for idx, res in self.proc_results.items():
            if (
                self.pending_signatures[idx] is not None
                and res["ret_code"] == 0
                and "sig_skipped" not in res
            ):
                # task might return output with vars #1355
                self.pending_signatures[idx].set_output(self.output_groups[idx])
                self.pending_signatures[idx].write()
            if res["ret_code"] != 0 and "output" in res:
                clear_output(output=res["output"])
        # failed substeps: relay output and classify the exception
        for proc_result in [
            x for x in self.proc_results.values() if x["ret_code"] != 0
        ]:
            if "stdout" in proc_result and proc_result["stdout"]:
                sys.stdout.write(proc_result["stdout"])
            if "stderr" in proc_result and proc_result["stderr"]:
                sys.stderr.write(proc_result["stderr"])
            if "exception" in proc_result:
                excp = proc_result["exception"]
                if isinstance(excp, StopInputGroup):
                    # a substep asked to stop its input group; not an error
                    if excp.message:
                        env.logger.info(excp.message)
                    self.output_groups[proc_result["index"]] = sos_targets([])
                elif isinstance(excp, RemovedTarget):
                    # must be re-handled by the caller, re-raise immediately
                    raise excp
                elif "task" in proc_result:
                    if env.config["error_mode"] == "ignore":
                        env.logger.warning(f"Ignore failed task {proc_result['task']}.")
                    # if the exception is from a task...
                    self.exec_error.append(proc_result["task"], excp)
                else:
                    self.exec_error.append(
                        RuntimeError(
                            f"Substep failed with return code {proc_result['ret_code']}"
                        )
                    )
        # this is after all substeps have been completed
        if self.exec_error.errors:
            raise self.exec_error
def calculate_completed(self):
substeps = (
self.completed["__substep_completed__"]
+ self.completed["__substep_skipped__"]
)
self.completed["__step_completed__"] = (
self.completed["__substep_completed__"] / substeps
)
self.completed["__step_skipped__"] = (
self.completed["__substep_skipped__"] / substeps
)
if self.completed["__step_completed__"].is_integer():
self.completed["__step_completed__"] = int(
self.completed["__step_completed__"]
)
if self.completed["__step_skipped__"].is_integer():
self.completed["__step_skipped__"] = int(self.completed["__step_skipped__"])
    def run(self):
        """Execute a single step and return results. The result for batch mode is the
        input, output etc returned as alias, and for interactive mode is the return value
        of the last expression.

        Implemented as a generator: requests for external events (target
        resolution, substep results, task results) are yielded to the runner,
        which sends the responses back in.
        """
        # return value of the last executed statement
        self.last_res = None
        self.start_time = time.time()
        self.completed = defaultdict(int)
        #
        # prepare environments, namely variables that can be used by the step
        #
        # * step_name: name of the step, can be used by step process to determine
        # actions dynamically.
        env.sos_dict.set("step_name", self.step.step_name())
        env.sos_dict.set("__last_step__", self.step.last_step)
        self.log("start")
        env.sos_dict.set(
            "step_id",
            textMD5(
                f'{env.sos_dict["workflow_id"]} {env.sos_dict["step_name"]} {self.step.md5}'
            ),
        )
        env.sos_dict.set("master_id", env.config["master_id"])
        # used by nested workflow
        env.sos_dict.set("__step_context__", self.step.context)
        env.sos_dict.set("_runtime", {})
        # * input: input files, which should be __step_output__ if it is defined, or
        # None otherwise.
        # * _input: first batch of input, which should be input if no input statement is used
        # * output: None at first, can be redefined by output statement
        # * _output: None at first, can be redefined by output statement
        # * depends: None at first, can be redefined by depends statement
        # * _depends: None at first, can be redefined by depends statement
        #
        self.init_input_output_vars()
        # _index is needed for pre-input action's active option and for debug output of scripts
        env.sos_dict.set("_index", 0)
        if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
            env.log_to_file(
                "STEP",
                f'Executing step {env.sos_dict["step_name"]} with step_input {env.sos_dict["step_input"]} and step_output {env.sos_dict["step_output"]}',
            )
        # decide whether the task goes to a queue or is folded into statements
        self.set_task_queue_from_task_params()
        # look for input statement.
        input_statement_idx = [
            idx
            for idx, x in enumerate(self.step.statements)
            if x[0] == ":" and x[1] == "input"
        ]
        if not input_statement_idx:
            input_statement_idx = None
        elif len(input_statement_idx) == 1:
            input_statement_idx = input_statement_idx[0]
        else:
            raise ValueError(
                f"More than one step input are specified in step {self.step.step_name(True)}"
            )
        # if shared is true, we have to disable concurrent because we
        # do not yet return anything from shared.
        self.concurrent_substep = "shared" not in self.step.options
        # and \
        # ('concurrent' not in env.sos_dict['_runtime'] or env.sos_dict['_runtime']['concurrent'] is True)
        if input_statement_idx is not None:
            # execute before input stuff
            for statement in self.step.statements[:input_statement_idx]:
                if statement[0] == ":":
                    # wait for all dependent targets to be resolved
                    key, value = statement[1:3]
                    if key != "depends":
                        raise ValueError(f"Step input should be specified before {key}")
                    while True:
                        try:
                            args, kwargs = SoS_eval(
                                f"__null_func__({value})",
                                extra_dict={
                                    "__null_func__": __null_func__,
                                    "output_from": __output_from__,
                                    "named_output": __named_output__,
                                    "traced": __traced__,
                                },
                            )
                            dfiles = expand_depends_files(*args)
                            # dfiles can be Undetermined
                            runner = self.process_depends_args(dfiles, **kwargs)
                            try:
                                yreq = next(runner)
                                while True:
                                    yres = yield yreq
                                    yreq = runner.send(yres)
                            except StopIteration:
                                pass
                        except (UnknownTarget, RemovedTarget) as e:
                            runner = self.handle_unknown_target(e)
                            try:
                                yreq = next(runner)
                                while True:
                                    yres = yield yreq
                                    yreq = runner.send(yres)
                            except StopIteration:
                                pass
                            continue
                        except UnavailableLock:
                            raise
                        except Exception as e:
                            raise RuntimeError(
                                f"Failed to process step {key} ({value.strip()}): {e}"
                            )
                        break
                else:
                    try:
                        # 1354
                        # if there are definition before input, the definitions and imports
                        # must be added to global_def in order to be executed by substeps
                        if any(x in statement[1] for x in ("class", "def", "import")):
                            step_def = KeepOnlyImportAndDefine().visit(
                                ast.parse(statement[1])
                            )
                            if step_def.body:
                                if isinstance(self.step.global_def, ast.Module):
                                    self.step.global_def.body.extend(step_def.body)
                                else:
                                    self.step.global_def = step_def
                        self.execute(statement[1])
                    except StopInputGroup as e:
                        # stop before substeps, because there is no output statement before it
                        # we do not have to worry about keep_output
                        if e.message:
                            env.logger.info(e.message)
                        return self.collect_result()
            # input statement
            stmt = self.step.statements[input_statement_idx][2]
            self.log("input statement", stmt)
            while True:
                # wait for all targets to be resolved
                try:
                    args, kwargs = SoS_eval(
                        f"__null_func__({stmt})",
                        extra_dict={
                            "__null_func__": __null_func__,
                            "output_from": __output_from__,
                            "named_output": __named_output__,
                            "traced": __traced__,
                        },
                    )
                    # Files will be expanded differently with different running modes
                    input_files: sos_targets = expand_input_files(
                        *args,
                        **{
                            k: v
                            for k, v in kwargs.items()
                            if k not in SOS_INPUT_OPTIONS
                        },
                    )
                    runner = self.process_input_args(
                        input_files,
                        **{k: v for k, v in kwargs.items() if k in SOS_INPUT_OPTIONS},
                    )
                    try:
                        yreq = next(runner)
                        while True:
                            yres = yield yreq
                            yreq = runner.send(yres)
                    except StopIteration as e:
                        self._substeps = e.value
                    #
                    if "concurrent" in kwargs and self.concurrent_substep:
                        # concurrent can be True/False or an integer
                        self.concurrent_substep = kwargs["concurrent"]
                except (UnknownTarget, RemovedTarget) as e:
                    runner = self.handle_unknown_target(e)
                    try:
                        yreq = next(runner)
                        while True:
                            yres = yield yreq
                            yreq = runner.send(yres)
                    except StopIteration:
                        pass
                    continue
                except UnavailableLock:
                    raise
                except Exception as e:
                    raise ValueError(f"Failed to process input statement {stmt}: {e}")
                break
            input_statement_idx += 1
        elif env.sos_dict["step_input"].groups:
            # if default has groups...
            # default case
            self._substeps = env.sos_dict["step_input"].groups
            # assuming everything starts from 0 is after input
            input_statement_idx = 0
        else:
            # default case
            self._substeps = [env.sos_dict["step_input"]]
            # assuming everything starts from 0 is after input
            input_statement_idx = 0
        self.proc_results = {}
        self.vars_to_be_shared = set()
        if "shared" in self.step.options:
            self.vars_to_be_shared = parse_shared_vars(self.step.options["shared"])
        # normalize shared names: strip the "step_" prefix, drop step targets
        self.vars_to_be_shared = sorted(
            [
                x[5:] if x.startswith("step_") else x
                for x in self.vars_to_be_shared
                if x not in ("step_", "step_input", "step_output", "step_depends")
            ]
        )
        self.shared_vars = [{} for x in self._substeps]
        # run steps after input statement, which will be run multiple times for each input
        # group.
        env.sos_dict.set("__num_groups__", len(self._substeps))
        # determine if a single index or the whole step should be skipped
        skip_index = False
        # signatures of each index, which can remain to be None if no output
        # is defined.
        self.output_groups = [sos_targets([]) for x in self._substeps]
        self.depends_groups = [sos_targets([]) for x in self._substeps]
        # used to prevent overlapping output from substeps
        self._all_outputs = set()
        self._subworkflow_results = []
        # allow nested workflows to run concurrently only when sos_run is the
        # sole content of the last statement and nothing else depends on it
        if (
            any("sos_run" in x[1] for x in self.step.statements[input_statement_idx:])
            and "shared" not in self.step.options
            and not self.step.task
            and self.step.statements[-1][0] == "!"
            and (len(self.step.statements) == 1 or self.step.statements[-2][0] == ":")
            and is_sos_run_the_only_last_stmt(self.step.statements[-1][1])
        ):
            env.sos_dict.set("__concurrent_subworkflow__", True)
        # concurrency is pointless for a single substep or a dryrun, and is
        # disabled when substeps spawn nested workflows
        if self.concurrent_substep:
            if len(self._substeps) <= 1 or env.config["run_mode"] == "dryrun":
                self.concurrent_substep = False
            elif any(
                "sos_run" in x[1] for x in self.step.statements[input_statement_idx:]
            ):
                self.concurrent_substep = False
                env.logger.debug(
                    "Substeps are executed sequentially because of existence of multiple nested workflow."
                )
            else:
                self.prepare_substep()
        # main substep loop; the finally clause closes the substep collector socket
        try:
            self.completed["__substep_skipped__"] = 0
            self.completed["__substep_completed__"] = len(self._substeps)
            self._completed_concurrent_substeps = 0
            # pending signatures are signatures for steps with external tasks
            self.pending_signatures = [None for x in self._substeps]
            for idx, g in enumerate(self._substeps):
                #
                # https://github.com/vatlab/sos/issues/1376
                #
                # [default]
                # input: for_each=dict(i=range(1000))
                # sos_run('a', t=i)
                #
                # when we have workflow like the following when steps
                # are executed quickly with subworkflows submitted to the master
                # the master process could be swamped with subworkflows, causing
                # "too many open files".
                #
                # the following code will stop the step from continued
                # execution and wait for the subworkflows to complete.
                #
                if self._subworkflow_results:
                    try:
                        runner = self.wait_for_subworkflows(
                            allow_pending=env.config["worker_procs"]
                        )
                        yreq = next(runner)
                        while True:
                            yres = yield yreq
                            yreq = runner.send(yres)
                    except StopIteration:
                        pass
                # other variables
                #
                _vars = {}
                # now, let us expose target level variables as lists
                if len(g) > 1:
                    names = set.union(*[set(x._dict.keys()) for x in g._targets])
                elif len(g) == 1:
                    names = set(g._targets[0]._dict.keys())
                else:
                    names = set()
                for name in names:
                    _vars[name] = [x.get(name) for x in g._targets]
                # then we expose all group level variables
                _vars.update(g._dict)
                _vars.update(env.sos_dict["step_input"]._dict)
                env.sos_dict.update(_vars)
                env.sos_dict.set("_input", copy.deepcopy(g))
                # set vars to _input
                # env.sos_dict['_input'].set(**v)
                self.log("_input")
                env.sos_dict.set("_index", idx)
                # in ignore mode, substeps with missing input are skipped with
                # invalid_target outputs so later substeps can detect them
                if env.config["error_mode"] == "ignore":
                    missed = [x for x in g.targets if not x.target_exists()]
                    if missed:
                        if any(isinstance(x, invalid_target) for x in missed):
                            env.logger.warning(
                                f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to invalid input caused by previous failed substep.'
                            )
                        else:
                            env.logger.warning(
                                f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to missing input {sos_targets(missed)}'
                            )
                        self.output_groups[idx] = sos_targets(invalid_target())
                        env.sos_dict.set("_output", sos_targets(invalid_target()))
                        self.skip_substep()
                        continue
                # in interactive mode, because sos_dict are always shared
                # execution of a substep, especially when it calls a nested
                # workflow, would change step_name, __step_context__ etc, and
                # we will have to reset these variables to make sure the next
                # substep would execute normally. Batch mode is immune to this
                # problem because nested workflows are executed in their own
                # process/context etc
                if env.config["run_mode"] == "interactive":
                    env.sos_dict.set("step_name", self.step.step_name())
                    env.sos_dict.set(
                        "step_id",
                        hash(
                            (
                                env.sos_dict["workflow_id"],
                                env.sos_dict["step_name"],
                                self.step.md5,
                            )
                        ),
                    )
                    # used by nested workflow
                    env.sos_dict.set("__step_context__", self.step.context)
                #
                pre_statement = []
                if (
                    not any(
                        st[0] == ":" and st[1] == "output"
                        for st in self.step.statements[input_statement_idx:]
                    )
                    and "__default_output__" in env.sos_dict
                ):
                    pre_statement = [[":", "output", "_output"]]
                # if there is no statement, no task, claim success
                post_statement = []
                if not self.step.statements or self.step.statements[-1][0] != "!":
                    if self.step.task:
                        # if there is only task, we insert a fake statement so that it can be executed by the executor
                        post_statement = [["!", ""]]
                    else:
                        # complete case: no step, no statement
                        send_message_to_controller(
                            ["progress", "substep_completed", env.sos_dict["step_id"]]
                        )
                all_statements = (
                    pre_statement
                    + self.step.statements[input_statement_idx:]
                    + post_statement
                )
                self.is_input_verified = True
                for statement_idx, statement in enumerate(all_statements):
                    is_last_runblock = statement_idx == len(all_statements) - 1
                    # if input is undetermined, we can only process output:
                    if not g.valid() and statement[0] != ":":
                        raise RuntimeError("Undetermined input encountered")
                    if statement[0] == ":":
                        key, value = statement[1:3]
                        # output, depends, and process can be processed multiple times
                        while True:
                            # loop for all unresolved targets to be resolved
                            try:
                                args, kwargs = SoS_eval(
                                    f"__null_func__({value})",
                                    extra_dict={
                                        "__null_func__": __null_func__,
                                        "output_from": __output_from__,
                                        "named_output": __named_output__,
                                        "traced": __traced__,
                                    },
                                )
                                # dynamic output or dependent files
                                if key == "output":
                                    # if output is defined, its default value needs to be cleared
                                    if idx == 0:
                                        env.sos_dict.set("step_output", sos_targets())
                                    ofiles: sos_targets = expand_output_files(
                                        value,
                                        *args,
                                        **{
                                            k: v
                                            for k, v in kwargs.items()
                                            if k not in SOS_OUTPUT_OPTIONS
                                        },
                                    )
                                    if g.valid() and ofiles.valid():
                                        if any(
                                            x in g._targets
                                            for x in ofiles
                                            if not isinstance(x, sos_step)
                                        ):
                                            raise RuntimeError(
                                                f'Overlapping input and output files: {", ".join(repr(x) for x in ofiles if x in g)}'
                                            )
                                    # set variable _output and output
                                    self.process_output_args(
                                        ofiles,
                                        **{
                                            k: v
                                            for k, v in kwargs.items()
                                            if k in SOS_OUTPUT_OPTIONS
                                        },
                                    )
                                    self.output_groups[idx] = env.sos_dict["_output"]
                                elif key == "depends":
                                    try:
                                        dfiles = expand_depends_files(*args)
                                        # dfiles can be Undetermined
                                        runner = self.process_depends_args(
                                            dfiles, **kwargs
                                        )
                                        try:
                                            yreq = next(runner)
                                            while True:
                                                yres = yield yreq
                                                yreq = runner.send(yres)
                                        except StopIteration:
                                            pass
                                        self.depends_groups[idx] = env.sos_dict[
                                            "_depends"
                                        ]
                                        self.log("_depends")
                                    except Exception:
                                        # env.logger.info(e)
                                        raise
                                else:
                                    raise RuntimeError(f"Unrecognized directive {key}")
                                # everything is ok, break
                                break
                            except (UnknownTarget, RemovedTarget) as e:
                                runner = self.handle_unknown_target(e)
                                try:
                                    yreq = next(runner)
                                    while True:
                                        yres = yield yreq
                                        yreq = runner.send(yres)
                                except StopIteration:
                                    pass
                                continue
                            except UnavailableLock:
                                raise
                            except Exception as e:
                                # if input is Undetermined, it is possible that output cannot be processed
                                # due to that, and we just return
                                if not g.valid():
                                    env.logger.debug(e)
                                    return self.collect_result()
                                raise RuntimeError(
                                    f"Failed to process step {key} ({value.strip()}): {e}"
                                )
                    elif is_last_runblock:
                        # sig_mode "skip": outputs that exist and are newer
                        # than inputs let us skip the substep entirely
                        if (
                            env.config["sig_mode"] == "skip"
                            and not self.vars_to_be_shared
                            and "sos_run" not in statement[1]
                            and not env.sos_dict["_output"].unspecified()
                            and len(env.sos_dict["_output"]) > 0
                            and all(
                                x.target_exists()
                                for x in env.sos_dict["_output"].targets
                            )
                            and env.sos_dict["_output"].later_than(
                                env.sos_dict["_input"]
                            )
                        ):
                            self.skip_substep()
                            env.logger.info(
                                f'``{env.sos_dict["step_name"]}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``skipped`` with existing output.'
                            )
                            skip_index = True
                            # do not execute the rest of the statement
                            break
                        #
                        # default mode, check if skipping substep
                        sig = None
                        if (
                            env.config["sig_mode"]
                            not in ("ignore", "distributed", "build")
                            and not env.sos_dict["_output"].unspecified()
                        ):
                            sig = RuntimeInfo(
                                statementMD5([statement[1], self.step.task]),
                                env.sos_dict["_input"],
                                env.sos_dict["_output"],
                                env.sos_dict["_depends"],
                                env.sos_dict["__signature_vars__"],
                                shared_vars=self.vars_to_be_shared,
                            )
                            matched = validate_step_sig(sig)
                            skip_index = bool(matched)
                            if skip_index:
                                # matched["output"] might have vars not defined in "output" #1355
                                env.sos_dict.set("_output", matched["output"])
                                self.output_groups[idx] = matched["output"]
                                if "vars" in matched:
                                    self.shared_vars[idx].update(matched["vars"])
                                self.skip_substep()
                                break
                        try:
                            if self.concurrent_substep:
                                self.concurrent_exec(statement, sig)
                                # we check if the previous task has been completed and process them
                                # because further steps might need to be done
                                try:
                                    runner = self.process_returned_substep_result(
                                        till=idx + 1, wait=False
                                    )
                                    yreq = next(runner)
                                    while True:
                                        yres = yield yreq
                                        yreq = runner.send(yres)
                                except StopIteration:
                                    pass
                            elif (
                                env.config["sig_mode"] == "ignore"
                                or env.sos_dict["_output"].unspecified()
                            ):
                                self.local_exec_without_signature(statement)
                            else:
                                skip_index = self.local_exec_with_signature(
                                    statement, sig
                                )
                                if skip_index:
                                    self.skip_substep()
                                    break
                        except StopInputGroup as e:
                            if not e.keep_output:
                                clear_output()
                                self.output_groups[idx] = sos_targets([])
                            if e.message:
                                env.logger.info(e.message)
                            skip_index = True
                            break
                        except Exception as e:
                            clear_output()
                            if env.config["error_mode"] == "abort":
                                raise
                            elif env.config["error_mode"] == "ignore":
                                idx_msg = (
                                    f'(id={env.sos_dict["step_id"]}, index={idx})'
                                    if len(self._substeps) > 1
                                    else f'(id={env.sos_dict["step_id"]})'
                                )
                                env.logger.warning(
                                    f"{self.step.step_name(True)} {idx_msg} returns no output due to error: {e}"
                                )
                                self.output_groups[idx] = sos_targets(invalid_target())
                                skip_index = True
                            else:
                                if env.config["run_mode"] != "interactive":
                                    # default mode
                                    idx_msg = (
                                        f'(id={env.sos_dict["step_id"]}, index={idx})'
                                        if len(self._substeps) > 1
                                        else f'(id={env.sos_dict["step_id"]})'
                                    )
                                    env.logger.error(
                                        f"{self.step.step_name(True)} {idx_msg} returns an error."
                                    )
                                self.exec_error.append(str(idx), e)
                    else:
                        # if it is not the last statement group (e.g. statements before :output)
                        # we execute locally without anything like signature
                        if self.is_input_verified:
                            verify_input()
                            self.is_input_verified = False
                        try:
                            self.execute(statement[1])
                        except StopInputGroup as e:
                            if not e.keep_output:
                                clear_output()
                                self.output_groups[idx] = sos_targets([])
                            if e.message:
                                env.logger.info(e.message)
                            skip_index = True
                            break
                        except Exception:
                            clear_output()
                            raise
                # if there is no statement , but there are tasks, we should
                # check signature here.
                if (
                    (not self.step.statements or self.step.statements[-1][0] != "!")
                    and self.step.task
                    and not self.concurrent_substep
                    and env.config["sig_mode"] != "ignore"
                    and not env.sos_dict["_output"].unspecified()
                ):
                    skip_index = self.check_task_sig()
                # if this index is skipped, go directly to the next one
                if skip_index:
                    self.completed["__substep_skipped__"] += 1
                    self.completed["__substep_completed__"] -= 1
                    skip_index = False
                    continue
                # if concurrent input group, tasks are handled in substep
                if self.concurrent_substep or not self.step.task:
                    continue
                if env.config["run_mode"] == "dryrun" and env.sos_dict["_index"] != 0:
                    continue
                # # check if the task is active
                # if 'active' in env.sos_dict['_runtime']:
                #     if not self.is_task_active():
                #         continue
                #
                self.log("task")
                try:
                    task_id, taskdef, task_vars = create_task(
                        self.step.global_def,
                        self.step.global_vars,
                        self.step.task,
                        self.step.task_params,
                    )
                    task = self.submit_task(
                        {
                            "index": env.sos_dict["_index"],
                            "task_id": task_id,
                            "task_def": taskdef,
                            "task_vars": task_vars,
                        }
                    )
                    self.proc_results[env.sos_dict["_index"]] = task
                except Exception as e:
                    # FIXME: cannot catch exception from subprocesses
                    if env.verbosity > 2:
                        sys.stderr.write(get_traceback())
                    raise RuntimeError(
                        f'Failed to execute process\n"{short_repr(self.step.task)}"\n{e}'
                    )
                #
                # # if not concurrent, we have to wait for the completion of the task
                # if 'concurrent' in env.sos_dict['_runtime'] and env.sos_dict[
                #         '_runtime']['concurrent'] is False:
                #     # in this case the steps must be executed not concurrently
                #     runner = self.wait_for_results(all_submitted=False)
                #     try:
                #         yreq = next(runner)
                #         while True:
                #             yres = yield yreq
                #             yreq = runner.send(yres)
                #     except StopIteration:
                #         pass
            #
            # endfor loop for each input group
            #
            if self._subworkflow_results:
                try:
                    runner = self.wait_for_subworkflows(allow_pending=0)
                    yreq = next(runner)
                    while True:
                        yres = yield yreq
                        yreq = runner.send(yres)
                except StopIteration:
                    pass
                env.sos_dict.pop("__concurrent_subworkflow__")
            # wait for all submitted substeps/tasks to report back
            runner = self.wait_for_results(all_submitted=True)
            try:
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
            for idx, res in self.proc_results.items():
                if "sig_skipped" in res:
                    self.completed["__substep_skipped__"] += 1
                    self.completed["__substep_completed__"] -= 1
                if "output" in res:
                    self.output_groups[idx] = res["output"]
            # check results
            self.check_results()
            # if error happened but we allow all substeps to be completed, we now
            # raise exception
            if self.exec_error.errors:
                raise self.exec_error
            # if output is Undetermined, re-evaluate it
            # finalize output from output_groups because some output might be skipped
            # this is the final version of the output but we do maintain output
            # during the execution of step, for compatibility.
            env.sos_dict.set(
                "step_output", sos_targets([])._add_groups(self.output_groups)
            )
            env.sos_dict.set(
                "step_depends", sos_targets([])._add_groups(self.depends_groups)
            )
            # if there exists an option shared, the variable would be treated as
            # provides=sos_variable(), and then as step_output
            if "shared" in self.step.options:
                self.shared_vars = evaluate_shared(
                    self.shared_vars, self.step.options["shared"]
                )
                env.sos_dict.quick_update(self.shared_vars)
            missing = self.verify_output()
            self.log(
                "output",
                msg=f'\033[95m missing: {short_repr(missing)} ({len(missing)} item{"s" if len(missing)>1 else ""})\033[0m'
                if len(missing) > 0
                else "",
            )
            self.calculate_completed()
            # helper to report only file targets (with sizes) to the controller
            def file_only(targets):
                if not isinstance(targets, sos_targets):
                    env.logger.warning(
                        f"Unexpected input or output target for reporting. Empty list returned: {targets}"
                    )
                    return []
                return [
                    (str(x), x.size())
                    for x in targets._targets
                    if isinstance(x, file_target)
                ]
            step_info = {
                "step_id": self.step.md5,
                "start_time": self.start_time,
                "stepname": self.step.step_name(True),
                "substeps": len(self._substeps),
                "input": file_only(env.sos_dict["step_input"]),
                "output": file_only(env.sos_dict["step_output"]),
                "completed": dict(self.completed),
                "end_time": time.time(),
            }
            send_message_to_controller(
                ["workflow_sig", "step", env.sos_dict["workflow_id"], repr(step_info)]
            )
            return self.collect_result()
        finally:
            if self.concurrent_substep:
                close_socket(self.result_pull_socket, "substep collector")
class Step_Executor(Base_Step_Executor):
    """Single process step executor.

    Runs inside a SoS_Worker process and talks to the workflow controller
    over ``self.socket``.  The ``wait_for_*`` methods are generators that
    yield the socket so the worker's poller can resume them when a reply
    is available.  Fix: corrected a typo ("veryify") in the error message
    raised when a dependent target cannot be resolved.
    """

    def __init__(self, step, socket, mode="run"):
        self.run_mode = mode
        env.config["run_mode"] = mode
        super(Step_Executor, self).__init__(step)
        self.socket = socket
        # because step is executed in a separate SoS_Worker process, this
        # __socket__ is available to all the actions that will be executed
        # in the step
        env.__socket__ = socket

    def submit_tasks(self, tasks):
        """Send a batch of task IDs to the controller for the queue
        configured in ``_runtime``."""
        if "TASK" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
            env.log_to_file("TASK", f"Send {tasks}")
        self.socket.send(
            encode_msg(["tasks", env.sos_dict["_runtime"]["queue"]] + tasks)
        )

    def wait_for_tasks(self, tasks, all_submitted):
        """Generator: wait until results for all *tasks* are received,
        then return a mapping from task ID to result."""
        # wait for task is a generator function that yields the request
        # to the runner
        if not tasks:
            return {}
        # when we wait, the "outsiders" also need to see the tags etc
        # of the tasks so we have to write to the database. #156
        send_message_to_controller(["commit_sig"])
        # wait till the executor responde
        results = {}
        while True:
            # yield an indicator of what is requested, for debugging purpose
            yield self.socket
            res = decode_msg(self.socket.recv())
            if res is None:
                sys.exit(0)
            results.update(res)
            # all results have been obtained.
            if len(results) == len(tasks):
                break
        return results

    def wait_for_subworkflows(self, allow_pending):
        """Wait for results from subworkflows"""
        try:
            allow_pending = int(allow_pending)
        except Exception:
            # non-numeric value: derive a sensible concurrency limit
            allow_pending = min(max(os.cpu_count() // 2, 2), 8)
        while self._subworkflow_results:
            if allow_pending > 0:
                n_pending = sum(
                    len(x["pending_workflows"]) for x in self._subworkflow_results
                )
                if n_pending <= allow_pending:
                    break
            # here we did not check if workflow ids match
            yield self.socket
            res = decode_msg(self.socket.recv())
            if res is None:
                sys.exit(0)
            elif isinstance(res, Exception):
                raise res
            if "__workflow_id__" not in res:
                raise ValueError(f"Unrecognized result from subworkflows: {res}")
            # remove from _self._subworkflow_results
            result_with_id = [
                idx
                for idx, x in enumerate(self._subworkflow_results)
                if res["__workflow_id__"] in x["pending_workflows"]
            ]
            if not result_with_id:
                raise RuntimeError(
                    f"Failed to identify ID of returned subworkflow: {res}"
                )
            if len(result_with_id) > 1:
                raise RuntimeError(
                    "Multiple matches of subworkflow ID. This should not happen."
                )
            self._subworkflow_results[result_with_id[0]]["pending_workflows"].remove(
                res["__workflow_id__"]
            )
            if not self._subworkflow_results[result_with_id[0]]["pending_workflows"]:
                self._subworkflow_results.pop(result_with_id[0])

    def handle_unknown_target(self, e):
        """Generator: ask the controller to resolve a missing target and
        re-raise the original exception if it cannot be resolved."""
        self.socket.send(encode_msg(["missing_target", e.target]))
        yield self.socket
        res = decode_msg(self.socket.recv())
        if not res:
            raise e

    def verify_dynamic_targets(self, targets):
        """Generator: ask the controller to verify traced dynamic file
        targets before they are used."""
        if not targets:
            return
        if env.config["trace_existing"]:
            traced = targets
        else:
            traced = [x for x in targets if x.traced]
        if not traced:
            return
        self.socket.send(encode_msg(["dependent_target"] + traced))
        yield self.socket
        res = decode_msg(self.socket.recv())
        if res != "target_resolved":
            # bug fix: error message used to read "veryify"
            raise RuntimeError(f"Failed to verify dependent target {traced}")

    def run(self):
        """Execute the step and deliver its result (or exception) over the
        socket; when no socket is available, return or raise instead."""
        try:
            try:
                # 1218
                runner = Base_Step_Executor.run(self)
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration as e:
                res = e.value
                if self.socket is not None:
                    if (
                        "STEP" in env.config["SOS_DEBUG"]
                        or "ALL" in env.config["SOS_DEBUG"]
                    ):
                        env.log_to_file(
                            "STEP",
                            f"Step {self.step.step_name()} sends result {short_repr(res)}",
                        )
                    self.socket.send(encode_msg(res))
                else:
                    return res
        except RemovedTarget as e:
            # removed target needs to be handled differently since the workflow manager
            # use type information to get removed targets
            if self.socket is not None and not self.socket.closed:
                self.socket.send(encode_msg(e))
            else:
                raise e
        except Exception as e:
            if env.verbosity > 2:
                sys.stderr.write(get_traceback())
            if isinstance(e, ProcessKilled):
                raise
            # if not self.exec_error
            if e != self.exec_error:
                self.exec_error.append(self.step.step_name(), e)
            #
            if self.exec_error.errors:
                if self.socket is not None and not self.socket.closed:
                    env.log_to_file(
                        "STEP",
                        f"Step {self.step.step_name()} sends exception {self.exec_error}",
                    )
                    self.socket.send(encode_msg(self.exec_error))
                else:
                    raise self.exec_error
| 42.433839 | 193 | 0.486893 |
import ast
import copy
import os
import subprocess
import sys
import time
from collections import defaultdict
from collections.abc import Mapping, Sequence
from typing import List
import zmq
from .controller import close_socket, create_socket, send_message_to_controller
from .messages import encode_msg, decode_msg
from .eval import SoS_eval, SoS_exec, accessed_vars, KeepOnlyImportAndDefine
from .executor_utils import (
__named_output__,
__null_func__,
__output_from__,
__traced__,
clear_output,
create_task,
get_traceback_msg,
reevaluate_output,
statementMD5,
validate_step_sig,
verify_input,
ExecuteError,
)
from .syntax import (
SOS_DEPENDS_OPTIONS,
SOS_INPUT_OPTIONS,
SOS_OUTPUT_OPTIONS,
SOS_TARGETS_OPTIONS,
)
from .targets import (
RemovedTarget,
RuntimeInfo,
UnavailableLock,
sos_variable,
UnknownTarget,
dynamic,
file_target,
sos_step,
sos_targets,
invalid_target
)
from .tasks import MasterTaskParams, TaskFile
from .utils import (
ArgumentError,
StopInputGroup,
TerminateExecution,
env,
get_traceback,
short_repr,
ProcessKilled,
get_localhost_ip,
textMD5,
)
__all__: List = []
class TaskManager:
    """Accumulate tasks generated by substeps and group them into trunks.

    Tasks are collected into fixed-size "slots" (one per trunk of
    ``trunk_size`` tasks).  A full slot is queued for submission either as
    individual tasks (``trunk_size == 1`` or partially-skipped slots) or
    as a ``MasterTaskParams`` bundle executed by ``trunk_workers`` workers.
    """

    def __init__(self, num_tasks, trunk_size, trunk_workers):
        super(TaskManager, self).__init__()
        self.num_tasks = num_tasks
        import math

        # one slot per trunk; the last slot may hold fewer tasks
        self._slots = [[] for x in range(math.ceil(num_tasks / trunk_size))]
        self._last_slot_size = (
            trunk_size if (num_tasks % trunk_size == 0) else (num_tasks % trunk_size)
        )
        self.trunk_size = trunk_size
        self.trunk_workers = trunk_workers
        # IDs already handed to submit_tasks
        self._submitted_tasks = []
        # complete slots waiting to be packed into master tasks
        self._unsubmitted_slots = []
        # individual task definitions waiting for submission
        self._unsubmitted_tasks = []
        # all registered task IDs, in registration order
        self._all_ids = []
        # all declared task outputs, used to detect output conflicts
        self._all_output = []
        self._terminate = False
        # task_id -> tags mapping
        self._tags = {}

    def set(self, idx, task_def):
        # task_def is (task_id, taskdef, output), or None for a skipped
        # substep; idx is the substep index that determines the slot
        slot = idx // self.trunk_size
        self._slots[slot].append([idx, task_def])
        # flush the slot when it is full (the last slot is shorter)
        if len(self._slots[slot]) == self.trunk_size or (
            slot == len(self._slots) - 1
            and len(self._slots[slot]) == self._last_slot_size
        ):
            if not all([x[1] is None for x in self._slots[slot]]):
                if self.trunk_size == 1 or any(x[1] is None for x in self._slots[slot]):
                    # slots with skipped substeps are submitted as
                    # individual tasks rather than as a master task
                    self._unsubmitted_tasks.extend(
                        [x[1] for x in self._slots[slot] if x[1] is not None]
                    )
                else:
                    self._unsubmitted_slots.append(
                        sorted(self._slots[slot], key=lambda x: x[0])
                    )
            self._slots[slot] = []
        if not task_def:
            return
        if isinstance(task_def[2], Sequence):
            self._all_output.extend(task_def[2])
        self._all_ids.append(task_def[0])
        self._tags[task_def[0]] = task_def[1].tags

    def tags(self, task_id):
        # tags of a registered task, or an empty list for unknown IDs
        return self._tags.get(task_id, [])

    def index_of(self, task_id):
        # registration index of a task, or -1 if unknown
        if task_id in self._all_ids:
            return self._all_ids.index(task_id)
        else:
            return -1

    def has_output(self, output):
        # True if any target in output is already the declared output of a
        # pending (unsubmitted) task
        if not isinstance(output, Sequence) or not self._unsubmitted_slots:
            return False
        return any(x in self._all_output for x in output)

    def get_job(self, all_tasks=False):
        """Return a list of task (or master task) IDs ready for submission,
        or None when nothing is ready.  With ``all_tasks=True``, flush all
        pending tasks including incomplete slots (end of step)."""
        # submit all tasks without trunk, easy
        ids = []
        # pack each complete slot into one master task
        for slot in self._unsubmitted_slots:
            master = MasterTaskParams(self.trunk_workers)
            for _, (task_id, taskdef, _) in slot:
                master.push(task_id, taskdef)
            ids.append(master.ID)
            TaskFile(master.ID).save(master.finalize())
            send_message_to_controller(
                [
                    "workflow_sig",
                    "task",
                    master.ID,
                    f"{{'creation_time': {time.time()}}}",
                ]
            )
        self._unsubmitted_slots = []
        # decide which individual tasks to submit now
        if self.trunk_size == 1 or all_tasks:
            to_be_submitted = self._unsubmitted_tasks
            # at the end of the step, also flush incomplete slots
            [
                to_be_submitted.extend([x[1] for x in slot if x[1] is not None])
                for slot in self._slots
                if slot
            ]
            self._unsubmitted_tasks = []
        else:
            # only submit whole trunks worth of tasks
            num_tasks = (
                len(self._unsubmitted_tasks) // self.trunk_size * self.trunk_size
            )
            to_be_submitted = self._unsubmitted_tasks[:num_tasks]
            self._unsubmitted_tasks = self._unsubmitted_tasks[num_tasks:]
        if self.trunk_size == 1 or (all_tasks and len(self._unsubmitted_tasks) == 1):
            # submit tasks individually
            for task_id, taskdef, _ in to_be_submitted:
                TaskFile(task_id).save(taskdef)
                send_message_to_controller(
                    [
                        "workflow_sig",
                        "task",
                        task_id,
                        f"{{'creation_time': {time.time()}}}",
                    ]
                )
                ids.append(task_id)
        else:
            # bundle tasks into master tasks of trunk_size tasks each
            master = None
            for task_id, taskdef, _ in to_be_submitted:
                if master is not None and master.num_tasks() == self.trunk_size:
                    ids.append(master.ID)
                    TaskFile(master.ID).save(master)
                    send_message_to_controller(
                        [
                            "workflow_sig",
                            "task",
                            master.ID,
                            f"{{'creation_time': {time.time()}}}",
                        ]
                    )
                    master = None
                if master is None:
                    master = MasterTaskParams(self.trunk_workers)
                master.push(task_id, taskdef)
            # the final master task may contain fewer than trunk_size tasks
            if master is not None:
                TaskFile(master.ID).save(master.finalize())
                send_message_to_controller(
                    [
                        "workflow_sig",
                        "task",
                        master.ID,
                        f"{{'creation_time': {time.time()}}}",
                    ]
                )
                ids.append(master.ID)
        if not ids:
            return None
        self._submitted_tasks.extend(ids)
        return ids

    def clear_submitted(self):
        # forget submitted IDs once their results have been collected
        self._submitted_tasks = []
def expand_input_files(*args, **kwargs):
    """Resolve ``dynamic`` targets in an ``input:`` specification and
    return the resulting ``sos_targets``."""

    def _materialize(value):
        # dynamic targets are resolved at execution time
        return value.resolve() if isinstance(value, dynamic) else value

    args = [_materialize(arg) for arg in args]
    kwargs = {key: _materialize(val) for key, val in kwargs.items()}
    verify = env.config["error_mode"] != "ignore"
    # no specification at all: reuse the input inherited from the last step
    if not args and not kwargs:
        return env.sos_dict["step_input"]
    # only grouping/option keywords: regroup the inherited input
    if not args and all(key in SOS_TARGETS_OPTIONS for key in kwargs):
        return sos_targets(
            env.sos_dict["step_input"], _verify_existence=verify, **kwargs
        )
    # explicit targets were specified
    return sos_targets(
        *args,
        **kwargs,
        _verify_existence=verify,
        _undetermined=False,
        _source=env.sos_dict["step_name"],
    )
def expand_depends_files(*args, **kwargs):
    """Resolve ``dynamic`` targets in a ``depends:`` specification and
    return a ``sos_targets`` whose existence is always verified."""

    def _materialize(value):
        return value.resolve() if isinstance(value, dynamic) else value

    resolved_args = [_materialize(arg) for arg in args]
    resolved_kwargs = {key: _materialize(val) for key, val in kwargs.items()}
    return sos_targets(
        *resolved_args,
        **resolved_kwargs,
        _verify_existence=True,
        _undetermined=False,
        _source=env.sos_dict["step_name"],
    )
def expand_output_files(value, *args, **kwargs):
    """Return ``sos_targets`` for an ``output:`` statement.

    An output that contains any ``dynamic`` target stays undetermined
    (carrying the original statement *value*) until the substep has run.
    """
    has_dynamic = any(isinstance(arg, dynamic) for arg in args) or any(
        isinstance(val, dynamic) for val in kwargs.values()
    )
    if has_dynamic:
        return sos_targets(_undetermined=value)
    return sos_targets(
        *args, **kwargs, _undetermined=False, _source=env.sos_dict["step_name"]
    )
def parse_shared_vars(option):
    """Return the set of variable names declared by a ``shared`` step
    option, which may be a string, a mapping (name -> expression), or a
    sequence of either."""
    if not option:
        return set()
    if isinstance(option, str):
        return {option}
    collected = set()
    if isinstance(option, Mapping):
        # values are expressions; collect the variables they reference
        for expr in option.values():
            collected |= accessed_vars(expr, mode="eval")
    elif isinstance(option, Sequence):
        for entry in option:
            if isinstance(entry, str):
                collected.add(entry)
            elif isinstance(entry, Mapping):
                for expr in entry.values():
                    collected |= accessed_vars(expr, mode="eval")
    return collected
def evaluate_shared(vars, option):
    """Evaluate a ``shared`` step option against substep namespaces.

    *vars* is a list of per-substep variable dictionaries; *option* is the
    value of the ``shared`` option (str, mapping, or sequence of either).
    Step-level ``step_<name>`` lists are created from the substep values,
    and a dictionary of shared variables is returned.
    """
    shared_vars = {}
    # expose the variables of the last substep for expression evaluation
    env.sos_dict.quick_update(vars[-1])
    for key in vars[-1].keys():
        try:
            if key in ("output", "depends", "input"):
                env.logger.warning(
                    f"Cannot overwrite variable step_{key} from substep variable {key}"
                )
            else:
                # step_<key> collects the value of <key> from every substep
                env.sos_dict.set("step_" + key, [x[key] for x in vars])
        except Exception as e:
            env.logger.warning(f"Failed to create step level variable step_{key}: {e}")
    if isinstance(option, str):
        # a plain name: take its value from the step namespace
        if option in env.sos_dict:
            shared_vars[option] = env.sos_dict[option]
        else:
            raise RuntimeError(f"shared variable does not exist: {option}")
    elif isinstance(option, Mapping):
        # name -> expression pairs; name == expression means direct lookup
        for var, val in option.items():
            try:
                if var == val:
                    shared_vars[var] = env.sos_dict[var]
                else:
                    shared_vars[var] = SoS_eval(val)
            except Exception as e:
                raise RuntimeError(
                    f"Failed to evaluate shared variable {var} from expression {val}: {e}"
                )
    elif isinstance(option, Sequence):
        # a sequence of names and/or mappings
        for item in option:
            if isinstance(item, str):
                if item in env.sos_dict:
                    shared_vars[item] = env.sos_dict[item]
                else:
                    raise RuntimeError(f"shared variable does not exist: {option}")
            elif isinstance(item, Mapping):
                for var, val in item.items():
                    try:
                        if var == val:
                            continue
                        else:
                            shared_vars[var] = SoS_eval(val)
                    except Exception as e:
                        raise RuntimeError(
                            f"Failed to evaluate shared variable {var} from expression {val}: {e}"
                        )
            else:
                raise RuntimeError(
                    f"Unacceptable shared option. Only str or mapping are accepted in sequence: {option}"
                )
    else:
        raise RuntimeError(
            f"Unacceptable shared option. Only str, sequence, or mapping are accepted in sequence: {option}"
        )
    return shared_vars
def get_value_of_param(name, param_list, extra_dict=None):
    """Extract the value of keyword parameter *name* from a parameter list.

    Parameters
    ----------
    name : str
        Name of the keyword argument to look for.
    param_list : str
        Textual parameter list, e.g. ``"queue='localhost', trunk_size=2"``.
    extra_dict : dict, optional
        Namespace used as globals when the value is not a plain literal.

    Returns
    -------
    list
        ``[value]`` when the parameter is present, ``[]`` otherwise.
    """
    # Bug fix: the default used to be a shared mutable ``{}``.  ``eval``
    # inserts ``__builtins__`` into the globals mapping it is handed, so
    # the shared default was silently mutated across calls.  Use a fresh
    # dict per call instead (behavior for callers is unchanged).
    if extra_dict is None:
        extra_dict = {}
    tree = ast.parse(f"__null_func__({param_list})")
    # x.func can be an attribute (e.g. a.b()) and do not have id
    kwargs = [
        x for x in ast.walk(tree) if x.__class__.__name__ == "keyword" and x.arg == name
    ]
    if not kwargs:
        return []
    try:
        # fast path: plain literals need no namespace
        return [ast.literal_eval(kwargs[0].value)]
    except Exception:
        # fall back to evaluating the expression in the given namespace
        return [
            eval(
                compile(
                    ast.Expression(body=kwargs[0].value),
                    filename="<string>",
                    mode="eval",
                ),
                extra_dict,
            )
        ]
def is_sos_run_the_only_last_stmt(stmt):
    """Return True if the last statement of *stmt* is a bare
    ``sos_run(...)`` expression and no other ``sos_run`` call appears
    anywhere in the code."""
    tree = ast.parse(stmt)
    if not tree.body:
        return False
    last = tree.body[-1]
    # the last statement must be an expression statement calling sos_run
    if not (isinstance(last, ast.Expr) and isinstance(last.value, ast.Call)):
        return False
    if getattr(last.value.func, "id", None) != "sos_run":
        return False
    # ... and that must be the only sos_run call in the whole statement
    n_calls = sum(
        1
        for node in ast.walk(tree)
        if isinstance(node, ast.Call) and getattr(node.func, "id", None) == "sos_run"
    )
    return n_calls == 1
class Base_Step_Executor:
# This base class defines how steps are executed. The derived classes will reimplement
# some function to behave differently in different modes.
#
    def __init__(self, step):
        """Initialize the executor for a single SoS *step*."""
        # the step definition to be executed
        self.step = step
        # created lazily upon first task submission
        self.task_manager = None
        # accumulates exceptions raised by substeps/tasks
        self.exec_error = ExecuteError(self.step.step_name())
#
# Functions that should be redefined in derived class
#
    def submit_tasks(self, tasks):
        """Submit a batch of tasks; must be overridden by derived executors."""
        raise RuntimeError("Undefined base function submit_tasks")
    def wait_for_tasks(self, tasks, all_submitted):
        """Wait for task results; must be overridden by derived executors."""
        # this will be redefined in subclasses
        raise RuntimeError("Undefined base function wait_for_tasks")
    def wait_for_subworkflows(self, allow_pending=0):
        """Wait for subworkflow results; must be overridden by derived executors."""
        raise RuntimeError("Undefined base function wait_for_subworkflows")
    def handle_unknown_target(self, e):
        """Resolve a missing target; must be overridden by derived executors."""
        raise RuntimeError("Undefined base function handle_unknown_target")
    def init_input_output_vars(self):
        """Initialize ``step_input``/``_input``, ``step_output``/``_output``
        and the depends variables at the start of step execution."""
        # if there is __step_output__ from previous step, use it as default input
        # otherwise, reset to empty
        if (
            "__step_output__" not in env.sos_dict
            or env.sos_dict["__step_output__"].unspecified()
        ):
            env.sos_dict.set("step_input", sos_targets([]))
        else:
            env.sos_dict.set("step_input", env.sos_dict["__step_output__"])
        # input can be Undetermined from undetermined output from last step
        env.sos_dict.set("_input", copy.deepcopy(env.sos_dict["step_input"]))
        # if there is default output for auxiliary steps, use it as step_output and _output
        # otherwise reset to unspecified.
        if "__default_output__" in env.sos_dict:
            # if step is triggered by sos_step, it should not be considered as
            # output of the step. #981
            env.sos_dict.set(
                "__default_output__",
                sos_targets(
                    [
                        x
                        for x in env.sos_dict["__default_output__"]._targets
                        if not isinstance(x, sos_step)
                    ]
                ),
            )
            env.sos_dict.set(
                "step_output", copy.deepcopy(env.sos_dict["__default_output__"])
            )
            env.sos_dict.set(
                "_output", copy.deepcopy(env.sos_dict["__default_output__"])
            )
        else:
            env.sos_dict.set("step_output", sos_targets([]))
            # output is said to be unspecified until output: is used
            env.sos_dict.set("_output", sos_targets(_undetermined=True))
        env.sos_dict.set("step_depends", sos_targets([]))
        env.sos_dict.set("_depends", sos_targets([]))
#
# Common functions
#
    def verify_output(self):
        """Verify that all declared outputs exist after step completion.

        Returns a ``sos_targets`` of missing targets (only populated when
        ``error_mode`` is ``ignore``); otherwise raises RuntimeError for a
        missing output.
        """
        missing = sos_targets([])
        if env.sos_dict["step_output"] is None:
            return
        if not env.sos_dict["step_output"].valid():
            raise RuntimeError(
                "Output of a completed step cannot be undetermined or unspecified."
            )
        for target in env.sos_dict["step_output"]:
            # sos_step and invalid_target entries are not files to check
            if isinstance(target, (sos_step, invalid_target)):
                continue
            if isinstance(target, str):
                if not file_target(target).target_exists("any"):
                    if env.config["run_mode"] == "dryrun":
                        # in dryrun mode, we just create these targets
                        file_target(target).create_placeholder()
                    else:
                        # latency wait for 2 seconds because the file system might be slow
                        if env.config["run_mode"] == "run":
                            time.sleep(2)
                        if not file_target(target).target_exists("any"):
                            if env.config["error_mode"] == "ignore":
                                missing.extend(target)
                            else:
                                raise RuntimeError(
                                    f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]} (curdir={os.getcwd()})'
                                )
            elif not target.target_exists("any"):
                # non-string targets implement their own existence check
                if env.config["run_mode"] == "dryrun":
                    target.create_placeholder()
                else:
                    if env.config["run_mode"] == "run":
                        time.sleep(2)
                    if not target.target_exists("any"):
                        if env.config["error_mode"] == "ignore":
                            missing.extend(target)
                        else:
                            raise RuntimeError(
                                f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]}'
                            )
        return missing
# directive input
    def process_input_args(self, ifiles: sos_targets, **kwargs):
        """Process the ``input:`` statement (generator).

        Sets ``step_input``/``_input`` and returns the input groups (one
        per substep); yields to the caller while dynamic inputs are being
        verified.
        """
        if ifiles.unspecified():
            env.sos_dict.set("step_input", sos_targets([]))
            env.sos_dict.set("_input", sos_targets([]))
            env.sos_dict.set("step_output", sos_targets())
            return [sos_targets([])], [{}]
        assert isinstance(ifiles, sos_targets)
        if env.sos_dict.get("__dynamic_input__", False):
            # dynamic inputs may need to be resolved by the controller
            runner = self.verify_dynamic_targets(
                [x for x in ifiles if isinstance(x, file_target)]
            )
            try:
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
        # input file is the filtered files
        env.sos_dict.set("step_input", ifiles)
        env.sos_dict.set("_input", ifiles)
        if ifiles._num_groups() == 0:
            ifiles._group("all")
        #
        return ifiles.groups
    def verify_dynamic_targets(self, target):
        """Base implementation: nothing to verify (kept as a generator so
        subclasses can yield socket requests)."""
        yield None
        return True
    def process_depends_args(self, dfiles: sos_targets, **kwargs):
        """Process the ``depends:`` statement (generator); sets
        ``_depends`` and ``step_depends``."""
        for k in kwargs.keys():
            if k not in SOS_DEPENDS_OPTIONS:
                raise RuntimeError(f"Unrecognized depends option {k}")
        if dfiles.undetermined():
            raise ValueError(r"Depends needs to handle undetermined")
        if env.sos_dict.get("__dynamic_depends__", False):
            # dynamic dependencies may need to be resolved by the controller
            runner = self.verify_dynamic_targets(
                [x for x in dfiles if isinstance(x, file_target)]
            )
            try:
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
        env.sos_dict.set("_depends", dfiles)
        env.sos_dict.set("step_depends", dfiles)
    def process_output_args(self, ofiles: sos_targets, **kwargs):
        """Process the ``output:`` statement for the current substep.

        Selects the output group for the current ``_index``, creates
        parent directories, sets ``_output``/``step_output``, and checks
        for outputs that overlap with previous substeps.
        """
        for k in kwargs.keys():
            if k not in SOS_OUTPUT_OPTIONS:
                raise RuntimeError(f"Unrecognized output option {k}")
        if ofiles._num_groups() > 0:
            if ofiles._num_groups() == 1:
                # a single group applies to every substep
                ofiles = ofiles._get_group(0)
            elif ofiles._num_groups() != len(self._substeps):
                raise RuntimeError(
                    f"Inconsistent number of output ({ofiles._num_groups()}) and input ({len(self._substeps)}) groups."
                )
            else:
                ofiles = ofiles._get_group(env.sos_dict["_index"])
        # create directory
        if ofiles.valid():
            parents = set(
                [
                    os.path.abspath(os.path.join(ofile, os.pardir))
                    for ofile in ofiles
                    if isinstance(ofile, file_target)
                ]
            )
            for parent_dir in parents:
                if parent_dir and not os.path.isdir(parent_dir):
                    os.makedirs(parent_dir, exist_ok=True)
        # set variables
        env.sos_dict.set("_output", ofiles)
        env.sos_dict.set("step_output", ofiles)
        #
        for ofile in ofiles:
            oname = ofile.target_name()
            if oname in self._all_outputs:
                raise ValueError(
                    f'Output {ofile} from substep {env.sos_dict["_index"]} of {env.sos_dict["__num_groups__"]} substeps overlaps with output from a previous substep.'
                )
            self._all_outputs.add(oname)
    def submit_task(self, task_info):
        """Register a task produced by a substep with the TaskManager and
        submit any trunk that becomes ready.

        Returns the task ID, or None for a skipped substep.
        """
        if self.task_manager is None:
            # first task: create the manager from _runtime options
            if self.step.task_params:
                for key in ("trunk_size", "trunk_workers", "queue"):
                    val = get_value_of_param(
                        key, self.step.task_params, extra_dict=env.sos_dict.dict()
                    )
                    if val:
                        env.sos_dict["_runtime"][key] = val[0]
            if "trunk_size" in env.sos_dict["_runtime"]:
                trunk_size = env.sos_dict["_runtime"]["trunk_size"]
                # None or non-positive means "everything in one trunk"
                if trunk_size is None or trunk_size <= 0:
                    trunk_size = env.sos_dict["__num_groups__"]
                if not isinstance(trunk_size, int):
                    raise ValueError(
                        f'An integer value or None is expected for runtime option trunk_size, "{trunk_size}" provided'
                    )
            else:
                trunk_size = 1
            if "trunk_workers" in env.sos_dict["_runtime"]:
                if "nodes" in env.sos_dict["_runtime"]:
                    raise ValueError(
                        'Option "trunk_workers" that specifies number of nodes and processes for the execution '
                        'of single-node jobs and option "nodes" that specifies number of nodes for single multi-node '
                        "jobs cannot be used at the same time."
                    )
                trunk_workers = env.sos_dict["_runtime"]["trunk_workers"]
            else:
                trunk_workers = None
            # if 'queue' in env.sos_dict['_runtime'] and env.sos_dict['_runtime']['queue']:
            #     host = env.sos_dict['_runtime']['queue']
            # else:
            #     # otherwise, use workflow default
            #     host = '__default__'
            self.task_manager = TaskManager(
                env.sos_dict["__num_groups__"], trunk_size, trunk_workers
            )
        task_id = task_info["task_id"]
        task_index = task_info["index"]
        if task_id is None:
            # substep was skipped; record an empty slot entry
            self.task_manager.set(task_index, None)
            return None
        taskdef = task_info["task_def"]
        task_vars = task_info["task_vars"]
        # 618
        # it is possible that identical tasks are executed (with different underlying random numbers)
        # we should either give a warning or produce different ids...
        if self.task_manager.index_of(task_id) >= 0:
            raise RuntimeError(
                f'Task {task_id} generated for (_index={env.sos_dict["_index"]}) is identical to a previous one (_index={self.task_manager.index_of(task_id)}).'
            )
        elif self.task_manager.has_output(task_vars["_output"]):
            raise RuntimeError(
                f'Task produces output files {", ".join(task_vars["_output"])} that are output of other tasks.'
            )
        # if no trunk_size, the job will be submitted immediately
        # otherwise tasks will be accumulated and submitted in batch
        self.task_manager.set(task_index, (task_id, taskdef, task_vars["_output"]))
        tasks = self.task_manager.get_job()
        if tasks:
            self.submit_tasks(tasks)
        return task_id
    def wait_for_results(self, all_submitted):
        """Generator: wait for concurrent substeps and submitted tasks,
        record their results in ``self.proc_results`` and update the
        completion counters."""
        # this is a generator function because wait_for_tasks is a generator
        # function and needs to yield to the caller
        if self.concurrent_substep:
            try:
                runner = self.wait_for_substep()
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
        if self.task_manager is None:
            return {}
        #
        # report task
        # what we should do here is to get the alias of the Host
        # because it can be different (e.g. not localhost
        queue = env.sos_dict["_runtime"]["queue"]
        # submit the last batch of tasks
        tasks = self.task_manager.get_job(all_tasks=True)
        if tasks:
            self.submit_tasks(tasks)
        # waiting for results of specified IDs
        try:
            # 1218
            runner = self.wait_for_tasks(
                self.task_manager._submitted_tasks, all_submitted
            )
            yreq = next(runner)
            while True:
                yres = yield yreq
                yreq = runner.send(yres)
        except StopIteration as e:
            results = e.value
        for id, result in results.items():
            # turn to string to avoid naming lookup issue
            rep_result = {
                x: (y if isinstance(y, (int, bool, float, str)) else short_repr(y))
                for x, y in result.items()
            }
            rep_result["tags"] = " ".join(self.task_manager.tags(id))
            rep_result["queue"] = queue
            send_message_to_controller(["workflow_sig", "task", id, repr(rep_result)])
        self.task_manager.clear_submitted()
        # if in dryrun mode, we display the output of the dryrun task
        if env.config["run_mode"] == "dryrun":
            tid = list(results.keys())[0]
            tf = TaskFile(tid)
            if tf.has_stdout():
                print(TaskFile(tid).stdout)
        # match substep entries (which hold task IDs) with task results
        for idx, task in self.proc_results.items():
            # if it is done
            if isinstance(task, dict):
                continue
            if task in results:
                self.proc_results[idx] = results[task]
            else:
                # can be a subtask
                for _, mres in results.items():
                    if "subtasks" in mres and task in mres["subtasks"]:
                        self.proc_results[idx] = mres["subtasks"][task]
                    # elif 'exception' in mres:
                    #     self.proc_results[idx] = mres
        #
        # check if all have results?
        if any(isinstance(x, str) for x in self.proc_results.values()):
            raise RuntimeError(
                f'Failed to get results for tasks {", ".join(x for x in self.proc_results.values() if isinstance(x, str))}'
            )
        #
        for idx, res in self.proc_results.items():
            if "skipped" in res and res["skipped"]:
                self.completed["__task_skipped__"] += 1
                # complete case: task skipped
                send_message_to_controller(
                    ["progress", "substep_completed", env.sos_dict["step_id"]]
                )
            else:
                # complete case: task completed
                send_message_to_controller(
                    ["progress", "substep_ignored", env.sos_dict["step_id"]]
                )
                self.completed["__task_completed__"] += 1
            if "shared" in res:
                self.shared_vars[idx].update(res["shared"])
def log(self, stage=None, msg=""):
if stage == "start":
env.logger.info(
f'{"Checking" if env.config["run_mode"] == "dryrun" else "Running"} ``{self.step.step_name(True)}``: {self.step.comment.strip()}'
)
elif stage == "input statement":
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("STEP", f"Handling input statement {msg}")
elif stage == "_input":
if env.sos_dict["_input"] is not None and len(env.sos_dict["_input"]) > 0:
env.logger.debug(
f'_input: ``{short_repr(env.sos_dict["_input"])}``{msg}'
)
elif stage == "_depends":
if env.sos_dict["_depends"] is not None:
env.logger.debug(
f'_depends: ``{short_repr(env.sos_dict["_depends"])}``{msg}'
)
elif stage == "input":
if env.sos_dict["step_input"] is not None:
env.logger.info(
f'input: ``{short_repr(env.sos_dict["step_input"])}``{msg}'
)
elif stage == "output":
if (
env.sos_dict["step_output"] is not None
and len(env.sos_dict["step_output"]) > 0
):
env.logger.info(
f'``{self.step.step_name(True)}`` output: ``{short_repr(env.sos_dict["step_output"])}``{msg}'
)
    def execute(self, stmt, return_result=False):
        """Execute a statement in the step namespace.

        The result is stored in ``self.last_res`` (and returned when
        *return_result* is True).  Control-flow exceptions pass through
        unchanged; other exceptions are wrapped in RuntimeError with a
        formatted traceback.
        """
        try:
            self.last_res = SoS_exec(
                stmt,
                return_result=return_result or env.config["run_mode"] == "interactive",
            )
            if return_result:
                return self.last_res
        except (StopInputGroup, TerminateExecution, UnavailableLock):
            # these are handled by the caller as flow control
            raise
        except subprocess.CalledProcessError as e:
            raise RuntimeError(e.stderr)
        except ArgumentError:
            raise
        except ProcessKilled:
            raise
        except KeyboardInterrupt as e:
            raise RuntimeError(get_traceback_msg(e))
        except Exception as e:
            raise RuntimeError(get_traceback_msg(e))
    def prepare_substep(self):
        """Create the PULL socket that collects results from concurrent
        substep workers and publish its address in env.config."""
        # socket to collect result
        self.result_pull_socket = create_socket(
            env.zmq_context, zmq.PULL, "substep result collector"
        )
        local_ip = get_localhost_ip()
        port = self.result_pull_socket.bind_to_random_port(f"tcp://{local_ip}")
        env.config["sockets"]["result_push_socket"] = f"tcp://{local_ip}:{port}"
    def submit_substep(self, param):
        """Send a substep execution request to the controller."""
        send_message_to_controller(["substep", param])
    def process_returned_substep_result(self, till=None, wait=True):
        """Generator: collect results from concurrent substep workers.

        With ``wait=True``, consume results until *till* substeps have
        completed; with ``wait=False``, only drain results that are
        already available (unless too many substeps are pending).
        """
        while True:
            if not wait:
                # 1213
                cur_index = env.sos_dict["_index"]
                pending_substeps = cur_index - self._completed_concurrent_substeps + 1
                if pending_substeps < (
                    100
                    if isinstance(self.concurrent_substep, bool)
                    else self.concurrent_substep
                ):
                    if not self.result_pull_socket.poll(0):
                        return
                elif (
                    "STEP" in env.config["SOS_DEBUG"]
                    or "ALL" in env.config["SOS_DEBUG"]
                ):
                    # if there are more than 100 pending substeps
                    # we wait indefinitely for the results
                    env.log_to_file(
                        "STEP",
                        f"Wait for more substeps to be done before submitting. (index={cur_index}, processed={self._completed_concurrent_substeps})",
                    )
            elif self._completed_concurrent_substeps == till:
                return
            yield self.result_pull_socket
            res = decode_msg(self.result_pull_socket.recv())
            if "exception" in res:
                if isinstance(res["exception"], ProcessKilled):
                    raise res["exception"]
                elif isinstance(res["exception"], RemovedTarget):
                    pass
                elif env.config["error_mode"] == "ignore":
                    idx_msg = (
                        f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
                        if "index" in res and len(self._substeps) > 1
                        else f'(id={env.sos_dict["step_id"]})'
                    )
                    env.logger.warning(
                        f"""Ignoring error from ``{self.step.step_name(True)}`` {idx_msg}: {res["exception"]}."""
                    )
                    res["output"] = sos_targets(invalid_target())
                elif env.config["error_mode"] == "abort":
                    idx_msg = (
                        f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
                        if "index" in res and len(self._substeps) > 1
                        else f'(id={env.sos_dict["step_id"]})'
                    )
                    self.exec_error.append(idx_msg, res["exception"])
                    # try to stop everything but wait till for submitted tasks to
                    # complete
                    # NOTE(review): the next expression has no effect (a bare
                    # "+ 1", possibly a leftover "+="). The "waiting" formula
                    # below already subtracts 1 for the current substep, so the
                    # count appears correct as-is -- confirm intent before
                    # changing.
                    self._completed_concurrent_substeps + 1
                    waiting = till - 1 - self._completed_concurrent_substeps
                    env.logger.warning(
                        f'``{self.step.step_name(True)}`` {idx_msg} returns an error.{f" Terminating step after completing {waiting} submitted substeps." if waiting else " Terminating now."}'
                    )
                    for i in range(waiting):
                        yield self.result_pull_socket
                        res = decode_msg(self.result_pull_socket.recv())
                        if "exception" in res:
                            self.exec_error.append(
                                f'index={res["index"]}', res["exception"]
                            )
                    raise self.exec_error
                else:
                    # default or unspecified
                    idx_msg = (
                        f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
                        if "index" in res and len(self._substeps) > 1
                        else f'(id={env.sos_dict["step_id"]})'
                    )
                    self.exec_error.append(idx_msg, res["exception"])
            #
            if "index" not in res:
                raise RuntimeError(
                    "Result received from substep does not have key index"
                )
            if "task_id" in res:
                # the substep produced a task; register it for submission
                task = self.submit_task(res)
                # if substep returns tasks, ...
                if res["task_id"]:
                    self.proc_results[res["index"]] = task
                else:
                    # if there is no task_id, the substep must have
                    # been skipped.
                    self.proc_results[res["index"]] = res
            else:
                self.proc_results[res["index"]] = res
            self._completed_concurrent_substeps += 1
    def wait_for_substep(self):
        """Generator: drain substep results until every submitted substep
        has reported back."""
        while self._completed_concurrent_substeps < len(self.proc_results):
            try:
                runner = self.process_returned_substep_result(
                    till=len(self.proc_results), wait=True
                )
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
    def collect_result(self):
        """Assemble the result dictionary that is sent back to the master
        process and report step-completion progress."""
        # only results will be sent back to the master process
        #
        # __step_input__:   input of this step
        # __step_output__:  output of this step
        # __step_depends__: dependent files of this step
        result = {
            "__step_input__": env.sos_dict["step_input"],
            "__step_output__": env.sos_dict["step_output"],
            "__step_depends__": env.sos_dict["step_depends"],
            "__step_name__": env.sos_dict["step_name"],
            "__completed__": self.completed,
        }
        result["__last_res__"] = self.last_res
        result["__shared__"] = {}
        if "shared" in self.step.options:
            result["__shared__"] = self.shared_vars
        # sos_variable targets in the output are shared back as well
        for x in result["__step_output__"].targets:
            if isinstance(x, sos_variable):
                result["__shared__"][x.target_name()] = env.sos_dict[x.target_name()]
        send_message_to_controller(
            [
                "progress",
                "step_completed",
                -1
                if "sos_run" in env.sos_dict["__signature_vars__"]
                else self.completed["__step_completed__"],
                env.sos_dict["step_name"],
                env.sos_dict["step_output"],
            ]
        )
        return result
    def set_task_queue_from_task_params(self):
        """Determine the task queue from task parameters and command-line
        options; when no queue is in effect, fold the task statement back
        into a regular step statement."""
        if self.step.task_params:
            try:
                task_queue = get_value_of_param(
                    "queue", self.step.task_params, extra_dict=env.sos_dict.dict()
                )
                if task_queue:
                    env.sos_dict["_runtime"]["queue"] = task_queue[0]
            except Exception as e:
                raise ValueError(
                    f"Failed to determine value of parameter queue of {self.step.task_params}: {e}"
                )
            # # check concurrent #1134
            # try:
            #     task_concurrency = get_value_of_param(
            #         'concurrent',
            #         self.step.task_params,
            #         extra_dict=env.sos_dict.dict())
            #     if task_concurrency:
            #         env.sos_dict['_runtime']['concurrent'] = task_concurrency[0]
            # except Exception as e:
            #     raise ValueError(
            #         f'Failed to determine value of parameter queue of {self.step.task_params}: {e}'
            #     )
        # if -q is unspecified and option queue is unspecified,
        # or queue=None is specified, disregard the task keyword
        if (
            env.config["default_queue"] is None
            and "queue" not in env.sos_dict["_runtime"]
        ) or (
            "queue" in env.sos_dict["_runtime"]
            and env.sos_dict["_runtime"]["queue"] is None
        ):
            # remove task statement
            if len(self.step.statements) >= 1 and self.step.statements[-1][0] == "!":
                self.step.statements[-1][1] += "\n" + self.step.task
            else:
                self.step.statements.append(["!", self.step.task])
            self.step.task = None
        # is queue is unspecified, it take value from command line
        # in this case -q should have been specified
        elif "queue" not in env.sos_dict["_runtime"]:
            env.sos_dict["_runtime"]["queue"] = env.config["default_queue"]
def local_exec_without_signature(self, statement):
    """Execute one substep locally when signature tracking is disabled.

    Runs the statement (possibly as a concurrent nested subworkflow),
    reports completion to the controller when the step has no external
    task, and collects variables listed in the ``shared`` option.
    """
    idx = env.sos_dict["_index"]
    env.log_to_file(
        "STEP", f'Execute substep {env.sos_dict["step_name"]} without signature'
    )
    try:
        if self.is_input_verified:
            verify_input()
            self.is_input_verified = False
        if env.sos_dict.get("__concurrent_subworkflow__", False):
            # nested workflows run concurrently; keep the pending result
            result = self.execute(statement[1], return_result=True)
            self._subworkflow_results.append(result)
        else:
            self.execute(statement[1])
        if not self.step.task and env.config["run_mode"] != "interactive":
            env.logger.info(
                f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
            )
    finally:
        if not self.step.task:
            # no external task, so the substep is fully done at this point
            send_message_to_controller(
                ["progress", "substep_completed", env.sos_dict["step_id"]]
            )
    if "shared" in self.step.options:
        try:
            exported = {
                name: env.sos_dict[name]
                for name in self.vars_to_be_shared
                if name in env.sos_dict
            }
            self.shared_vars[env.sos_dict["_index"]].update(exported)
        except Exception as e:
            raise ValueError(f"Missing shared variable {e}.")
def local_exec_with_signature(self, statement, sig):
    """Execute one substep locally with runtime-signature bookkeeping.

    If an existing signature matches, the substep is skipped and its
    recorded output / shared variables are reused.  Otherwise the
    statement is executed under a signature lock; the signature is
    written immediately when the step has no external task, or kept
    pending until the task completes.

    Returns True if the substep was skipped via a matched signature,
    False otherwise.
    """
    idx = env.sos_dict["_index"]
    # signature might be built outside of the function
    # not in a debug mode delayed to now
    if sig is None:
        sig = RuntimeInfo(
            statementMD5([statement[1], self.step.task]),
            env.sos_dict["_input"],
            env.sos_dict["_output"],
            env.sos_dict["_depends"],
            env.sos_dict["__signature_vars__"],
            shared_vars=self.vars_to_be_shared,
        )
    # if signature matches, we skip the substep even if
    # there are tasks.
    matched = validate_step_sig(sig)
    if matched:
        if env.sos_dict["step_output"].undetermined():
            self.output_groups[idx] = matched["output"]
        if "vars" in matched:
            self.shared_vars[idx].update(matched["vars"])
        return True
    env.log_to_file(
        "STEP",
        f'Execute substep {env.sos_dict["step_name"]} with signature {sig.sig_id}',
    )
    sig.lock()
    try:
        if self.is_input_verified:
            verify_input()
            self.is_input_verified = False
        if env.sos_dict.get("__concurrent_subworkflow__", False):
            # nested workflow runs concurrently; result collected later
            self._subworkflow_results.append(
                self.execute(statement[1], return_result=True)
            )
        else:
            self.execute(statement[1])
        if not self.step.task and env.config["run_mode"] != "interactive":
            env.logger.info(
                f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
            )
        if "shared" in self.step.options:
            try:
                self.shared_vars[env.sos_dict["_index"]].update(
                    {
                        x: env.sos_dict[x]
                        for x in self.vars_to_be_shared
                        if x in env.sos_dict
                    }
                )
            except Exception as e:
                raise ValueError(f"Missing shared variable {e}.")
    finally:
        # if this is the end of substep, save the signature
        # otherwise we need to wait for the completion
        # of the task.
        if not self.step.task:
            if env.sos_dict["step_output"].undetermined():
                output = reevaluate_output()
                self.output_groups[env.sos_dict["_index"]] = output
                sig.set_output(output)
            sig.write()
            # complete case : local execution without task
            send_message_to_controller(
                ["progress", "substep_completed", env.sos_dict["step_id"]]
            )
        else:
            self.pending_signatures[idx] = sig
        sig.release()
    return False
def skip_substep(self):
    """Mark the current substep as ignored and notify the controller.

    In concurrent mode a result record must still be registered for
    this index so that later bookkeeping sees the substep as done.
    """
    index = env.sos_dict["_index"]
    if self.concurrent_substep:
        self._completed_concurrent_substeps += 1
        self.proc_results[index] = {
            "index": index,
            "ret_code": 0,
            "output": copy.deepcopy(env.sos_dict["_output"]),
        }
    send_message_to_controller(
        ["progress", "substep_ignored", env.sos_dict["step_id"]]
    )
def concurrent_exec(self, statement, sig=None):
    """Package the current substep and submit it to a substep worker.

    The statement (and optional task) is bundled with exactly the
    variables the worker needs (``proc_vars``) and dispatched through
    ``self.submit_substep``; results are collected asynchronously by
    the step worker.
    """
    idx = env.sos_dict["_index"]
    env.log_to_file(
        "STEP",
        f'Execute substep {env.sos_dict["step_name"]} {idx} concurrently with {self._completed_concurrent_substeps} completed',
    )
    # the signatures are supposed to be written by substep worker, however
    # the substep worker might send tasks back to the step worker and
    # we should write the signatures after the tasks are completed
    if (
        env.config["sig_mode"] != "ignore"
        and not env.sos_dict["_output"].unspecified()
        and self.step.task
    ):
        self.pending_signatures[idx] = (
            sig
            if sig
            else RuntimeInfo(
                statementMD5([statement[1], self.step.task]),
                env.sos_dict["_input"],
                env.sos_dict["_output"],
                env.sos_dict["_depends"],
                env.sos_dict["__signature_vars__"],
                shared_vars=self.vars_to_be_shared,
            )
        )
    #
    # step_output: needed only when it is undetermined
    # step_input: not needed
    # _input, _output, _depends, _index: needed
    # step_name: for debug scripts
    # step_id, workflow_id: for reporting to controller
    # '__signature_vars__' to be used for signature creation
    #
    # __step_context__ is not needed because substep
    # executor does not support nested workflow
    proc_vars = (
        env.sos_dict["__signature_vars__"]
        | env.sos_dict["__environ_vars__"]
        | {
            "_input",
            "_output",
            "_depends",
            "_index",
            "step_output",
            "step_name",
            "_runtime",
            "step_id",
            "workflow_id",
            "__num_groups__",
            "__signature_vars__",
        }
    )
    self.proc_results[env.sos_dict["_index"]] = {}
    self.submit_substep(
        dict(
            stmt=statement[1],
            global_def=self.step.global_def,
            # 1225: the step might contain large variables from global section, but
            # we do not have to send them if they are not used in substeps.
            cwd=os.getcwd(),
            global_vars={
                x: y
                for x, y in self.step.global_vars.items()
                if x in env.sos_dict["__signature_vars__"]
            },
            task=self.step.task,
            task_params=self.step.task_params,
            proc_vars=env.sos_dict.clone_selected_vars(proc_vars),
            shared_vars=self.vars_to_be_shared,
            config=env.config,
        )
    )
def check_task_sig(self):
    """Validate the signature of a task-only substep.

    Builds the runtime signature from the task statement and the
    current input/output/depends, and — on a match — reuses the
    recorded output and shared variables instead of rerunning the
    task.  The signature is kept pending for later finalization.

    Returns True when the substep can be skipped.
    """
    index = env.sos_dict["_index"]
    sig = RuntimeInfo(
        statementMD5([self.step.task]),
        env.sos_dict["_input"],
        env.sos_dict["_output"],
        env.sos_dict["_depends"],
        env.sos_dict["__signature_vars__"],
        shared_vars=self.vars_to_be_shared,
    )
    env.log_to_file(
        "STEP",
        f'Check task-only step {env.sos_dict["step_name"]} with signature {sig.sig_id}',
    )
    matched = validate_step_sig(sig)
    if matched:
        if env.sos_dict["step_output"].undetermined():
            self.output_groups[index] = matched["output"]
        self.shared_vars[index].update(matched["vars"])
        # complete case: step with task ignored
        send_message_to_controller(
            ["progress", "substep_ignored", env.sos_dict["step_id"]]
        )
    self.pending_signatures[index] = sig
    return bool(matched)
# def is_task_active(self):
# active = env.sos_dict['_runtime']['active']
# env.logger.error(active)
# if active is True:
# return True
# elif active is False:
# return False
# elif isinstance(active, int):
# if active >= 0 and env.sos_dict['_index'] != active:
# return False
# if active < 0 and env.sos_dict[
# '_index'] != active + env.sos_dict['__num_groups__']:
# return False
# return True
# elif isinstance(active, Sequence):
# allowed_index = list([
# x if x >= 0 else env.sos_dict['__num_groups__'] + x
# for x in active
# ])
# return env.sos_dict['_index'] in allowed_index
# elif isinstance(active, slice):
# allowed_index = list(range(env.sos_dict['__num_groups__']))[active]
# return env.sos_dict['_index'] in allowed_index
# else:
# raise RuntimeError(
# f'Unacceptable value for option active: {active}')
def check_results(self):
    """Post-process the results of all substeps.

    Echoes captured stdout/stderr, writes pending signatures of
    successful substeps, clears output of failed ones, and folds
    failures into ``self.exec_error``, which is raised at the end if
    any error was recorded.
    """
    # echo output of successful substeps first
    for proc_result in [
        x for x in self.proc_results.values() if x["ret_code"] == 0
    ]:
        if "stdout" in proc_result and proc_result["stdout"]:
            sys.stdout.write(proc_result["stdout"])
        if "stderr" in proc_result and proc_result["stderr"]:
            sys.stderr.write(proc_result["stderr"])
    # now that output is settled, we can write remaining signatures
    for idx, res in self.proc_results.items():
        if (
            self.pending_signatures[idx] is not None
            and res["ret_code"] == 0
            and "sig_skipped" not in res
        ):
            # task might return output with vars #1355
            self.pending_signatures[idx].set_output(self.output_groups[idx])
            self.pending_signatures[idx].write()
        if res["ret_code"] != 0 and "output" in res:
            clear_output(output=res["output"])
    # then handle failed substeps
    for proc_result in [
        x for x in self.proc_results.values() if x["ret_code"] != 0
    ]:
        if "stdout" in proc_result and proc_result["stdout"]:
            sys.stdout.write(proc_result["stdout"])
        if "stderr" in proc_result and proc_result["stderr"]:
            sys.stderr.write(proc_result["stderr"])
        if "exception" in proc_result:
            excp = proc_result["exception"]
            if isinstance(excp, StopInputGroup):
                # deliberate early stop of this input group, not an error
                if excp.message:
                    env.logger.info(excp.message)
                self.output_groups[proc_result["index"]] = sos_targets([])
            elif isinstance(excp, RemovedTarget):
                raise excp
            elif "task" in proc_result:
                if env.config["error_mode"] == "ignore":
                    env.logger.warning(f"Ignore failed task {proc_result['task']}.")
                # if the exception is from a task...
                self.exec_error.append(proc_result["task"], excp)
            # NOTE(review): exceptions that are neither StopInputGroup,
            # RemovedTarget, nor task-related are not recorded here —
            # confirm this is intended.
        else:
            self.exec_error.append(
                RuntimeError(
                    f"Substep failed with return code {proc_result['ret_code']}"
                )
            )
    # this is after all substeps have been completed
    if self.exec_error.errors:
        raise self.exec_error
def calculate_completed(self):
    """Derive step-level completion ratios from substep counters.

    Sets ``__step_completed__`` and ``__step_skipped__`` in
    ``self.completed`` to the fraction of substeps that were completed
    and skipped respectively.  Whole-number fractions are stored as
    ``int`` (e.g. ``1`` instead of ``1.0``) for cleaner reporting.

    Fix: guard against ZeroDivisionError when there are no substeps at
    all (both counters zero, e.g. an empty input group) — both ratios
    are then reported as 0.
    """
    substeps = (
        self.completed["__substep_completed__"]
        + self.completed["__substep_skipped__"]
    )
    if not substeps:
        # no substeps: nothing completed, nothing skipped
        self.completed["__step_completed__"] = 0
        self.completed["__step_skipped__"] = 0
        return
    self.completed["__step_completed__"] = (
        self.completed["__substep_completed__"] / substeps
    )
    self.completed["__step_skipped__"] = (
        self.completed["__substep_skipped__"] / substeps
    )
    if self.completed["__step_completed__"].is_integer():
        self.completed["__step_completed__"] = int(
            self.completed["__step_completed__"]
        )
    if self.completed["__step_skipped__"].is_integer():
        self.completed["__step_skipped__"] = int(self.completed["__step_skipped__"])
def run(self):
    """Execute the step.  Main driver of a step's lifecycle.

    This generator prepares step-level variables in ``env.sos_dict``,
    processes the ``input:`` statement to split input into substeps,
    then executes each substep (inline, through concurrent substep
    workers, or via external tasks), waits for pending tasks and nested
    subworkflows, and finally assembles ``step_output`` /
    ``step_depends`` / shared variables into the step result.

    Whenever the step must wait on an external event (dependency
    resolution, subworkflow or task completion) it yields to the runner
    and resumes with the runner's response.
    """
    # return value of the last executed statement
    self.last_res = None
    self.start_time = time.time()
    self.completed = defaultdict(int)
    #
    # prepare environments, namely variables that can be used by the step
    #
    # * step_name: name of the step, can be used by step process to determine
    # actions dynamically.
    env.sos_dict.set("step_name", self.step.step_name())
    env.sos_dict.set("__last_step__", self.step.last_step)
    self.log("start")
    env.sos_dict.set(
        "step_id",
        textMD5(
            f'{env.sos_dict["workflow_id"]} {env.sos_dict["step_name"]} {self.step.md5}'
        ),
    )
    env.sos_dict.set("master_id", env.config["master_id"])
    # used by nested workflow
    env.sos_dict.set("__step_context__", self.step.context)
    env.sos_dict.set("_runtime", {})
    # * input: input files, which should be __step_output__ if it is defined, or
    # None otherwise.
    # * _input: first batch of input, which should be input if no input statement is used
    # * output: None at first, can be redefined by output statement
    # * _output: None at first, can be redefined by output statement
    # * depends: None at first, can be redefined by depends statement
    # * _depends: None at first, can be redefined by depends statement
    #
    self.init_input_output_vars()
    # _index is needed for pre-input action's active option and for debug output of scripts
    env.sos_dict.set("_index", 0)
    if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
        env.log_to_file(
            "STEP",
            f'Executing step {env.sos_dict["step_name"]} with step_input {env.sos_dict["step_input"]} and step_output {env.sos_dict["step_output"]}',
        )
    self.set_task_queue_from_task_params()
    # look for input statement.
    input_statement_idx = [
        idx
        for idx, x in enumerate(self.step.statements)
        if x[0] == ":" and x[1] == "input"
    ]
    if not input_statement_idx:
        input_statement_idx = None
    elif len(input_statement_idx) == 1:
        input_statement_idx = input_statement_idx[0]
    else:
        raise ValueError(
            f"More than one step input are specified in step {self.step.step_name(True)}"
        )
    # if shared is true, we have to disable concurrent because we
    # do not yet return anything from shared.
    self.concurrent_substep = "shared" not in self.step.options
    # and \
    # ('concurrent' not in env.sos_dict['_runtime'] or env.sos_dict['_runtime']['concurrent'] is True)
    if input_statement_idx is not None:
        # execute before input stuff
        for statement in self.step.statements[:input_statement_idx]:
            if statement[0] == ":":
                # wait for all dependent targets to be resolved
                key, value = statement[1:3]
                if key != "depends":
                    raise ValueError(f"Step input should be specified before {key}")
                while True:
                    try:
                        args, kwargs = SoS_eval(
                            f"__null_func__({value})",
                            extra_dict={
                                "__null_func__": __null_func__,
                                "output_from": __output_from__,
                                "named_output": __named_output__,
                                "traced": __traced__,
                            },
                        )
                        dfiles = expand_depends_files(*args)
                        # dfiles can be Undetermined
                        runner = self.process_depends_args(dfiles, **kwargs)
                        try:
                            yreq = next(runner)
                            while True:
                                yres = yield yreq
                                yreq = runner.send(yres)
                        except StopIteration:
                            pass
                    except (UnknownTarget, RemovedTarget) as e:
                        runner = self.handle_unknown_target(e)
                        try:
                            yreq = next(runner)
                            while True:
                                yres = yield yreq
                                yreq = runner.send(yres)
                        except StopIteration:
                            pass
                        continue
                    except UnavailableLock:
                        raise
                    except Exception as e:
                        raise RuntimeError(
                            f"Failed to process step {key} ({value.strip()}): {e}"
                        )
                    break
            else:
                try:
                    # 1354
                    # if there are definition before input, the definitions and imports
                    # must be added to global_def in order to be executed by substeps
                    if any(x in statement[1] for x in ("class", "def", "import")):
                        step_def = KeepOnlyImportAndDefine().visit(
                            ast.parse(statement[1])
                        )
                        if step_def.body:
                            if isinstance(self.step.global_def, ast.Module):
                                self.step.global_def.body.extend(step_def.body)
                            else:
                                self.step.global_def = step_def
                    self.execute(statement[1])
                except StopInputGroup as e:
                    # stop before substeps, because there is no output statement before it
                    # we do not have to worry about keep_output
                    if e.message:
                        env.logger.info(e.message)
                    return self.collect_result()
        # input statement
        stmt = self.step.statements[input_statement_idx][2]
        self.log("input statement", stmt)
        while True:
            # wait for all targets to be resolved
            try:
                args, kwargs = SoS_eval(
                    f"__null_func__({stmt})",
                    extra_dict={
                        "__null_func__": __null_func__,
                        "output_from": __output_from__,
                        "named_output": __named_output__,
                        "traced": __traced__,
                    },
                )
                # Files will be expanded differently with different running modes
                input_files: sos_targets = expand_input_files(
                    *args,
                    **{
                        k: v
                        for k, v in kwargs.items()
                        if k not in SOS_INPUT_OPTIONS
                    },
                )
                runner = self.process_input_args(
                    input_files,
                    **{k: v for k, v in kwargs.items() if k in SOS_INPUT_OPTIONS},
                )
                try:
                    yreq = next(runner)
                    while True:
                        yres = yield yreq
                        yreq = runner.send(yres)
                except StopIteration as e:
                    self._substeps = e.value
                #
                if "concurrent" in kwargs and self.concurrent_substep:
                    # concurrent can be True/False or an integer
                    self.concurrent_substep = kwargs["concurrent"]
            except (UnknownTarget, RemovedTarget) as e:
                runner = self.handle_unknown_target(e)
                try:
                    yreq = next(runner)
                    while True:
                        yres = yield yreq
                        yreq = runner.send(yres)
                except StopIteration:
                    pass
                continue
            except UnavailableLock:
                raise
            except Exception as e:
                raise ValueError(f"Failed to process input statement {stmt}: {e}")
            break
        input_statement_idx += 1
    elif env.sos_dict["step_input"].groups:
        # if default has groups...
        # default case
        self._substeps = env.sos_dict["step_input"].groups
        # assuming everything starts from 0 is after input
        input_statement_idx = 0
    else:
        # default case
        self._substeps = [env.sos_dict["step_input"]]
        # assuming everything starts from 0 is after input
        input_statement_idx = 0
    self.proc_results = {}
    self.vars_to_be_shared = set()
    if "shared" in self.step.options:
        self.vars_to_be_shared = parse_shared_vars(self.step.options["shared"])
        self.vars_to_be_shared = sorted(
            [
                x[5:] if x.startswith("step_") else x
                for x in self.vars_to_be_shared
                if x not in ("step_", "step_input", "step_output", "step_depends")
            ]
        )
    self.shared_vars = [{} for x in self._substeps]
    # run steps after input statement, which will be run multiple times for each input
    # group.
    env.sos_dict.set("__num_groups__", len(self._substeps))
    # determine if a single index or the whole step should be skipped
    skip_index = False
    # signatures of each index, which can remain to be None if no output
    # is defined.
    self.output_groups = [sos_targets([]) for x in self._substeps]
    self.depends_groups = [sos_targets([]) for x in self._substeps]
    # used to prevent overlapping output from substeps
    self._all_outputs = set()
    self._subworkflow_results = []
    if (
        any("sos_run" in x[1] for x in self.step.statements[input_statement_idx:])
        and "shared" not in self.step.options
        and not self.step.task
        and self.step.statements[-1][0] == "!"
        and (len(self.step.statements) == 1 or self.step.statements[-2][0] == ":")
        and is_sos_run_the_only_last_stmt(self.step.statements[-1][1])
    ):
        env.sos_dict.set("__concurrent_subworkflow__", True)
    if self.concurrent_substep:
        if len(self._substeps) <= 1 or env.config["run_mode"] == "dryrun":
            self.concurrent_substep = False
        elif any(
            "sos_run" in x[1] for x in self.step.statements[input_statement_idx:]
        ):
            self.concurrent_substep = False
            env.logger.debug(
                "Substeps are executed sequentially because of existence of multiple nested workflow."
            )
        else:
            self.prepare_substep()
    try:
        self.completed["__substep_skipped__"] = 0
        self.completed["__substep_completed__"] = len(self._substeps)
        self._completed_concurrent_substeps = 0
        # pending signatures are signatures for steps with external tasks
        self.pending_signatures = [None for x in self._substeps]
        for idx, g in enumerate(self._substeps):
            #
            # https://github.com/vatlab/sos/issues/1376
            #
            # [default]
            # input: for_each=dict(i=range(1000))
            # sos_run('a', t=i)
            #
            # when we have workflow like the following when steps
            # are executed quickly with subworkflows submitted to the master
            # the master process could be swamped with subworkflows, causing
            # "too many open files".
            #
            # the following code will stop the step from continued
            # execution and wait for the subworkflows to complete.
            #
            if self._subworkflow_results:
                try:
                    runner = self.wait_for_subworkflows(
                        allow_pending=env.config["worker_procs"]
                    )
                    yreq = next(runner)
                    while True:
                        yres = yield yreq
                        yreq = runner.send(yres)
                except StopIteration:
                    pass
            # other variables
            #
            _vars = {}
            # now, let us expose target level variables as lists
            if len(g) > 1:
                names = set.union(*[set(x._dict.keys()) for x in g._targets])
            elif len(g) == 1:
                names = set(g._targets[0]._dict.keys())
            else:
                names = set()
            for name in names:
                _vars[name] = [x.get(name) for x in g._targets]
            # then we expose all group level variables
            _vars.update(g._dict)
            _vars.update(env.sos_dict["step_input"]._dict)
            env.sos_dict.update(_vars)
            env.sos_dict.set("_input", copy.deepcopy(g))
            # set vars to _input
            # env.sos_dict['_input'].set(**v)
            self.log("_input")
            env.sos_dict.set("_index", idx)
            if env.config["error_mode"] == "ignore":
                missed = [x for x in g.targets if not x.target_exists()]
                if missed:
                    if any(isinstance(x, invalid_target) for x in missed):
                        env.logger.warning(
                            f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to invalid input caused by previous failed substep.'
                        )
                    else:
                        env.logger.warning(
                            f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to missing input {sos_targets(missed)}'
                        )
                    self.output_groups[idx] = sos_targets(invalid_target())
                    env.sos_dict.set("_output", sos_targets(invalid_target()))
                    self.skip_substep()
                    continue
            # in interactive mode, because sos_dict are always shared
            # execution of a substep, especially when it calls a nested
            # workflow, would change step_name, __step_context__ etc, and
            # we will have to reset these variables to make sure the next
            # substep would execute normally. Batch mode is immune to this
            # problem because nested workflows are executed in their own
            # process/context etc
            if env.config["run_mode"] == "interactive":
                env.sos_dict.set("step_name", self.step.step_name())
                env.sos_dict.set(
                    "step_id",
                    hash(
                        (
                            env.sos_dict["workflow_id"],
                            env.sos_dict["step_name"],
                            self.step.md5,
                        )
                    ),
                )
                # used by nested workflow
                env.sos_dict.set("__step_context__", self.step.context)
            #
            pre_statement = []
            if (
                not any(
                    st[0] == ":" and st[1] == "output"
                    for st in self.step.statements[input_statement_idx:]
                )
                and "__default_output__" in env.sos_dict
            ):
                pre_statement = [[":", "output", "_output"]]
            # if there is no statement, no task, claim success
            post_statement = []
            if not self.step.statements or self.step.statements[-1][0] != "!":
                if self.step.task:
                    # if there is only task, we insert a fake statement so that it can be executed by the executor
                    post_statement = [["!", ""]]
                else:
                    # complete case: no step, no statement
                    send_message_to_controller(
                        ["progress", "substep_completed", env.sos_dict["step_id"]]
                    )
            all_statements = (
                pre_statement
                + self.step.statements[input_statement_idx:]
                + post_statement
            )
            self.is_input_verified = True
            for statement_idx, statement in enumerate(all_statements):
                is_last_runblock = statement_idx == len(all_statements) - 1
                # if input is undetermined, we can only process output:
                if not g.valid() and statement[0] != ":":
                    raise RuntimeError("Undetermined input encountered")
                if statement[0] == ":":
                    key, value = statement[1:3]
                    # output, depends, and process can be processed multiple times
                    while True:
                        # loop for all unresolved targets to be resolved
                        try:
                            args, kwargs = SoS_eval(
                                f"__null_func__({value})",
                                extra_dict={
                                    "__null_func__": __null_func__,
                                    "output_from": __output_from__,
                                    "named_output": __named_output__,
                                    "traced": __traced__,
                                },
                            )
                            # dynamic output or dependent files
                            if key == "output":
                                # if output is defined, its default value needs to be cleared
                                if idx == 0:
                                    env.sos_dict.set("step_output", sos_targets())
                                ofiles: sos_targets = expand_output_files(
                                    value,
                                    *args,
                                    **{
                                        k: v
                                        for k, v in kwargs.items()
                                        if k not in SOS_OUTPUT_OPTIONS
                                    },
                                )
                                if g.valid() and ofiles.valid():
                                    if any(
                                        x in g._targets
                                        for x in ofiles
                                        if not isinstance(x, sos_step)
                                    ):
                                        raise RuntimeError(
                                            f'Overlapping input and output files: {", ".join(repr(x) for x in ofiles if x in g)}'
                                        )
                                # set variable _output and output
                                self.process_output_args(
                                    ofiles,
                                    **{
                                        k: v
                                        for k, v in kwargs.items()
                                        if k in SOS_OUTPUT_OPTIONS
                                    },
                                )
                                self.output_groups[idx] = env.sos_dict["_output"]
                            elif key == "depends":
                                try:
                                    dfiles = expand_depends_files(*args)
                                    # dfiles can be Undetermined
                                    runner = self.process_depends_args(
                                        dfiles, **kwargs
                                    )
                                    try:
                                        yreq = next(runner)
                                        while True:
                                            yres = yield yreq
                                            yreq = runner.send(yres)
                                    except StopIteration:
                                        pass
                                    self.depends_groups[idx] = env.sos_dict[
                                        "_depends"
                                    ]
                                    self.log("_depends")
                                except Exception:
                                    # env.logger.info(e)
                                    raise
                            else:
                                raise RuntimeError(f"Unrecognized directive {key}")
                            # everything is ok, break
                            break
                        except (UnknownTarget, RemovedTarget) as e:
                            runner = self.handle_unknown_target(e)
                            try:
                                yreq = next(runner)
                                while True:
                                    yres = yield yreq
                                    yreq = runner.send(yres)
                            except StopIteration:
                                pass
                            continue
                        except UnavailableLock:
                            raise
                        except Exception as e:
                            # if input is Undetermined, it is possible that output cannot be processed
                            # due to that, and we just return
                            if not g.valid():
                                env.logger.debug(e)
                                return self.collect_result()
                            raise RuntimeError(
                                f"Failed to process step {key} ({value.strip()}): {e}"
                            )
                elif is_last_runblock:
                    if (
                        env.config["sig_mode"] == "skip"
                        and not self.vars_to_be_shared
                        and "sos_run" not in statement[1]
                        and not env.sos_dict["_output"].unspecified()
                        and len(env.sos_dict["_output"]) > 0
                        and all(
                            x.target_exists()
                            for x in env.sos_dict["_output"].targets
                        )
                        and env.sos_dict["_output"].later_than(
                            env.sos_dict["_input"]
                        )
                    ):
                        self.skip_substep()
                        env.logger.info(
                            f'``{env.sos_dict["step_name"]}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``skipped`` with existing output.'
                        )
                        skip_index = True
                        # do not execute the rest of the statement
                        break
                    #
                    # default mode, check if skipping substep
                    sig = None
                    if (
                        env.config["sig_mode"]
                        not in ("ignore", "distributed", "build")
                        and not env.sos_dict["_output"].unspecified()
                    ):
                        sig = RuntimeInfo(
                            statementMD5([statement[1], self.step.task]),
                            env.sos_dict["_input"],
                            env.sos_dict["_output"],
                            env.sos_dict["_depends"],
                            env.sos_dict["__signature_vars__"],
                            shared_vars=self.vars_to_be_shared,
                        )
                        matched = validate_step_sig(sig)
                        skip_index = bool(matched)
                        if skip_index:
                            # matched["output"] might have vars not defined in "output" #1355
                            env.sos_dict.set("_output", matched["output"])
                            self.output_groups[idx] = matched["output"]
                            if "vars" in matched:
                                self.shared_vars[idx].update(matched["vars"])
                            self.skip_substep()
                            break
                    try:
                        if self.concurrent_substep:
                            self.concurrent_exec(statement, sig)
                            # we check if the previous task has been completed and process them
                            # because further steps might need to be done
                            try:
                                runner = self.process_returned_substep_result(
                                    till=idx + 1, wait=False
                                )
                                yreq = next(runner)
                                while True:
                                    yres = yield yreq
                                    yreq = runner.send(yres)
                            except StopIteration:
                                pass
                        elif (
                            env.config["sig_mode"] == "ignore"
                            or env.sos_dict["_output"].unspecified()
                        ):
                            self.local_exec_without_signature(statement)
                        else:
                            skip_index = self.local_exec_with_signature(
                                statement, sig
                            )
                            if skip_index:
                                self.skip_substep()
                                break
                    except StopInputGroup as e:
                        if not e.keep_output:
                            clear_output()
                            self.output_groups[idx] = sos_targets([])
                        if e.message:
                            env.logger.info(e.message)
                        skip_index = True
                        break
                    except Exception as e:
                        clear_output()
                        if env.config["error_mode"] == "abort":
                            raise
                        elif env.config["error_mode"] == "ignore":
                            idx_msg = (
                                f'(id={env.sos_dict["step_id"]}, index={idx})'
                                if len(self._substeps) > 1
                                else f'(id={env.sos_dict["step_id"]})'
                            )
                            env.logger.warning(
                                f"{self.step.step_name(True)} {idx_msg} returns no output due to error: {e}"
                            )
                            self.output_groups[idx] = sos_targets(invalid_target())
                            skip_index = True
                        else:
                            if env.config["run_mode"] != "interactive":
                                # default mode
                                idx_msg = (
                                    f'(id={env.sos_dict["step_id"]}, index={idx})'
                                    if len(self._substeps) > 1
                                    else f'(id={env.sos_dict["step_id"]})'
                                )
                                env.logger.error(
                                    f"{self.step.step_name(True)} {idx_msg} returns an error."
                                )
                            self.exec_error.append(str(idx), e)
                else:
                    # if it is not the last statement group (e.g. statements before :output)
                    # we execute locally without anything like signature
                    if self.is_input_verified:
                        verify_input()
                        self.is_input_verified = False
                    try:
                        self.execute(statement[1])
                    except StopInputGroup as e:
                        if not e.keep_output:
                            clear_output()
                            self.output_groups[idx] = sos_targets([])
                        if e.message:
                            env.logger.info(e.message)
                        skip_index = True
                        break
                    except Exception:
                        clear_output()
                        raise
            # if there is no statement , but there are tasks, we should
            # check signature here.
            if (
                (not self.step.statements or self.step.statements[-1][0] != "!")
                and self.step.task
                and not self.concurrent_substep
                and env.config["sig_mode"] != "ignore"
                and not env.sos_dict["_output"].unspecified()
            ):
                skip_index = self.check_task_sig()
            # if this index is skipped, go directly to the next one
            if skip_index:
                self.completed["__substep_skipped__"] += 1
                self.completed["__substep_completed__"] -= 1
                skip_index = False
                continue
            # if concurrent input group, tasks are handled in substep
            if self.concurrent_substep or not self.step.task:
                continue
            if env.config["run_mode"] == "dryrun" and env.sos_dict["_index"] != 0:
                continue
            # # check if the task is active
            # if 'active' in env.sos_dict['_runtime']:
            #     if not self.is_task_active():
            #         continue
            #
            self.log("task")
            try:
                task_id, taskdef, task_vars = create_task(
                    self.step.global_def,
                    self.step.global_vars,
                    self.step.task,
                    self.step.task_params,
                )
                task = self.submit_task(
                    {
                        "index": env.sos_dict["_index"],
                        "task_id": task_id,
                        "task_def": taskdef,
                        "task_vars": task_vars,
                    }
                )
                self.proc_results[env.sos_dict["_index"]] = task
            except Exception as e:
                # FIXME: cannot catch exception from subprocesses
                if env.verbosity > 2:
                    sys.stderr.write(get_traceback())
                raise RuntimeError(
                    f'Failed to execute process\n"{short_repr(self.step.task)}"\n{e}'
                )
            #
            # # if not concurrent, we have to wait for the completion of the task
            # if 'concurrent' in env.sos_dict['_runtime'] and env.sos_dict[
            #         '_runtime']['concurrent'] is False:
            #     # in this case the steps must be executed not concurrently
            #     runner = self.wait_for_results(all_submitted=False)
            #     try:
            #         yreq = next(runner)
            #         while True:
            #             yres = yield yreq
            #             yreq = runner.send(yres)
            #     except StopIteration:
            #         pass
        #
        # endfor loop for each input group
        #
        if self._subworkflow_results:
            try:
                runner = self.wait_for_subworkflows(allow_pending=0)
                yreq = next(runner)
                while True:
                    yres = yield yreq
                    yreq = runner.send(yres)
            except StopIteration:
                pass
            env.sos_dict.pop("__concurrent_subworkflow__")
        runner = self.wait_for_results(all_submitted=True)
        try:
            yreq = next(runner)
            while True:
                yres = yield yreq
                yreq = runner.send(yres)
        except StopIteration:
            pass
        for idx, res in self.proc_results.items():
            if "sig_skipped" in res:
                self.completed["__substep_skipped__"] += 1
                self.completed["__substep_completed__"] -= 1
            if "output" in res:
                self.output_groups[idx] = res["output"]
        # check results
        self.check_results()
        # if error happened but we allow all substeps to be completed, we now
        # raise exception
        if self.exec_error.errors:
            raise self.exec_error
        # if output is Undetermined, re-evaluate it
        # finalize output from output_groups because some output might be skipped
        # this is the final version of the output but we do maintain output
        # during the execution of step, for compatibility.
        env.sos_dict.set(
            "step_output", sos_targets([])._add_groups(self.output_groups)
        )
        env.sos_dict.set(
            "step_depends", sos_targets([])._add_groups(self.depends_groups)
        )
        # if there exists an option shared, the variable would be treated as
        # provides=sos_variable(), and then as step_output
        if "shared" in self.step.options:
            self.shared_vars = evaluate_shared(
                self.shared_vars, self.step.options["shared"]
            )
            env.sos_dict.quick_update(self.shared_vars)
        missing = self.verify_output()
        self.log(
            "output",
            msg=f'\033[95m missing: {short_repr(missing)} ({len(missing)} item{"s" if len(missing)>1 else ""})\033[0m'
            if len(missing) > 0
            else "",
        )
        self.calculate_completed()

        def file_only(targets):
            # reduce a sos_targets to (path, size) pairs of file targets
            # for the workflow signature report
            if not isinstance(targets, sos_targets):
                env.logger.warning(
                    f"Unexpected input or output target for reporting. Empty list returned: {targets}"
                )
                return []
            return [
                (str(x), x.size())
                for x in targets._targets
                if isinstance(x, file_target)
            ]

        step_info = {
            "step_id": self.step.md5,
            "start_time": self.start_time,
            "stepname": self.step.step_name(True),
            "substeps": len(self._substeps),
            "input": file_only(env.sos_dict["step_input"]),
            "output": file_only(env.sos_dict["step_output"]),
            "completed": dict(self.completed),
            "end_time": time.time(),
        }
        send_message_to_controller(
            ["workflow_sig", "step", env.sos_dict["workflow_id"], repr(step_info)]
        )
        return self.collect_result()
    finally:
        if self.concurrent_substep:
            close_socket(self.result_pull_socket, "substep collector")
class Step_Executor(Base_Step_Executor):
def __init__(self, step, socket, mode="run"):
    """Create a step executor bound to a communication socket.

    ``mode`` ("run" by default) is recorded both on the instance and in
    the global configuration before the base class is initialized.
    """
    self.run_mode = mode
    env.config["run_mode"] = mode
    super().__init__(step)
    self.socket = socket
    # the step is executed in a separate SoS_Worker process; publishing
    # the socket on env makes it available to all actions run in the step
    env.__socket__ = socket
def submit_tasks(self, tasks):
    """Send task definitions to the executor through the step socket."""
    debug_flags = env.config["SOS_DEBUG"]
    if "TASK" in debug_flags or "ALL" in debug_flags:
        env.log_to_file("TASK", f"Send {tasks}")
    queue = env.sos_dict["_runtime"]["queue"]
    self.socket.send(encode_msg(["tasks", queue] + tasks))
def wait_for_tasks(self, tasks, all_submitted):
    """Generator that collects results for *tasks* from the executor.

    Yields the socket to the runner while waiting; returns a dict of
    accumulated task results once every task has reported back.
    """
    if not tasks:
        return {}
    # when we wait, the "outsiders" also need to see the tags etc
    # of the tasks so we have to write to the database. #156
    send_message_to_controller(["commit_sig"])
    collected = {}
    # keep polling until every task has a result
    while len(collected) < len(tasks):
        # yield an indicator of what is requested, for debugging purpose
        yield self.socket
        reply = decode_msg(self.socket.recv())
        if reply is None:
            sys.exit(0)
        collected.update(reply)
    return collected
def wait_for_subworkflows(self, allow_pending):
try:
allow_pending = int(allow_pending)
except:
allow_pending = min(max(os.cpu_count() // 2, 2), 8)
while self._subworkflow_results:
if allow_pending > 0:
n_pending = sum(
len(x["pending_workflows"]) for x in self._subworkflow_results
)
if n_pending <= allow_pending:
break
# here we did not check if workflow ids match
yield self.socket
res = decode_msg(self.socket.recv())
if res is None:
sys.exit(0)
elif isinstance(res, Exception):
raise res
if not "__workflow_id__" in res:
raise ValueError(f"Unrecognized result from subworkflows: {res}")
# remove from _self._subworkflow_results
result_with_id = [
idx
for idx, x in enumerate(self._subworkflow_results)
if res["__workflow_id__"] in x["pending_workflows"]
]
if not result_with_id:
raise RuntimeError(
f"Failed to identify ID of returned subworkflow: {res}"
)
if len(result_with_id) > 1:
raise RuntimeError(
"Multiple matches of subworkflow ID. This should not happen."
)
self._subworkflow_results[result_with_id[0]]["pending_workflows"].remove(
res["__workflow_id__"]
)
if not self._subworkflow_results[result_with_id[0]]["pending_workflows"]:
self._subworkflow_results.pop(result_with_id[0])
def handle_unknown_target(self, e):
self.socket.send(encode_msg(["missing_target", e.target]))
yield self.socket
res = decode_msg(self.socket.recv())
if not res:
raise e
def verify_dynamic_targets(self, targets):
if not targets:
return
if env.config["trace_existing"]:
traced = targets
else:
traced = [x for x in targets if x.traced]
if not traced:
return
self.socket.send(encode_msg(["dependent_target"] + traced))
yield self.socket
res = decode_msg(self.socket.recv())
if res != "target_resolved":
raise RuntimeError(f"Failed to veryify dependent target {traced}")
def run(self):
try:
try:
# 1218
runner = Base_Step_Executor.run(self)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
res = e.value
if self.socket is not None:
if (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends result {short_repr(res)}",
)
self.socket.send(encode_msg(res))
else:
return res
except RemovedTarget as e:
# removed target needs to be handled differently since the workflow manager
# use type information to get removed targets
if self.socket is not None and not self.socket.closed:
self.socket.send(encode_msg(e))
else:
raise e
except Exception as e:
if env.verbosity > 2:
sys.stderr.write(get_traceback())
if isinstance(e, ProcessKilled):
raise
# if not self.exec_error
if e != self.exec_error:
self.exec_error.append(self.step.step_name(), e)
#
if self.exec_error.errors:
if self.socket is not None and not self.socket.closed:
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends exception {self.exec_error}",
)
self.socket.send(encode_msg(self.exec_error))
else:
raise self.exec_error
| true | true |
f715866e13eb002c14554c06f9cadbe3ff57a70a | 322 | py | Python | generator/framework/util/fs.py | sinsay/ds_generator | 9365e22e8730418caf29b8ed6ada1f30f936a297 | [
"Apache-2.0"
] | null | null | null | generator/framework/util/fs.py | sinsay/ds_generator | 9365e22e8730418caf29b8ed6ada1f30f936a297 | [
"Apache-2.0"
] | null | null | null | generator/framework/util/fs.py | sinsay/ds_generator | 9365e22e8730418caf29b8ed6ada1f30f936a297 | [
"Apache-2.0"
] | null | null | null | import os
def mkdir_without_exception(target):
    """Create directory *target* (including parents) without failing if it exists.

    Uses ``os.makedirs(..., exist_ok=True)``, so an already-existing directory
    is silently accepted.  A ``FileExistsError`` can still be raised when the
    path exists but is not a directory; in that case a notice is printed and
    the caller is expected to continue with the next generation phase.
    """
    try:
        os.makedirs(target, exist_ok=True)
    except FileExistsError:
        print("the directory %s already exists. continue the next gen phase." % target)
| 23 | 87 | 0.552795 | import os
def mkdir_without_exception(target):
    """Ensure the directory *target* exists; tolerate pre-existing paths."""
    try:
        os.makedirs(target, exist_ok=True)
    except FileExistsError:
        notice = "the directory %s already exists. continue the next gen phase." % target
        print(notice)
| true | true |
f71586bd484ad9828fd3d9ba20d058a77b29f8ff | 91 | py | Python | app/handlers/__init__.py | Katel212/MyPersonalKitchenBot | 03de0beeaf2665e8b3ddd1709da3d4edcd422b80 | [
"MIT"
] | null | null | null | app/handlers/__init__.py | Katel212/MyPersonalKitchenBot | 03de0beeaf2665e8b3ddd1709da3d4edcd422b80 | [
"MIT"
] | 5 | 2020-12-22T17:53:05.000Z | 2021-04-07T20:00:47.000Z | app/handlers/__init__.py | Katel212/MyPersonalKitchenBot | 03de0beeaf2665e8b3ddd1709da3d4edcd422b80 | [
"MIT"
] | null | null | null | from .errors import *
from .private import *
from .callback import *
from .states import *
| 18.2 | 23 | 0.736264 | from .errors import *
from .private import *
from .callback import *
from .states import *
| true | true |
f71586c2e3611f2c07d319406a22e6a386a06e89 | 695 | py | Python | app/core/management/commands/wait_for_db.py | martinramirezboggio/recipe-app-api | 8f576ae036ba9a55e75a76465e97e0340378572e | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | martinramirezboggio/recipe-app-api | 8f576ae036ba9a55e75a76465e97e0340378572e | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | martinramirezboggio/recipe-app-api | 8f576ae036ba9a55e75a76465e97e0340378572e | [
"MIT"
] | null | null | null | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """django command to pause execution until db is ready"""

    def handle(self, *args, **options):
        """Poll the default database once per second until it accepts connections."""
        self.stdout.write('Waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                # Merely indexing `connections` returns a lazy wrapper without
                # touching the database, so force a real connection attempt;
                # otherwise OperationalError would never be raised here.
                connections['default'].ensure_connection()
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available!'))
| 30.217391 | 78 | 0.638849 | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that blocks until the default database is obtainable."""

    def handle(self, *args, **options):
        self.stdout.write('Waiting for database...')
        while True:
            try:
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
            else:
                if db_conn:
                    break
        self.stdout.write(self.style.SUCCESS('Database available!'))
| true | true |
f71587d10ef1d1aed4af3fd809bfa4096755e581 | 7,494 | py | Python | e2e/tests/selenium/page_objects.py | p2pu/learning-circles | ccd94208ec18082f8fda6d7f21eacdd71bad6023 | [
"MIT"
] | 10 | 2016-05-03T20:41:25.000Z | 2021-09-17T18:42:01.000Z | e2e/tests/selenium/page_objects.py | p2pu/learning-circles | ccd94208ec18082f8fda6d7f21eacdd71bad6023 | [
"MIT"
] | 655 | 2016-05-04T19:00:35.000Z | 2022-03-28T13:09:20.000Z | e2e/tests/selenium/page_objects.py | p2pu/learning-circles | ccd94208ec18082f8fda6d7f21eacdd71bad6023 | [
"MIT"
] | 8 | 2016-05-06T10:24:27.000Z | 2020-10-21T00:56:59.000Z | from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from e2e.tests.selenium.locators import LearningCircleCreationPageLocators
from e2e.tests.selenium.locators import RegistrationModalLocators
import datetime
import time
class BasePage(object):
    """Shared helpers for Selenium page objects: holds the driver and an
    explicit WebDriverWait, and provides plain/rich text field fillers."""

    def __init__(self, driver, wait):
        self.driver = driver
        self.wait = wait

    def fill_text_field(self, locator, *text):
        """Best-effort clear the element at *locator*, then type *text* into it."""
        input_field = self.driver.find_element(*locator)
        try:
            input_field.clear()
        except Exception:
            # clear() can fail on some widgets (e.g. read-only pickers); the
            # bare `except:` used previously also swallowed KeyboardInterrupt
            # and SystemExit, which we must not do.
            pass
        finally:
            input_field.send_keys(*text)

    def fill_rich_text_field(self, locator, *text):
        """Type *text* into the TinyMCE editor hosted in the iframe at *locator*."""
        tinymce_iframe = self.wait.until(expected_conditions.presence_of_element_located(locator))
        # NOTE: switch_to_frame/switch_to_default_content are deprecated
        # Selenium APIs (kept for compatibility with the pinned driver).
        self.driver.switch_to_frame(tinymce_iframe)
        rich_text_field = self.wait.until(expected_conditions.presence_of_element_located(LearningCircleCreationPageLocators.TINYMCE_FIELD))
        rich_text_field.send_keys(*text)
        self.driver.switch_to_default_content()
class LearningCircleCreationPage(BasePage):
    """Page object for the learning-circle creation wizard (course selection,
    venue, schedule, description and facilitator tabs)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def fill_out_form_correctly(self):
        # Walks every tab of the wizard with known-good sample data; used by
        # happy-path end-to-end tests.
        self.select_first_course()
        self.click_next_button()
        self.fill_city_select_field("Kitchener")
        self.fill_text_field(LearningCircleCreationPageLocators.VENUE_NAME_FIELD, "KPL")
        self.fill_text_field(LearningCircleCreationPageLocators.VENUE_DETAILS_FIELD, "Hacienda Cafe")
        self.fill_text_field(LearningCircleCreationPageLocators.VENUE_ADDRESS_FIELD, "85 Queen St N, Kitchener")
        self.click_next_button()
        self.select_start_date()
        self.select_suggested_dates()
        self.wait.until(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '#selected-dates li')))
        self.fill_text_field(LearningCircleCreationPageLocators.MEETING_TIME_FIELD, "7:00 PM", Keys.ENTER)
        self.fill_text_field(LearningCircleCreationPageLocators.MEETING_END_TIME_FIELD, "8:00 PM", Keys.ENTER)
        self.click_next_button()
        self.fill_text_field(LearningCircleCreationPageLocators.TITLE_FIELD, "Sharon's Learning Circle")
        self.fill_rich_text_field(LearningCircleCreationPageLocators.DESCRIPTION_FIELD, "Welcome to my learning circle!")
        self.fill_rich_text_field(LearningCircleCreationPageLocators.COURSE_DESCRIPTION_FIELD, "This is the course description")
        self.fill_text_field(LearningCircleCreationPageLocators.SIGNUP_QUESTION_FIELD, "What do you want to learn?")
        self.fill_text_field(LearningCircleCreationPageLocators.VENUE_WEBSITE_FIELD, "https://www.kpl.org")
        self.click_next_button()
        self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_GOAL_FIELD, "Have a great learning circle")
        self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_CONCERNS_FIELD, "Nothing really")
    def select_start_date(self):
        # Picks today's cell in the calendar widget.
        calendar_date = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CALENDAR_TODAY))
        calendar_date.click()
    def select_suggested_dates(self):
        btn = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ACCEPT_SUGGESTED_DATES_BUTTON))
        # use this instead of btn.click() since the button is out of view
        self.driver.execute_script("return arguments[0].click();", btn)
    def select_first_course(self):
        # Waits for the course list, clicks the first course's select button,
        # then asserts the selection stuck by checking for the remove link.
        course_cards = self.wait.until(expected_conditions.visibility_of_all_elements_located(LearningCircleCreationPageLocators.COURSE_CARDS))
        self.wait.until(expected_conditions.text_to_be_present_in_element(LearningCircleCreationPageLocators.FIRST_COURSE_TITLE, "Academic Writing"))
        course_select_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.FIRST_COURSE_BUTTON))
        # button is out of view
        self.driver.execute_script("return arguments[0].click();", course_select_button)
        # wait until search container is gone
        self.wait.until_not(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '.search-container')))
        remove_link = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators. REMOVE_COURSE_SELECTION_LINK))
        assert 'Remove selection' in remove_link.text
    def fill_city_select_field(self, location):
        # Types into the city autocomplete and confirms the first suggestion.
        city_select = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators.CITY_SELECT_INPUT))
        city_select.send_keys(location)
        self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CITY_SELECT_OPTION))
        city_select.send_keys(Keys.ENTER)
    # --- simple "wait until clickable, then click" helpers -------------------
    def click_next_button(self):
        next_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.NEXT_TAB_BUTTON))
        next_button.click()
    def click_publish_button(self):
        publish_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.PUBLISH_BUTTON))
        publish_button.click()
    def click_save_button(self):
        publish_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SAVE_BUTTON))
        publish_button.click()
    def click_modal_button(self):
        modal_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.MODAL_BUTTON))
        modal_button.click()
    def click_schedule_meetings_button(self):
        meetings_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SCHEDULE_MEETINGS_BUTTON))
        meetings_button.click()
    def click_login_link(self):
        # NOTE(review): find_element_by_css_selector is removed in Selenium 4;
        # presumably the suite pins Selenium 3 — confirm before upgrading.
        self.driver.find_element_by_css_selector('.registration-modal-content button:first-child').click()
    def fill_out_login_modal(self, user_data):
        # assumes user_data carries "email" and "password" keys
        self.fill_text_field(RegistrationModalLocators.EMAIL_FIELD, user_data["email"])
        self.fill_text_field(RegistrationModalLocators.PASSWORD_FIELD, user_data["password"])
        self.driver.find_element(*RegistrationModalLocators.SUBMIT_BUTTON).click()
    # --- wizard tab navigation ----------------------------------------------
    def go_to_tab_1(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_1))
        tab_button.click()
    def go_to_tab_2(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_2))
        tab_button.click()
    def go_to_tab_3(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_3))
        tab_button.click()
    def go_to_tab_4(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_4))
        tab_button.click()
    def go_to_tab_5(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_5))
        tab_button.click()
    def close_alert(self):
        close_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ALERT_CLOSE_BUTTON))
        close_button.click()
| 49.96 | 154 | 0.777022 | from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from e2e.tests.selenium.locators import LearningCircleCreationPageLocators
from e2e.tests.selenium.locators import RegistrationModalLocators
import datetime
import time
class BasePage(object):
    """Base Selenium page object: stores driver/wait and offers field fillers."""

    def __init__(self, driver, wait):
        self.driver = driver
        self.wait = wait

    def fill_text_field(self, locator, *text):
        """Clear (best effort) the element at *locator*, then type *text*."""
        field = self.driver.find_element(*locator)
        try:
            field.clear()
        except BaseException:
            # equivalent to the historical bare `except:` — intentionally
            # preserved so behaviour is unchanged
            pass
        finally:
            field.send_keys(*text)

    def fill_rich_text_field(self, locator, *text):
        """Type *text* into the TinyMCE editor inside the iframe at *locator*."""
        frame = self.wait.until(expected_conditions.presence_of_element_located(locator))
        self.driver.switch_to_frame(frame)
        editor = self.wait.until(expected_conditions.presence_of_element_located(LearningCircleCreationPageLocators.TINYMCE_FIELD))
        editor.send_keys(*text)
        self.driver.switch_to_default_content()
class LearningCircleCreationPage(BasePage):
    """Page object for the learning-circle creation wizard."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def fill_out_form_correctly(self):
        # Walks every wizard tab with known-good sample data (happy path).
        self.select_first_course()
        self.click_next_button()
        self.fill_city_select_field("Kitchener")
        self.fill_text_field(LearningCircleCreationPageLocators.VENUE_NAME_FIELD, "KPL")
        self.fill_text_field(LearningCircleCreationPageLocators.VENUE_DETAILS_FIELD, "Hacienda Cafe")
        self.fill_text_field(LearningCircleCreationPageLocators.VENUE_ADDRESS_FIELD, "85 Queen St N, Kitchener")
        self.click_next_button()
        self.select_start_date()
        self.select_suggested_dates()
        self.wait.until(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '#selected-dates li')))
        self.fill_text_field(LearningCircleCreationPageLocators.MEETING_TIME_FIELD, "7:00 PM", Keys.ENTER)
        self.fill_text_field(LearningCircleCreationPageLocators.MEETING_END_TIME_FIELD, "8:00 PM", Keys.ENTER)
        self.click_next_button()
        self.fill_text_field(LearningCircleCreationPageLocators.TITLE_FIELD, "Sharon's Learning Circle")
        self.fill_rich_text_field(LearningCircleCreationPageLocators.DESCRIPTION_FIELD, "Welcome to my learning circle!")
        self.fill_rich_text_field(LearningCircleCreationPageLocators.COURSE_DESCRIPTION_FIELD, "This is the course description")
        self.fill_text_field(LearningCircleCreationPageLocators.SIGNUP_QUESTION_FIELD, "What do you want to learn?")
        self.fill_text_field(LearningCircleCreationPageLocators.VENUE_WEBSITE_FIELD, "https://www.kpl.org")
        self.click_next_button()
        self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_GOAL_FIELD, "Have a great learning circle")
        self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_CONCERNS_FIELD, "Nothing really")
    def select_start_date(self):
        # Picks today's cell in the calendar widget.
        calendar_date = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CALENDAR_TODAY))
        calendar_date.click()
    def select_suggested_dates(self):
        btn = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ACCEPT_SUGGESTED_DATES_BUTTON))
        # use this instead of btn.click() since the button is out of view
        self.driver.execute_script("return arguments[0].click();", btn)
    def select_first_course(self):
        # Clicks the first course card's select button and verifies selection.
        course_cards = self.wait.until(expected_conditions.visibility_of_all_elements_located(LearningCircleCreationPageLocators.COURSE_CARDS))
        self.wait.until(expected_conditions.text_to_be_present_in_element(LearningCircleCreationPageLocators.FIRST_COURSE_TITLE, "Academic Writing"))
        course_select_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.FIRST_COURSE_BUTTON))
        # button is out of view
        self.driver.execute_script("return arguments[0].click();", course_select_button)
        # wait until search container is gone
        self.wait.until_not(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '.search-container')))
        remove_link = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators. REMOVE_COURSE_SELECTION_LINK))
        assert 'Remove selection' in remove_link.text
    def fill_city_select_field(self, location):
        # Types into the city autocomplete and confirms the first suggestion.
        city_select = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators.CITY_SELECT_INPUT))
        city_select.send_keys(location)
        self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CITY_SELECT_OPTION))
        city_select.send_keys(Keys.ENTER)
    def click_next_button(self):
        next_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.NEXT_TAB_BUTTON))
        next_button.click()
    def click_publish_button(self):
        publish_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.PUBLISH_BUTTON))
        publish_button.click()
    def click_save_button(self):
        publish_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SAVE_BUTTON))
        publish_button.click()
    def click_modal_button(self):
        modal_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.MODAL_BUTTON))
        modal_button.click()
    def click_schedule_meetings_button(self):
        meetings_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SCHEDULE_MEETINGS_BUTTON))
        meetings_button.click()
    def click_login_link(self):
        # NOTE(review): find_element_by_css_selector is removed in Selenium 4.
        self.driver.find_element_by_css_selector('.registration-modal-content button:first-child').click()
    def fill_out_login_modal(self, user_data):
        # assumes user_data carries "email" and "password" keys
        self.fill_text_field(RegistrationModalLocators.EMAIL_FIELD, user_data["email"])
        self.fill_text_field(RegistrationModalLocators.PASSWORD_FIELD, user_data["password"])
        self.driver.find_element(*RegistrationModalLocators.SUBMIT_BUTTON).click()
    def go_to_tab_1(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_1))
        tab_button.click()
    def go_to_tab_2(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_2))
        tab_button.click()
    def go_to_tab_3(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_3))
        tab_button.click()
    def go_to_tab_4(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_4))
        tab_button.click()
    def go_to_tab_5(self):
        tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_5))
        tab_button.click()
    def close_alert(self):
        close_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ALERT_CLOSE_BUTTON))
        close_button.click()
| true | true |
f71589db678f6272d81bf39a0e17b2bd21472491 | 8,808 | py | Python | postman/forms.py | StriveForBest/django-postman | 25f5fcf5a6d54dbb22b393432701652c21e49552 | [
"BSD-3-Clause"
] | null | null | null | postman/forms.py | StriveForBest/django-postman | 25f5fcf5a6d54dbb22b393432701652c21e49552 | [
"BSD-3-Clause"
] | null | null | null | postman/forms.py | StriveForBest/django-postman | 25f5fcf5a6d54dbb22b393432701652c21e49552 | [
"BSD-3-Clause"
] | 2 | 2015-04-30T13:46:16.000Z | 2019-09-16T06:55:14.000Z | """
You may define your own custom forms, based or inspired by the following ones.
Examples of customization:
recipients = CommaSeparatedUserField(label=("Recipients", "Recipient"),
min=2,
max=5,
user_filter=my_user_filter,
channel='my_channel',
)
can_overwrite_limits = False
exchange_filter = staticmethod(my_exchange_filter)
"""
from __future__ import unicode_literals
from django import forms
from django.conf import settings
try:
from django.contrib.auth import get_user_model # Django 1.5
except ImportError:
from postman.future_1_5 import get_user_model
from django.db import transaction
from django.utils.translation import ugettext, ugettext_lazy as _
from postman.fields import CommaSeparatedUserField
from postman.models import Message
from postman.utils import WRAP_WIDTH
class BaseWriteForm(forms.ModelForm):
    """The base class for other forms.

    Optional keyword arguments accepted by __init__ (all popped before the
    ModelForm sees them): sender, exchange_filter, user_filter, max, channel,
    site.
    """
    class Meta:
        model = Message
        fields = ('body',)
        widgets = {
            # for better comfort, ensure a 'cols' of at least
            # the 'width' of the body quote formatter.
            'body': forms.Textarea(attrs={'cols': WRAP_WIDTH, 'rows': 12}),
        }
    error_css_class = 'error'
    required_css_class = 'required'
    def __init__(self, *args, **kwargs):
        # pop our custom options so the remaining kwargs are valid for ModelForm
        sender = kwargs.pop('sender', None)
        exchange_filter = kwargs.pop('exchange_filter', None)
        user_filter = kwargs.pop('user_filter', None)
        max = kwargs.pop('max', None)
        channel = kwargs.pop('channel', None)
        self.site = kwargs.pop('site', None)
        super(BaseWriteForm, self).__init__(*args, **kwargs)
        self.fields['body'].widget.attrs['placeholder'] = 'Write a message'
        if 'subject' in self.fields:
            self.fields['subject'].widget.attrs['placeholder'] = 'Subject'
        # only an authenticated sender is recorded; visitors leave it None
        self.instance.sender = sender if (sender and sender.is_authenticated()) else None
        if exchange_filter:
            self.exchange_filter = exchange_filter
        if 'recipients' in self.fields:
            self.fields['recipients'].widget.attrs['placeholder'] = 'Recipients'
            if user_filter and hasattr(self.fields['recipients'], 'user_filter'):
                self.fields['recipients'].user_filter = user_filter
            if getattr(settings, 'POSTMAN_DISALLOW_MULTIRECIPIENTS', False):
                max = 1
            # subclasses may veto the limit override via can_overwrite_limits
            if max is not None and hasattr(self.fields['recipients'], 'set_max') \
            and getattr(self, 'can_overwrite_limits', True):
                self.fields['recipients'].set_max(max)
            if channel and hasattr(self.fields['recipients'], 'set_arg'):
                self.fields['recipients'].set_arg(channel)
    # messages reported when an exchange filter rejects some recipients
    error_messages = {
        'filtered': _("Writing to some users is not possible: {users}."),
        'filtered_user': _("{username}"),
        'filtered_user_with_reason': _("{username} ({reason})"),
    }
    def clean_recipients(self):
        """Check no filter prohibit the exchange."""
        recipients = self.cleaned_data['recipients']
        exchange_filter = getattr(self, 'exchange_filter', None)
        if exchange_filter:
            errors = []
            filtered_names = []
            # iterate a copy so rejected users can be removed from the original
            recipients_list = recipients[:]
            for u in recipients_list:
                try:
                    reason = exchange_filter(self.instance.sender, u, recipients_list)
                    if reason is not None:
                        recipients.remove(u)
                        filtered_names.append(
                            self.error_messages[
                                'filtered_user_with_reason' if reason else 'filtered_user'
                            ].format(username=u.get_username(), reason=reason)
                        )
                except forms.ValidationError as e:
                    recipients.remove(u)
                    errors.extend(e.messages)
            if filtered_names:
                errors.append(self.error_messages['filtered'].format(users=', '.join(filtered_names)))
            if errors:
                raise forms.ValidationError(errors)
        return recipients
    def save(self, recipient=None, parent=None, auto_moderators=[]):
        """
        Save as many messages as there are recipients.
        Additional actions:
        - If it's a reply, build a conversation
        - Call auto-moderators
        - Notify parties if needed
        Return False if one of the messages is rejected.
        """
        recipients = self.cleaned_data.get('recipients', [])
        if parent and not parent.thread_id:  # at the very first reply, make it a conversation
            parent.thread = parent
            parent.save()
            # but delay the setting of parent.replied_at to the moderation step
        if parent:
            self.instance.parent = parent
            self.instance.thread_id = parent.thread_id
        # snapshot moderation state so it can be restored between recipients
        initial_moderation = self.instance.get_moderation()
        initial_dates = self.instance.get_dates()
        initial_status = self.instance.moderation_status
        if recipient:
            if isinstance(recipient, get_user_model()) and recipient in recipients:
                recipients.remove(recipient)
            recipients.insert(0, recipient)
        is_successful = True
        for r in recipients:
            if isinstance(r, get_user_model()):
                self.instance.recipient = r
            else:
                # a plain email address (visitor recipient)
                self.instance.recipient = None
                self.instance.email = r
            self.instance.pk = None  # force_insert=True is not accessible from here
            self.instance.auto_moderate(auto_moderators)
            self.instance.clean_moderation(initial_status)
            self.instance.clean_for_visitor()
            super(BaseWriteForm, self).save()
            if self.instance.is_rejected():
                is_successful = False
            self.instance.update_parent(initial_status)
            self.instance.notify_users(initial_status, self.site)
            # some resets for next reuse
            if not isinstance(r, get_user_model()):
                self.instance.email = ''
            self.instance.set_moderation(*initial_moderation)
            self.instance.set_dates(*initial_dates)
        return is_successful
    # commit_on_success() is deprecated in Django 1.6 and will be removed in Django 1.8
    save = transaction.atomic(save) if hasattr(transaction, 'atomic') else transaction.commit_on_success(save)
class WriteForm(BaseWriteForm):
    """The form for an authenticated user, to compose a message."""
    # specify help_text only to avoid the possible default 'Enter text to search.' of ajax_select v1.2.5
    recipients = CommaSeparatedUserField(label=(_("Recipients"), _("Recipient")), help_text='')
    class Meta(BaseWriteForm.Meta):
        # expose subject and recipients in addition to the inherited body field
        fields = ('recipients', 'subject', 'body')
class AnonymousWriteForm(BaseWriteForm):
    """The form for an anonymous user, to compose a message."""
    # The 'max' customization should not be permitted here.
    # The features available to anonymous users should be kept to the strict minimum.
    can_overwrite_limits = False
    email = forms.EmailField(label=_("Email"))
    recipients = CommaSeparatedUserField(label=(_("Recipients"), _("Recipient")), help_text='', max=1)  # one recipient is enough
    class Meta(BaseWriteForm.Meta):
        fields = ('email', 'recipients', 'subject', 'body')
class BaseReplyForm(BaseWriteForm):
    """Common behaviour for reply forms: the target recipient is fixed up front."""
    def __init__(self, *args, **kwargs):
        # remember the imposed recipient before delegating to the parent form
        fixed_recipient = kwargs.pop('recipient', None)
        super(BaseReplyForm, self).__init__(*args, **kwargs)
        self.recipient = fixed_recipient
    def clean(self):
        """Reject the form when no recipient was supplied at construction time."""
        if self.recipient:
            return super(BaseReplyForm, self).clean()
        raise forms.ValidationError(ugettext("Undefined recipient."))
    def save(self, *args, **kwargs):
        """Delegate to BaseWriteForm.save with the imposed recipient first."""
        return super(BaseReplyForm, self).save(self.recipient, *args, **kwargs)
class QuickReplyForm(BaseReplyForm):
    """
    The form to use in the view of a message or a conversation, for a quick reply.
    The recipient is imposed and a default value for the subject will be provided.
    """
    # behaviour is entirely inherited from BaseReplyForm
    pass
# module-level switch: replies may add extra recipients unless disabled in settings
allow_copies = not getattr(settings, 'POSTMAN_DISALLOW_COPIES_ON_REPLY', False)
class FullReplyForm(BaseReplyForm):
    """The complete reply form."""
    # the extra-recipients field only exists when copies on reply are allowed
    if allow_copies:
        recipients = CommaSeparatedUserField(
            label=(_("Additional recipients"), _("Additional recipient")), help_text='', required=False)
    class Meta(BaseReplyForm.Meta):
        fields = (['recipients'] if allow_copies else []) + ['subject', 'body']
| 40.036364 | 129 | 0.63431 | from __future__ import unicode_literals
from django import forms
from django.conf import settings
try:
from django.contrib.auth import get_user_model
except ImportError:
from postman.future_1_5 import get_user_model
from django.db import transaction
from django.utils.translation import ugettext, ugettext_lazy as _
from postman.fields import CommaSeparatedUserField
from postman.models import Message
from postman.utils import WRAP_WIDTH
class BaseWriteForm(forms.ModelForm):
    """The base class for the postman write/reply forms.

    __init__ pops optional customization kwargs (sender, exchange_filter,
    user_filter, max, channel, site) before delegating to ModelForm.
    """
    class Meta:
        model = Message
        fields = ('body',)
        widgets = {
            # wide enough for the body quote formatter (WRAP_WIDTH columns)
            'body': forms.Textarea(attrs={'cols': WRAP_WIDTH, 'rows': 12}),
        }
    error_css_class = 'error'
    required_css_class = 'required'
    def __init__(self, *args, **kwargs):
        # pop custom options so the remaining kwargs are valid for ModelForm
        sender = kwargs.pop('sender', None)
        exchange_filter = kwargs.pop('exchange_filter', None)
        user_filter = kwargs.pop('user_filter', None)
        max = kwargs.pop('max', None)
        channel = kwargs.pop('channel', None)
        self.site = kwargs.pop('site', None)
        super(BaseWriteForm, self).__init__(*args, **kwargs)
        self.fields['body'].widget.attrs['placeholder'] = 'Write a message'
        if 'subject' in self.fields:
            self.fields['subject'].widget.attrs['placeholder'] = 'Subject'
        # only an authenticated sender is recorded; visitors leave it None
        self.instance.sender = sender if (sender and sender.is_authenticated()) else None
        if exchange_filter:
            self.exchange_filter = exchange_filter
        if 'recipients' in self.fields:
            self.fields['recipients'].widget.attrs['placeholder'] = 'Recipients'
            if user_filter and hasattr(self.fields['recipients'], 'user_filter'):
                self.fields['recipients'].user_filter = user_filter
            if getattr(settings, 'POSTMAN_DISALLOW_MULTIRECIPIENTS', False):
                max = 1
            # subclasses may veto the limit override via can_overwrite_limits
            if max is not None and hasattr(self.fields['recipients'], 'set_max') \
            and getattr(self, 'can_overwrite_limits', True):
                self.fields['recipients'].set_max(max)
            if channel and hasattr(self.fields['recipients'], 'set_arg'):
                self.fields['recipients'].set_arg(channel)
    # messages reported when an exchange filter rejects some recipients
    error_messages = {
        'filtered': _("Writing to some users is not possible: {users}."),
        'filtered_user': _("{username}"),
        'filtered_user_with_reason': _("{username} ({reason})"),
    }
    def clean_recipients(self):
        # Apply the optional exchange filter; drop rejected users and collect
        # the reasons into a single ValidationError.
        recipients = self.cleaned_data['recipients']
        exchange_filter = getattr(self, 'exchange_filter', None)
        if exchange_filter:
            errors = []
            filtered_names = []
            # iterate a copy so rejected users can be removed from the original
            recipients_list = recipients[:]
            for u in recipients_list:
                try:
                    reason = exchange_filter(self.instance.sender, u, recipients_list)
                    if reason is not None:
                        recipients.remove(u)
                        filtered_names.append(
                            self.error_messages[
                                'filtered_user_with_reason' if reason else 'filtered_user'
                            ].format(username=u.get_username(), reason=reason)
                        )
                except forms.ValidationError as e:
                    recipients.remove(u)
                    errors.extend(e.messages)
            if filtered_names:
                errors.append(self.error_messages['filtered'].format(users=', '.join(filtered_names)))
            if errors:
                raise forms.ValidationError(errors)
        return recipients
    def save(self, recipient=None, parent=None, auto_moderators=[]):
        # Save one Message per recipient, building a conversation for replies,
        # running auto-moderators and notifying users.  Returns False when at
        # least one of the messages was rejected by moderation.
        recipients = self.cleaned_data.get('recipients', [])
        if parent and not parent.thread_id:
            # at the very first reply, make the parent a conversation root
            parent.thread = parent
            parent.save()
        if parent:
            self.instance.parent = parent
            self.instance.thread_id = parent.thread_id
        # snapshot moderation state so it can be restored between recipients
        initial_moderation = self.instance.get_moderation()
        initial_dates = self.instance.get_dates()
        initial_status = self.instance.moderation_status
        if recipient:
            if isinstance(recipient, get_user_model()) and recipient in recipients:
                recipients.remove(recipient)
            recipients.insert(0, recipient)
        is_successful = True
        for r in recipients:
            if isinstance(r, get_user_model()):
                self.instance.recipient = r
            else:
                # a plain email address (visitor recipient)
                self.instance.recipient = None
                self.instance.email = r
            # clearing the pk forces an INSERT for each recipient
            self.instance.pk = None
            self.instance.auto_moderate(auto_moderators)
            self.instance.clean_moderation(initial_status)
            self.instance.clean_for_visitor()
            super(BaseWriteForm, self).save()
            if self.instance.is_rejected():
                is_successful = False
            self.instance.update_parent(initial_status)
            self.instance.notify_users(initial_status, self.site)
            # reset instance state for the next loop iteration
            if not isinstance(r, get_user_model()):
                self.instance.email = ''
            self.instance.set_moderation(*initial_moderation)
            self.instance.set_dates(*initial_dates)
        return is_successful
    # wrap save in a transaction; commit_on_success is the pre-Django-1.6 spelling
    save = transaction.atomic(save) if hasattr(transaction, 'atomic') else transaction.commit_on_success(save)
class WriteForm(BaseWriteForm):
    """Write form for an authenticated user; allows multiple recipients."""
    recipients = CommaSeparatedUserField(label=(_("Recipients"), _("Recipient")), help_text='')

    class Meta(BaseWriteForm.Meta):
        fields = ('recipients', 'subject', 'body')
class AnonymousWriteForm(BaseWriteForm):
    """Write form for a visitor: an email address is required, one recipient only."""
    # Visitors may not bypass the configured sending limits.
    can_overwrite_limits = False

    email = forms.EmailField(label=_("Email"))
    # max=1: a visitor may address a single recipient.
    recipients = CommaSeparatedUserField(label=(_("Recipients"), _("Recipient")), help_text='', max=1)

    class Meta(BaseWriteForm.Meta):
        fields = ('email', 'recipients', 'subject', 'body')
class BaseReplyForm(BaseWriteForm):
    """Base class for reply forms: the recipient is fixed at construction time."""

    def __init__(self, *args, **kwargs):
        # The recipient is supplied by the caller (e.g. the view), not typed
        # in by the user.
        recipient = kwargs.pop('recipient', None)
        super(BaseReplyForm, self).__init__(*args, **kwargs)
        self.recipient = recipient

    def clean(self):
        # A reply without a recipient makes no sense; fail validation early.
        if not self.recipient:
            raise forms.ValidationError(ugettext("Undefined recipient."))
        return super(BaseReplyForm, self).clean()

    def save(self, *args, **kwargs):
        # Delegate to BaseWriteForm.save() with the fixed recipient first.
        return super(BaseReplyForm, self).save(self.recipient, *args, **kwargs)
class QuickReplyForm(BaseReplyForm):
    """Reply form with no extra field beyond the base behaviour."""
    pass
# Site setting: whether extra recipients may be added when replying.
allow_copies = not getattr(settings, 'POSTMAN_DISALLOW_COPIES_ON_REPLY', False)


class FullReplyForm(BaseReplyForm):
    """Complete reply form; optionally offers additional recipients."""
    if allow_copies:
        recipients = CommaSeparatedUserField(
            label=(_("Additional recipients"), _("Additional recipient")), help_text='', required=False)

    class Meta(BaseReplyForm.Meta):
        fields = (['recipients'] if allow_copies else []) + ['subject', 'body']
| true | true |
f7158ab6ed278e6df18c8b2e6bfd09087bd18ae7 | 426 | py | Python | Util/EnvUtil.py | xrkk/proxy_pool | 7e4f732041f51fa6aa9a2e906ad9e7cab880f2b6 | [
"MIT"
] | null | null | null | Util/EnvUtil.py | xrkk/proxy_pool | 7e4f732041f51fa6aa9a2e906ad9e7cab880f2b6 | [
"MIT"
] | null | null | null | Util/EnvUtil.py | xrkk/proxy_pool | 7e4f732041f51fa6aa9a2e906ad9e7cab880f2b6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: EnvUtil
Description : 环境相关
Author : J_hao
date: 2017/9/18
-------------------------------------------------
Change Activity:
2017/9/18: 区分Python版本
-------------------------------------------------
"""
__author__ = 'J_hao'
import sys
PY3 = sys.version_info >= (3,) | 25.058824 | 50 | 0.319249 |
__author__ = 'J_hao'
import sys
PY3 = sys.version_info >= (3,) | true | true |
f7158afa9cbb6416fad2e41340029a8fbbd333f2 | 12,471 | py | Python | tfx/orchestration/kubeflow/kubeflow_dag_runner_test.py | rtg0795/tfx | 63c31b719896eef645df3850d0e6b946e44cd059 | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/kubeflow/kubeflow_dag_runner_test.py | rtg0795/tfx | 63c31b719896eef645df3850d0e6b946e44cd059 | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/kubeflow/kubeflow_dag_runner_test.py | rtg0795/tfx | 63c31b719896eef645df3850d0e6b946e44cd059 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.kubeflow.kubeflow_dag_runner."""
import json
import os
import tarfile
from typing import List
from kfp import onprem
import tensorflow as tf
from tfx.components.statistics_gen import component as statistics_gen_component
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.orchestration.kubeflow.decorators import FinalStatusStr
from tfx.proto import example_gen_pb2
from tfx.types import component_spec
from tfx.utils import telemetry_utils
from tfx.utils import test_case_utils
import yaml
from ml_metadata.proto import metadata_store_pb2
@component
def _say_hi(status: Parameter[str]):
  """Trivial component that prints its parameter; used as the exit handler."""
  print(status)
def _two_step_pipeline() -> tfx_pipeline.Pipeline:
  """Builds the 2-step pipeline under test: BigQueryExampleGen -> StatisticsGen.

  The example gen's input config is exposed as a runtime parameter so tests
  can check parameter plumbing in the compiled workflow.
  """
  split_config = {
      'splits': [{
          'name': 'single_split',
          'pattern': 'SELECT * FROM default-table'
      }]
  }
  input_config = data_types.RuntimeParameter(
      name='input_config', ptype=str, default=json.dumps(split_config))
  example_gen = big_query_example_gen_component.BigQueryExampleGen(
      input_config=input_config, output_config=example_gen_pb2.Output())
  statistics_gen = statistics_gen_component.StatisticsGen(
      examples=example_gen.outputs['examples'])
  return tfx_pipeline.Pipeline(
      pipeline_name='two_step_pipeline',
      pipeline_root='pipeline_root',
      metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
      components=[example_gen, statistics_gen],
  )
class _DummySpec(component_spec.ComponentSpec):
  """Empty component spec (no inputs/outputs/parameters) for the dummy component."""
  INPUTS = {}
  OUTPUTS = {}
  PARAMETERS = {}
class _DummyComponent(base_component.BaseComponent):
  """Container-based component stub that just runs ``ls`` in a dummy image."""
  SPEC_CLASS = _DummySpec
  EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
      image='dummy:latest', command=['ls'])

  def __init__(self):
    super().__init__(_DummySpec())
def _container_component_pipeline() -> tfx_pipeline.Pipeline:
  """Builds a one-component pipeline around the container-based dummy."""
  components = [_DummyComponent()]
  return tfx_pipeline.Pipeline(
      pipeline_name='container_component_pipeline',
      pipeline_root='pipeline_root',
      metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
      components=components,
  )
class KubeflowDagRunnerTest(test_case_utils.TfxTest):
  """Compiles test pipelines with KubeflowDagRunner and inspects the output.

  Each test runs the compiler, opens the resulting .tar.gz package and
  asserts on the generated Argo workflow YAML (`pipeline.yaml`).
  """

  def setUp(self):
    super().setUp()
    # Golden files (e.g. the expected TFX IR) live next to this module.
    self._source_data_dir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'testdata')
    # The runner writes the compiled package to the CWD; run in a temp dir.
    self.enter_context(test_case_utils.change_working_dir(self.tmp_dir))

  def _compare_tfx_ir_against_testdata(self, args: List[str], golden_file: str):
    """Asserts that the --tfx_ir payload in ``args`` matches ``golden_file``."""
    # The serialized IR is the argument right after the '--tfx_ir' flag.
    index_of_tfx_ir_flag = args.index('--tfx_ir')
    self.assertAllGreater(len(args), index_of_tfx_ir_flag)
    real_tfx_ir = json.loads(args[index_of_tfx_ir_flag + 1])
    real_tfx_ir_str = json.dumps(real_tfx_ir, sort_keys=True)
    with open(os.path.join(self._source_data_dir,
                           golden_file)) as tfx_ir_json_file:
      formatted_tfx_ir = json.dumps(json.load(tfx_ir_json_file), sort_keys=True)
    # Re-serializing both sides with sorted keys makes the comparison
    # independent of key order and formatting.
    self.assertEqual(real_tfx_ir_str, formatted_tfx_ir)

  def testTwoStepPipeline(self):
    """Sanity-checks the construction and dependencies for a 2-step pipeline."""
    kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())
    file_path = os.path.join(self.tmp_dir, 'two_step_pipeline.tar.gz')
    self.assertTrue(fileio.exists(file_path))

    with tarfile.TarFile.open(file_path).extractfile(
        'pipeline.yaml') as pipeline_file:
      self.assertIsNotNone(pipeline_file)
      pipeline = yaml.safe_load(pipeline_file)

      # One container template per component.
      containers = [
          c for c in pipeline['spec']['templates'] if 'container' in c
      ]
      self.assertEqual(2, len(containers))

      big_query_container = [
          c for c in containers if c['name'] == 'bigqueryexamplegen'
      ]
      self.assertEqual(1, len(big_query_container))
      self.assertEqual([
          'python',
          '-m',
          'tfx.orchestration.kubeflow.container_entrypoint',
      ], big_query_container[0]['container']['command'])
      self.assertIn('--tfx_ir', big_query_container[0]['container']['args'])
      self.assertIn('--node_id', big_query_container[0]['container']['args'])
      self._compare_tfx_ir_against_testdata(
          big_query_container[0]['container']['args'],
          'two_step_pipeline_post_dehydrate_ir.json')

      statistics_gen_container = [
          c for c in containers if c['name'] == 'statisticsgen'
      ]
      self.assertEqual(1, len(statistics_gen_container))

      # Ensure the pod labels are correctly appended.
      metadata = [
          c['metadata'] for c in pipeline['spec']['templates'] if 'dag' not in c
      ]
      for m in metadata:
        self.assertEqual('tfx', m['labels'][telemetry_utils.LABEL_KFP_SDK_ENV])

      # Ensure dependencies between components are captured.
      dag = [c for c in pipeline['spec']['templates'] if 'dag' in c]
      self.assertEqual(1, len(dag))

      self.assertEqual(
          {
              'tasks': [{
                  'name': 'bigqueryexamplegen',
                  'template': 'bigqueryexamplegen',
                  'arguments': {
                      'parameters': [{
                          'name': 'input_config',
                          'value': '{{inputs.parameters.input_config}}'
                      }, {
                          'name': 'pipeline-root',
                          'value': '{{inputs.parameters.pipeline-root}}'
                      }]
                  }
              }, {
                  'name': 'statisticsgen',
                  'template': 'statisticsgen',
                  'arguments': {
                      'parameters': [{
                          'name': 'pipeline-root',
                          'value': '{{inputs.parameters.pipeline-root}}'
                      }]
                  },
                  'dependencies': ['bigqueryexamplegen'],
              }]
          }, dag[0]['dag'])

  def testDefaultPipelineOperatorFuncs(self):
    """Compiles with the default operator funcs; package must be well-formed."""
    kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())
    file_path = 'two_step_pipeline.tar.gz'
    self.assertTrue(fileio.exists(file_path))

    with tarfile.TarFile.open(file_path).extractfile(
        'pipeline.yaml') as pipeline_file:
      self.assertIsNotNone(pipeline_file)
      pipeline = yaml.safe_load(pipeline_file)

      containers = [
          c for c in pipeline['spec']['templates'] if 'container' in c
      ]
      self.assertEqual(2, len(containers))

  def testMountGcpServiceAccount(self):
    """Checks the GCP service-account secret is mounted in every container."""
    kubeflow_dag_runner.KubeflowDagRunner(
        config=kubeflow_dag_runner.KubeflowDagRunnerConfig(
            pipeline_operator_funcs=kubeflow_dag_runner
            .get_default_pipeline_operator_funcs(use_gcp_sa=True))).run(
                _two_step_pipeline())
    file_path = 'two_step_pipeline.tar.gz'
    self.assertTrue(fileio.exists(file_path))

    with tarfile.TarFile.open(file_path).extractfile(
        'pipeline.yaml') as pipeline_file:
      self.assertIsNotNone(pipeline_file)
      pipeline = yaml.safe_load(pipeline_file)

      containers = [
          c for c in pipeline['spec']['templates'] if 'container' in c
      ]
      self.assertEqual(2, len(containers))

      # Check that each container has default GCP credentials.
      container_0 = containers[0]
      env = [
          env for env in container_0['container']['env']
          if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'
      ]
      self.assertEqual(1, len(env))
      self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',
                       env[0]['value'])

      container_1 = containers[0]
      env = [
          env for env in container_1['container']['env']
          if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'
      ]
      self.assertEqual(1, len(env))
      self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',
                       env[0]['value'])

  def testVolumeMountingPipelineOperatorFuncs(self):
    """Checks a PVC mount operator is reflected in the compiled workflow."""
    mount_volume_op = onprem.mount_pvc('my-persistent-volume-claim',
                                       'my-volume-name',
                                       '/mnt/volume-mount-path')
    config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
        pipeline_operator_funcs=[mount_volume_op])

    kubeflow_dag_runner.KubeflowDagRunner(config=config).run(
        _two_step_pipeline())
    file_path = 'two_step_pipeline.tar.gz'
    self.assertTrue(fileio.exists(file_path))

    with tarfile.TarFile.open(file_path).extractfile(
        'pipeline.yaml') as pipeline_file:
      self.assertIsNotNone(pipeline_file)
      pipeline = yaml.safe_load(pipeline_file)

      container_templates = [
          c for c in pipeline['spec']['templates'] if 'container' in c
      ]
      self.assertEqual(2, len(container_templates))

      volumes = [{
          'name': 'my-volume-name',
          'persistentVolumeClaim': {
              'claimName': 'my-persistent-volume-claim'
          }
      }]

      # Check that the PVC is specified for kfp<=0.1.31.1.
      if 'volumes' in pipeline['spec']:
        self.assertEqual(volumes, pipeline['spec']['volumes'])

      for template in container_templates:
        # Check that each container has the volume mounted.
        self.assertEqual([{
            'name': 'my-volume-name',
            'mountPath': '/mnt/volume-mount-path'
        }], template['container']['volumeMounts'])

        # Check that each template has the PVC specified for kfp>=0.1.31.2.
        if 'volumes' in template:
          self.assertEqual(volumes, template['volumes'])

  def testContainerComponent(self):
    """Compiles a pipeline with a container-based component."""
    kubeflow_dag_runner.KubeflowDagRunner().run(_container_component_pipeline())
    file_path = os.path.join(self.tmp_dir,
                             'container_component_pipeline.tar.gz')
    self.assertTrue(fileio.exists(file_path))

    with tarfile.TarFile.open(file_path).extractfile(
        'pipeline.yaml') as pipeline_file:
      self.assertIsNotNone(pipeline_file)
      pipeline = yaml.safe_load(pipeline_file)
      containers = [
          c for c in pipeline['spec']['templates'] if 'container' in c
      ]
      self.assertLen(containers, 1)
      component_args = containers[0]['container']['args']
      self.assertIn('--node_id', component_args)

  def testExitHandler(self):
    """Checks the exit handler is wired via onExit and gets the final status."""
    dag_runner = kubeflow_dag_runner.KubeflowDagRunner()
    dag_runner.set_exit_handler(_say_hi(status=FinalStatusStr()))
    pipeline = _container_component_pipeline()
    pipeline.enable_cache = True
    dag_runner.run(pipeline)
    file_path = os.path.join(self.tmp_dir,
                             'container_component_pipeline.tar.gz')
    self.assertTrue(fileio.exists(file_path))

    with tarfile.TarFile.open(file_path).extractfile(
        'pipeline.yaml') as pipeline_file:
      self.assertIsNotNone(pipeline_file)
      pipeline = yaml.safe_load(pipeline_file)
      self.assertIn('onExit', pipeline['spec'])
      containers = [
          c for c in pipeline['spec']['templates'] if 'container' in c
      ]
      self.assertLen(containers, 2)
      # The exit handler receives the workflow status and must not cache.
      exit_component_args = ' '.join(containers[1]['container']['args'])
      self.assertIn('{{workflow.status}}', exit_component_args)
      self.assertNotIn('enableCache', exit_component_args)
      first_component_args = ' '.join(containers[0]['container']['args'])
      self.assertNotIn('{{workflow.status}}', first_component_args)
      self.assertIn('enableCache', first_component_args)
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| 37.790909 | 106 | 0.667228 |
import json
import os
import tarfile
from typing import List
from kfp import onprem
import tensorflow as tf
from tfx.components.statistics_gen import component as statistics_gen_component
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.orchestration.kubeflow.decorators import FinalStatusStr
from tfx.proto import example_gen_pb2
from tfx.types import component_spec
from tfx.utils import telemetry_utils
from tfx.utils import test_case_utils
import yaml
from ml_metadata.proto import metadata_store_pb2
@component
def _say_hi(status: Parameter[str]):
print(status)
def _two_step_pipeline() -> tfx_pipeline.Pipeline:
default_input_config = json.dumps({
'splits': [{
'name': 'single_split',
'pattern': 'SELECT * FROM default-table'
}]
})
input_config = data_types.RuntimeParameter(
name='input_config', ptype=str, default=default_input_config)
example_gen = big_query_example_gen_component.BigQueryExampleGen(
input_config=input_config, output_config=example_gen_pb2.Output())
statistics_gen = statistics_gen_component.StatisticsGen(
examples=example_gen.outputs['examples'])
return tfx_pipeline.Pipeline(
pipeline_name='two_step_pipeline',
pipeline_root='pipeline_root',
metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
components=[example_gen, statistics_gen],
)
class _DummySpec(component_spec.ComponentSpec):
INPUTS = {}
OUTPUTS = {}
PARAMETERS = {}
class _DummyComponent(base_component.BaseComponent):
SPEC_CLASS = _DummySpec
EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
image='dummy:latest', command=['ls'])
def __init__(self):
super().__init__(_DummySpec())
def _container_component_pipeline() -> tfx_pipeline.Pipeline:
return tfx_pipeline.Pipeline(
pipeline_name='container_component_pipeline',
pipeline_root='pipeline_root',
metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
components=[_DummyComponent()],
)
class KubeflowDagRunnerTest(test_case_utils.TfxTest):
def setUp(self):
super().setUp()
self._source_data_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'testdata')
self.enter_context(test_case_utils.change_working_dir(self.tmp_dir))
def _compare_tfx_ir_against_testdata(self, args: List[str], golden_file: str):
index_of_tfx_ir_flag = args.index('--tfx_ir')
self.assertAllGreater(len(args), index_of_tfx_ir_flag)
real_tfx_ir = json.loads(args[index_of_tfx_ir_flag + 1])
real_tfx_ir_str = json.dumps(real_tfx_ir, sort_keys=True)
with open(os.path.join(self._source_data_dir,
golden_file)) as tfx_ir_json_file:
formatted_tfx_ir = json.dumps(json.load(tfx_ir_json_file), sort_keys=True)
self.assertEqual(real_tfx_ir_str, formatted_tfx_ir)
def testTwoStepPipeline(self):
kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())
file_path = os.path.join(self.tmp_dir, 'two_step_pipeline.tar.gz')
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(containers))
big_query_container = [
c for c in containers if c['name'] == 'bigqueryexamplegen'
]
self.assertEqual(1, len(big_query_container))
self.assertEqual([
'python',
'-m',
'tfx.orchestration.kubeflow.container_entrypoint',
], big_query_container[0]['container']['command'])
self.assertIn('--tfx_ir', big_query_container[0]['container']['args'])
self.assertIn('--node_id', big_query_container[0]['container']['args'])
self._compare_tfx_ir_against_testdata(
big_query_container[0]['container']['args'],
'two_step_pipeline_post_dehydrate_ir.json')
statistics_gen_container = [
c for c in containers if c['name'] == 'statisticsgen'
]
self.assertEqual(1, len(statistics_gen_container))
metadata = [
c['metadata'] for c in pipeline['spec']['templates'] if 'dag' not in c
]
for m in metadata:
self.assertEqual('tfx', m['labels'][telemetry_utils.LABEL_KFP_SDK_ENV])
dag = [c for c in pipeline['spec']['templates'] if 'dag' in c]
self.assertEqual(1, len(dag))
self.assertEqual(
{
'tasks': [{
'name': 'bigqueryexamplegen',
'template': 'bigqueryexamplegen',
'arguments': {
'parameters': [{
'name': 'input_config',
'value': '{{inputs.parameters.input_config}}'
}, {
'name': 'pipeline-root',
'value': '{{inputs.parameters.pipeline-root}}'
}]
}
}, {
'name': 'statisticsgen',
'template': 'statisticsgen',
'arguments': {
'parameters': [{
'name': 'pipeline-root',
'value': '{{inputs.parameters.pipeline-root}}'
}]
},
'dependencies': ['bigqueryexamplegen'],
}]
}, dag[0]['dag'])
def testDefaultPipelineOperatorFuncs(self):
kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())
file_path = 'two_step_pipeline.tar.gz'
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(containers))
def testMountGcpServiceAccount(self):
kubeflow_dag_runner.KubeflowDagRunner(
config=kubeflow_dag_runner.KubeflowDagRunnerConfig(
pipeline_operator_funcs=kubeflow_dag_runner
.get_default_pipeline_operator_funcs(use_gcp_sa=True))).run(
_two_step_pipeline())
file_path = 'two_step_pipeline.tar.gz'
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(containers))
container_0 = containers[0]
env = [
env for env in container_0['container']['env']
if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'
]
self.assertEqual(1, len(env))
self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',
env[0]['value'])
container_1 = containers[0]
env = [
env for env in container_1['container']['env']
if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'
]
self.assertEqual(1, len(env))
self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',
env[0]['value'])
def testVolumeMountingPipelineOperatorFuncs(self):
mount_volume_op = onprem.mount_pvc('my-persistent-volume-claim',
'my-volume-name',
'/mnt/volume-mount-path')
config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
pipeline_operator_funcs=[mount_volume_op])
kubeflow_dag_runner.KubeflowDagRunner(config=config).run(
_two_step_pipeline())
file_path = 'two_step_pipeline.tar.gz'
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
container_templates = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(container_templates))
volumes = [{
'name': 'my-volume-name',
'persistentVolumeClaim': {
'claimName': 'my-persistent-volume-claim'
}
}]
if 'volumes' in pipeline['spec']:
self.assertEqual(volumes, pipeline['spec']['volumes'])
for template in container_templates:
self.assertEqual([{
'name': 'my-volume-name',
'mountPath': '/mnt/volume-mount-path'
}], template['container']['volumeMounts'])
if 'volumes' in template:
self.assertEqual(volumes, template['volumes'])
def testContainerComponent(self):
kubeflow_dag_runner.KubeflowDagRunner().run(_container_component_pipeline())
file_path = os.path.join(self.tmp_dir,
'container_component_pipeline.tar.gz')
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertLen(containers, 1)
component_args = containers[0]['container']['args']
self.assertIn('--node_id', component_args)
def testExitHandler(self):
dag_runner = kubeflow_dag_runner.KubeflowDagRunner()
dag_runner.set_exit_handler(_say_hi(status=FinalStatusStr()))
pipeline = _container_component_pipeline()
pipeline.enable_cache = True
dag_runner.run(pipeline)
file_path = os.path.join(self.tmp_dir,
'container_component_pipeline.tar.gz')
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
self.assertIn('onExit', pipeline['spec'])
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertLen(containers, 2)
exit_component_args = ' '.join(containers[1]['container']['args'])
self.assertIn('{{workflow.status}}', exit_component_args)
self.assertNotIn('enableCache', exit_component_args)
first_component_args = ' '.join(containers[0]['container']['args'])
self.assertNotIn('{{workflow.status}}', first_component_args)
self.assertIn('enableCache', first_component_args)
if __name__ == '__main__':
tf.test.main()
| true | true |
f7158c0d4644817021a89da48a6f1e663928ae91 | 2,766 | py | Python | catalyst/dl/utils/trace.py | 162/catalyst | b4ba36be52c51160e0fabecdcb084a8d5cd96cb7 | [
"MIT"
] | null | null | null | catalyst/dl/utils/trace.py | 162/catalyst | b4ba36be52c51160e0fabecdcb084a8d5cd96cb7 | [
"MIT"
] | null | null | null | catalyst/dl/utils/trace.py | 162/catalyst | b4ba36be52c51160e0fabecdcb084a8d5cd96cb7 | [
"MIT"
] | null | null | null | from typing import Type
import torch
from torch import nn
from torch.jit import ScriptModule
from catalyst.dl.core import Experiment, Runner
class _ForwardOverrideModel(nn.Module):
"""
Model that calls specified method instead of forward
(Workaround, single method tracing is not supported)
"""
def __init__(self, model, method_name):
super().__init__()
self.model = model
self.method = method_name
def forward(self, *args, **kwargs):
return getattr(self.model, self.method)(*args, **kwargs)
class _TracingModelWrapper(nn.Module):
    """Callable that traces the model with the incoming batch instead of running it.

    Workaround to reuse the runner's native batch handling: the runner "calls"
    this wrapper as if it were the model, and the resulting ScriptModule is
    stashed in ``self.tracing_result`` as a side effect.
    """

    def __init__(self, model, method_name):
        super().__init__()
        self.method_name = method_name
        self.model = model
        self.tracing_result: ScriptModule

    def __call__(self, *args, **kwargs):
        wrapped = _ForwardOverrideModel(self.model, self.method_name)
        self.tracing_result = torch.jit.trace(wrapped, *args, **kwargs)
def _get_native_batch(
    experiment: Experiment, stage: str
):
    """Collates a one-sample batch from the first loader of the given stage."""
    loaders = experiment.get_loaders(stage)
    assert loaders, \
        "Experiment must have at least one loader to support tracing"

    # Any loader works for tracing purposes; use the first one.
    first_loader = next(iter(loaders.values()))
    sample = first_loader.collate_fn([first_loader.dataset[0]])
    return sample
def trace_model(
    model: nn.Module,
    experiment: Experiment,
    runner_type: Type[Runner],
    method_name: str = "forward"
) -> ScriptModule:
    """
    Traces model using its native experiment and runner.

    Args:
        model: Model to trace
            NOTICE: will be switched to eval and
            requires_grad=False will be set on all params
        experiment: Native experiment that was used to train model
        runner_type: Model's native runner that was used to train model
        method_name: Model's method name that will be
            used as entrypoint during tracing

    Returns:
        Traced model ScriptModule
    """
    # Any stage yields a representative batch; take the first one declared.
    stage = list(experiment.stages)[0]

    # Freeze the model: inference mode, no gradient bookkeeping during trace.
    model.eval()
    for p in model.parameters():
        p.requires_grad_(False)

    tracer = _TracingModelWrapper(model, method_name)
    # The runner drives the tracer as if it were the model; tracing is always
    # done on CPU regardless of where training ran.
    runner: Runner = runner_type(tracer.cpu(), torch.device("cpu"))

    batch = _get_native_batch(experiment, stage)
    # NOTE(review): relies on the runner's private _batch2device API.
    batch = runner._batch2device(batch, device=runner.device)
    # predict_batch invokes the tracer, which records the trace as a side effect.
    runner.predict_batch(batch)

    return tracer.tracing_result
__all__ = ["trace_model"]
| 25.850467 | 73 | 0.656905 | from typing import Type
import torch
from torch import nn
from torch.jit import ScriptModule
from catalyst.dl.core import Experiment, Runner
class _ForwardOverrideModel(nn.Module):
def __init__(self, model, method_name):
super().__init__()
self.model = model
self.method = method_name
def forward(self, *args, **kwargs):
return getattr(self.model, self.method)(*args, **kwargs)
class _TracingModelWrapper(nn.Module):
def __init__(self, model, method_name):
super().__init__()
self.method_name = method_name
self.model = model
self.tracing_result: ScriptModule
def __call__(self, *args, **kwargs):
method_model = _ForwardOverrideModel(
self.model, self.method_name
)
self.tracing_result = \
torch.jit.trace(
method_model,
*args, **kwargs
)
def _get_native_batch(
experiment: Experiment, stage: str
):
loaders = experiment.get_loaders(stage)
assert loaders, \
"Experiment must have at least one loader to support tracing"
loader = next(iter(loaders.values()))
dataset = loader.dataset
collate_fn = loader.collate_fn
sample = collate_fn([dataset[0]])
return sample
def trace_model(
model: nn.Module,
experiment: Experiment,
runner_type: Type[Runner],
method_name: str = "forward"
) -> ScriptModule:
stage = list(experiment.stages)[0]
model.eval()
for p in model.parameters():
p.requires_grad_(False)
tracer = _TracingModelWrapper(model, method_name)
runner: Runner = runner_type(tracer.cpu(), torch.device("cpu"))
batch = _get_native_batch(experiment, stage)
batch = runner._batch2device(batch, device=runner.device)
runner.predict_batch(batch)
return tracer.tracing_result
__all__ = ["trace_model"]
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.