hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
880d483bcf560536d72ba8e37dad04edeb068baf | 788 | py | Python | project/__init__.py | manoj75/GitHubToAzure | a90bf44aaaea65fc0d8b75bcaaffae9a654e1891 | [
"MIT"
] | null | null | null | project/__init__.py | manoj75/GitHubToAzure | a90bf44aaaea65fc0d8b75bcaaffae9a654e1891 | [
"MIT"
] | null | null | null | project/__init__.py | manoj75/GitHubToAzure | a90bf44aaaea65fc0d8b75bcaaffae9a654e1891 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_modus import Modus
from flask_migrate import Migrate
# --- Application and extension setup ---------------------------------------
app = Flask(__name__)
# flask_modus enables HTTP method overriding (e.g. the `_method` form field).
modus = Modus(app)
# NOTE(review): SQLAlchemy 1.4+ rejects the 'postgres://' scheme in favor of
# 'postgresql://' — confirm the installed SQLAlchemy version before upgrading.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://localhost/flask-blueprints'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): hard-coded secret key; should be loaded from the environment
# in any non-demo deployment.
app.config['SECRET_KEY'] = "THIS SHOULD BE HIDDEN!"
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# import a blueprint that we will create
# (imported here, after `app`/`db` exist, to avoid circular imports)
from project.users.views import users_blueprint
from project.dashboard.views import dashboard_blueprint
# register our blueprints with the application
app.register_blueprint(users_blueprint, url_prefix='/users')
app.register_blueprint(dashboard_blueprint, url_prefix='/dashboard')
@app.route('/')
def root():
    """Landing page for the blueprint demo application."""
    greeting = "HELLO BLUEPRINTS!"
    return greeting
c4a172389cd0e3d7e118c3f533255e7db0b18ee8 | 1,439 | py | Python | docs/getting_started/flask_example.py | TimPansino/opentelemetry-python | 51ed4576c611316bd3f74d213501f5ffa3e2a5ca | [
"Apache-2.0"
] | 1 | 2020-03-17T05:37:21.000Z | 2020-03-17T05:37:21.000Z | docs/getting_started/flask_example.py | TimPansino/opentelemetry-python | 51ed4576c611316bd3f74d213501f5ffa3e2a5ca | [
"Apache-2.0"
] | 3 | 2019-08-26T13:06:36.000Z | 2020-02-21T21:44:02.000Z | docs/getting_started/flask_example.py | TimPansino/opentelemetry-python | 51ed4576c611316bd3f74d213501f5ffa3e2a5ca | [
"Apache-2.0"
] | 1 | 2020-10-22T20:13:37.000Z | 2020-10-22T20:13:37.000Z | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flask_example.py
import flask
import requests
from opentelemetry import trace
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
ConsoleSpanExporter,
SimpleExportSpanProcessor,
)
# Install a tracer provider that prints every finished span to the console.
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    SimpleExportSpanProcessor(ConsoleSpanExporter())
)
app = flask.Flask(__name__)
# Auto-instrument inbound Flask requests and outbound `requests` calls.
FlaskInstrumentor().instrument_app(app)
RequestsInstrumentor().instrument()
@app.route("/")
def hello():
    """Serve the root page while issuing one traced outbound HTTP request."""
    span_tracer = trace.get_tracer(__name__)
    with span_tracer.start_as_current_span("example-request"):
        # The outbound call is itself auto-instrumented by RequestsInstrumentor,
        # so it appears as a child span of "example-request".
        requests.get("http://www.example.com")
    return "hello"
# Start the development server (blocking; debug mode is not for production).
app.run(debug=True, port=5000)
| 30.617021 | 74 | 0.784573 |
e3b7d04d13f915eecf44739f86a737e9eec60a65 | 1,616 | py | Python | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/status_params.py | Syndra/Ambari-source | 717526b2bf3636622212b14de0d3d298a20c7370 | [
"Apache-2.0"
] | 5 | 2017-07-20T11:15:10.000Z | 2020-04-16T15:42:55.000Z | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/status_params.py | Syndra/Ambari-source | 717526b2bf3636622212b14de0d3d298a20c7370 | [
"Apache-2.0"
] | 8 | 2020-06-18T17:31:19.000Z | 2022-03-02T08:32:03.000Z | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/status_params.py | Syndra/Ambari-source | 717526b2bf3636622212b14de0d3d298a20c7370 | [
"Apache-2.0"
] | 12 | 2017-05-17T09:48:01.000Z | 2021-08-05T19:01:25.000Z | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
# a map of the Ambari role to the component name
# for use with /usr/iop/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
    'HBASE_MASTER' : 'hbase-master',
    'HBASE_REGIONSERVER' : 'hbase-regionserver',
    'HBASE_CLIENT' : 'hbase-client'
}
# Resolve the on-disk component directory for this host's role; falls back to
# the client role when the role is not in the map.
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HBASE_CLIENT")
# Cluster configuration pushed down by Ambari for this host.
config = Script.get_config()
pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
hbase_user = config['configurations']['hbase-env']['hbase_user']
# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
# Path to `kinit`, used for Kerberos ticket acquisition when security is on.
kinit_path_local = functions.get_kinit_path()
tmp_dir = Script.get_tmp_dir()
# `format` here is resource_management's string formatter (not the builtin);
# it interpolates `component_directory` from the enclosing scope.
hbase_conf_dir = format("/usr/iop/current/{component_directory}/conf")
| 35.911111 | 95 | 0.780322 |
5bf1c3bc0438b78fb7e4414fe61844eb38a22f87 | 5,172 | py | Python | tests/test_services/test_set_113.py | ucloud/ucloud-sdk-python2 | 90fb43198df73a78d64bbd98675dc7b302856057 | [
"Apache-2.0"
] | 19 | 2019-05-15T13:41:58.000Z | 2019-11-13T09:09:37.000Z | tests/test_services/test_set_113.py | ucloud/ucloud-sdk-python2 | 90fb43198df73a78d64bbd98675dc7b302856057 | [
"Apache-2.0"
] | 9 | 2019-07-24T08:31:33.000Z | 2020-09-22T04:01:46.000Z | tests/test_services/test_set_113.py | ucloud/ucloud-sdk-python2 | 90fb43198df73a78d64bbd98675dc7b302856057 | [
"Apache-2.0"
] | 3 | 2019-06-18T00:22:07.000Z | 2020-04-24T02:28:06.000Z | # -*- coding: utf-8 -*-
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
import pytest
import logging
from ucloud.core import exc
from ucloud.testing import env, funcs, op, utest
logger = logging.getLogger(__name__)
scenario = utest.Scenario(113)
@pytest.mark.skipif(env.is_ut(), reason=env.get_skip_reason())
def test_set_113(client, variables):
    """Entry point for scenario 113: seed region/zone, then run every step."""
    scenario.initial(variables)
    # All steps in this scenario run against the cn-bj2 / cn-bj2-02 zone.
    scenario.variables["Region"] = "cn-bj2"
    scenario.variables["Zone"] = "cn-bj2-02"
    scenario.run(client)
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=3,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="CreateUFSVolume",
)
def create_ufs_volume_00(client, variables):
    """Create a 1024 GB UFS volume; record its id and name for later steps."""
    req = dict(
        Zone=variables.get("Zone"),
        Size=1024,
        Region=variables.get("Region"),
    )
    try:
        resp = client.invoke("CreateUFSVolume", req)
    except exc.RetCodeException as err:
        resp = err.json()
    # Stash identifiers so subsequent steps can reference this volume.
    variables["Volume_Id"] = utest.value_at_path(resp, "VolumeId")
    variables["Volume_Name"] = utest.value_at_path(resp, "VolumeName")
    return resp
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="DescribeUFSVolume",
)
def describe_ufs_volume_01(client, variables):
    """Describe the volume created in step 00."""
    req = dict(
        Zone=variables.get("Zone"),
        VolumeId=variables.get("Volume_Id"),
        Region=variables.get("Region"),
    )
    try:
        resp = client.invoke("DescribeUFSVolume", req)
    except exc.RetCodeException as err:
        resp = err.json()
    return resp
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="DescribeUHostLite",
)
def describe_uhost_lite_02(client, variables):
    """List UHost instances in the scenario's region/zone."""
    req = dict(Zone=variables.get("Zone"), Region=variables.get("Region"))
    try:
        resp = client.invoke("DescribeUHostLite", req)
    except exc.RetCodeException as err:
        resp = err.json()
    return resp
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="DescribeUFSVolume",
)
def describe_ufs_volume_03(client, variables):
    """Describe the volume created in step 00.

    Fix: the generated code passed the literal string "Volume_Id" as the
    VolumeId; the id stored in `variables` (see step 01) must be used for
    the RetCode==0 validator to be able to pass.
    """
    d = {
        "Zone": variables.get("Zone"),
        "VolumeId": variables.get("Volume_Id"),
        "Region": variables.get("Region"),
    }
    try:
        resp = client.invoke("DescribeUFSVolume", d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="ClearUFSVolumeWhiteList",
)
def clear_ufs_volume_white_list_04(client, variables):
    """Clear the IP white list of the volume created in step 00."""
    req = dict(
        VolumeId=variables.get("Volume_Id"),
        Region=variables.get("Region"),
    )
    try:
        resp = client.invoke("ClearUFSVolumeWhiteList", req)
    except exc.RetCodeException as err:
        resp = err.json()
    return resp
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="DescribeUFSVolume",
)
def describe_ufs_volume_05(client, variables):
    """Re-describe the volume after clearing its white list.

    Fix: the generated code passed the literal string "Volume_Id" as the
    VolumeId instead of the stored id (see the correct pattern in step 01).
    """
    d = {
        "Zone": variables.get("Zone"),
        "VolumeId": variables.get("Volume_Id"),
        "Region": variables.get("Region"),
    }
    try:
        resp = client.invoke("DescribeUFSVolume", d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="ExtendUFSVolume",
)
def extend_ufs_volume_06(client, variables):
    """Grow the volume created in step 00 to 2048 GB."""
    req = dict(
        VolumeId=variables.get("Volume_Id"),
        Size=2048,
        Region=variables.get("Region"),
    )
    try:
        resp = client.invoke("ExtendUFSVolume", req)
    except exc.RetCodeException as err:
        resp = err.json()
    return resp
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="DescribeUFSVolume",
)
def describe_ufs_volume_07(client, variables):
    """Re-describe the volume after extending it.

    Fix: the generated code passed the literal string "Volume_Id" as the
    VolumeId instead of the stored id (see the correct pattern in step 01).
    """
    d = {
        "Zone": variables.get("Zone"),
        "VolumeId": variables.get("Volume_Id"),
        "Region": variables.get("Region"),
    }
    try:
        resp = client.invoke("DescribeUFSVolume", d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=30,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="RemoveUFSVolume",
)
def remove_ufs_volume_08(client, variables):
    """Tear down the volume created in step 00 (retried, delayed start)."""
    req = dict(
        VolumeId=variables.get("Volume_Id"),
        Region=variables.get("Region"),
    )
    try:
        resp = client.invoke("RemoveUFSVolume", req)
    except exc.RetCodeException as err:
        resp = err.json()
    return resp
| 25.352941 | 74 | 0.632637 |
a49bc73d27a775fe05922d06239c919fdb7af787 | 13,471 | py | Python | model/Resource_Rational_Subgoal_Planning_Agent.py | cogtoolslab/tools_block_construction | e573b28b2a53d27268414dab17b9be4dda257230 | [
"MIT"
] | null | null | null | model/Resource_Rational_Subgoal_Planning_Agent.py | cogtoolslab/tools_block_construction | e573b28b2a53d27268414dab17b9be4dda257230 | [
"MIT"
] | 1 | 2022-02-19T00:04:14.000Z | 2022-02-19T00:04:14.000Z | model/Resource_Rational_Subgoal_Planning_Agent.py | cogtoolslab/tools_block_construction | e573b28b2a53d27268414dab17b9be4dda257230 | [
"MIT"
] | null | null | null | import os
import sys
proj_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0,proj_dir)
from random import choice
import utils.blockworld as blockworld
from model.BFS_Lookahead_Agent import *
import model.utils.decomposition_functions
import copy
import numpy as np
# Large negative penalty applied to invalid/unsolvable subgoal sequences so
# they always lose against any feasible sequence.
BAD_SCORE = -10**20
# Upper bound on the number of subgoals planned for a full-sequence agent.
MAX_NUMBER_OF_SUBGOALS = 64

"""
NOTE
The corresponding code in decomposition_functions has changed and this will need to be adapted to work
"""
class Resource_Rational_Subgoal_Planning_Agent(BFS_Lookahead_Agent):
    """Implements n subgoal lookahead planning. Works by sampling the lower level agent and using that to score sequences of actions. """

    def __init__(self,
                 world=None,
                 decomposer = None,
                 lookahead = 1,
                 include_subsequences=True,
                 c_weight = 1000,
                 S_treshold=0.8,
                 S_iterations=1,
                 lower_agent = BFS_Lookahead_Agent(only_improving_actions=True),
                 random_seed = None
                 ):
        # NOTE(review): the parameter name `S_treshold` (sic) is part of the
        # public signature; correcting the spelling would break callers.
        # NOTE(review): `lower_agent`'s default is a single shared instance
        # (mutable default argument) — all agents built without an explicit
        # lower_agent share it. Confirm this is intended.
        self.world = world
        self.lookahead = lookahead
        self.include_subsequences = include_subsequences # only consider sequences of subgoals exactly `lookahead` long or ending on final decomposition
        self.c_weight = c_weight
        self.S_threshold = S_treshold #ignore subgoals that are done in less than this proportion
        self.S_iterations = S_iterations #how often should we run the simulation to determine S?
        self.lower_agent = lower_agent
        self.random_seed = random_seed
        if decomposer is None:
            try:
                # Default decomposer is built from the world's silhouette.
                decomposer = model.utils.decomposition_functions.Horizontal_Construction_Paper(self.world.full_silhouette)
            except AttributeError: # no world has been passed, will need to be updated using decomposer.set_silhouette
                decomposer = model.utils.decomposition_functions.Horizontal_Construction_Paper(None)
        self.decomposer = decomposer
        self._cached_subgoal_evaluations = {} #sets up cache for subgoal evaluations
def __str__(self):
"""Yields a string representation of the agent"""
return str(self.get_parameters())
    def get_parameters(self):
        """Returns dictionary of agent parameters."""
        # NOTE(review): the key 'lookeahead' is misspelled; downstream analysis
        # code may already depend on it, so it is documented rather than fixed.
        # Lower-level agent parameters are merged in with a "lower level: "
        # key prefix so the two namespaces cannot collide.
        return {**{
            'agent_type':self.__class__.__name__,
            'lookeahead':self.lookahead,
            'decomposition_function':self.decomposer.__class__.__name__,
            'include_subsequences':self.include_subsequences,
            'c_weight':self.c_weight,
            'S_threshold':self.S_threshold,
            'S_iterations':self.S_iterations,
            'random_seed':self.random_seed
        }, **{"lower level: "+key:value for key,value in self.lower_agent.get_parameters().items()}}
def set_world(self, world):
super().set_world(world)
self.decomposer.set_silhouette(world.full_silhouette)
self._cached_subgoal_evaluations = {} #clear cache
    def act(self,steps=1,verbose=False):
        """Finds subgoal plan, then builds the first subgoal. Steps here refers to subgoals (ie. 2 steps is acting the first two planned subgoals). Pass -1 to steps to execute the entire subgoal plan.
        NOTE: only the latest decomposed silhouette is saved by experiment runner: the plan needs to be extracted from the saved subgoal sequence."""
        # Lazily fix a random seed so tie-breaking in choose_sequence is
        # reproducible across calls.
        if self.random_seed is None: self.random_seed = randint(0,99999)
        # get best sequence of subgoals
        sequence,sg_planning_cost = self.plan_subgoals(verbose=verbose)
        # finally plan and build all subgoals in order
        cur_i = 0
        lower_level_cost = 0   # total states evaluated by the lower-level agent
        lower_level_info = []  # per-action info dicts (collected, not returned)
        lower_level_actions = []
        self.lower_agent.world = self.world
        while cur_i < len(sequence) and cur_i != steps:
            current_subgoal = sequence.subgoals[cur_i]
            # Narrow the world's target to just this subgoal while acting it.
            self.world.set_silhouette(current_subgoal.target)
            self.world.current_state.clear() #clear caches
            while self.world.status()[0] == "Ongoing":
                actions, info = self.lower_agent.act()
                lower_level_cost += info['states_evaluated']
                lower_level_info.append(info)
                lower_level_actions+=actions
            cur_i += 1
        self.world.set_silhouette(self.world.full_silhouette) # restore full silhouette to the world we're acting with
        # NOTE(review): if the loop body never runs (empty plan or steps == 0),
        # `current_subgoal` is unbound and the return below raises NameError —
        # confirm callers never request zero subgoals.
        return lower_level_actions,{'states_evaluated':lower_level_cost,
                                    'sg_planning_cost':sg_planning_cost,
                                    '_subgoal_sequence':sequence,
                                    'decomposed_silhouette': current_subgoal.target}
    def plan_subgoals(self,verbose=False):
        """Plan a sequence of subgoals. First, we need to compute a sequence of subgoals however many steps in advance (since completion depends on subgoals). Then, we compute the cost and value of every subgoal in the sequence. Finally, we choose the sequence of subgoals that maximizes the total value over all subgoals within.

        Returns a (chosen_sequence, number_of_states_evaluated) tuple."""
        self.decomposer.set_silhouette(self.world.full_silhouette) #make sure that the decomposer has the right silhouette
        # Candidate subgoal sequences up to `lookahead` long; shorter sequences
        # are only included when include_subsequences is True.
        sequences = self.decomposer.get_sequences(state = self.world.current_state,length=self.lookahead,filter_for_length=not self.include_subsequences)
        if verbose:
            print("Got",len(sequences),"sequences:")
            for sequence in sequences:
                print([g.name for g in sequence])
        # we need to score each in sequence (as it depends on the state before)
        number_of_states_evaluated = self.score_subgoals_in_sequence(sequences,verbose=verbose)
        # now we need to find the sequences that maximizes the total value of the parts according to the formula $V_{Z}^{g}(s)=\max _{z \in Z}\left\{R(s, z)-C_{\mathrm{Alg}}(s, z)+V_{Z}^{g}(z)\right\}$
        return self.choose_sequence(sequences,verbose=verbose),number_of_states_evaluated #return the sequence of subgoals with the highest score
    def choose_sequence(self, sequences,verbose=False):
        """Chooses the sequence that maximizes $V_{Z}^{g}(s)=\max _{z \in Z}\left\{R(s, z)-C_{\mathrm{Alg}}(s, z)+V_{Z}^{g}(z)\right\}$ including weighing by lambda

        Ties between equally scored sequences are broken uniformly at random
        with a fixed seed (self.random_seed) for reproducibility."""
        scores = [None]*len(sequences)
        for i in range(len(sequences)):
            scores[i] = self.score_sequence(sequences[i])
            if verbose: print("Scoring sequence",i+1,"of",len(sequences),"->",[g.name for g in sequences[i]],"score:",scores[i])
        if verbose: print("Chose sequence:\n",sequences[scores.index(max(scores))])
        # Collect all sequences tied for the best score.
        top_indices = [i for i in range(len(scores)) if scores[i] == max(scores)]
        top_sequences = [sequences[i] for i in top_indices]
        seed(self.random_seed) #fix random seed
        return choice(top_sequences)
    def score_sequence(self,sequence):
        """Compute the value of a single sequence with precomputed S,R,C. Assigns BAD_SCORE. If no solution can be found, the one with highest total reward is chosen.

        The score is sum(R - C * c_weight) over subgoals; BAD_SCORE is added
        once if any subgoal is unscored, too unlikely to succeed, or rewardless,
        so penalized sequences still rank among themselves by total reward."""
        score = 0
        penalized = False
        for subgoal in sequence:
            if subgoal.C is None or subgoal.S is None or subgoal.S < self.S_threshold or subgoal.R <= 0:
                # we have a case where the subgoal computation was aborted early or we should ignore the subgoal because the success rate is too low or the reward is zero (subgoal already done or empty)
                penalized = True
            try:
                # compute regular score
                subgoal_score = subgoal.R - subgoal.C * self.c_weight
            except:
                #missing C—happens only in the case of fast_fail
                subgoal_score = subgoal.R
                penalized = True
            score += subgoal_score
        return score + (BAD_SCORE * penalized)
    def score_subgoals_in_sequence(self,sequences,verbose=False):
        """Add C,R,S to the subgoals in the sequences

        Mutates each subgoal in place (fields R, C, S) and returns the total
        number of lower-level states evaluated while scoring."""
        number_of_states_evaluated = 0
        seq_counter = 0 # for verbose printing
        for sequence in sequences: #reference or copy?
            seq_counter += 1 # for verbose printing
            sg_counter = 0 # for verbose printing
            # NOTE(review): `prior_world` is never advanced to the winning
            # world between subgoals, so every subgoal in a sequence is scored
            # from the original state — confirm this is intended (the break on
            # `winning_world is None` below suggests chaining was planned).
            prior_world = self.world
            for subgoal in sequence:
                sg_counter += 1 # for verbose printing
                #get reward and cost and success of that particular subgoal and store the resulting world
                R = self.reward_of_subgoal(subgoal.target,prior_world.current_state.blockmap)
                S,C,winning_world,total_cost,stuck = self.success_and_cost_of_subgoal(subgoal.target,prior_world,iterations=self.S_iterations)
                number_of_states_evaluated += total_cost
                if verbose:
                    print("For sequence",seq_counter,'/',len(sequences),
                          "scored subgoal",
                          sg_counter,'/',len(sequence),"named",
                          subgoal.name,
                          "with C:"+str(C)," R:"+str(R)," S:"+str(S))
                #store in the subgoal
                subgoal.R = R + stuck * BAD_SCORE
                subgoal.C = C
                subgoal.S = S
                #if we can't solve it to have a base for the next one, we break
                if winning_world is None:
                    break
        return number_of_states_evaluated
def reward_of_subgoal(self,decomposition,prior_blockmap):
"""Gets the unscaled reward of a subgoal: the area of a figure that we can fill out by completing the subgoal in number of cells beyond what is already filled out."""
return np.sum((decomposition * self.world.full_silhouette) - (prior_blockmap > 0))
def success_and_cost_of_subgoal(self,decomposition,prior_world = None, iterations=1,max_steps = 20,fast_fail = False):
"""The cost of solving for a certain subgoal given the current block construction"""
if prior_world is None:
prior_world = self.world
# generate key for cache
key = decomposition * 100 - (prior_world.current_state.order_invariant_blockmap() > 0)
key = key.tostring() #make hashable
if key in self._cached_subgoal_evaluations:
# print("Cache hit for",key)
cached_eval = self._cached_subgoal_evaluations[key]
return cached_eval['S'],cached_eval['C'],cached_eval['winning_world'],1,cached_eval['stuck'] #returning 1 as lookup cost, not the cost it tool to calculate the subgoal originally
current_world = copy.deepcopy(prior_world)
costs = 0
wins = 0
winning_world = None
stuck = 0 # have we taken no actions in all iterations?
for i in range(iterations):
temp_world = copy.deepcopy(current_world)
temp_world.set_silhouette(decomposition)
temp_world.current_state.clear() #clear caches
self.lower_agent.world = temp_world
steps = 0
while temp_world.status()[0] == 'Ongoing' and steps < max_steps:
_,info = self.lower_agent.act()
costs += info['states_evaluated']
steps += 1
wins += temp_world.status()[0] == 'Win'
if steps == 0: #we have no steps, which means that the subgoal will lead to infinite costs
stuck += 1
if temp_world.status()[0] == 'Win':
winning_world = copy.deepcopy(temp_world)
#break early to save performance in case of fail
if fast_fail and temp_world.status()[0] == 'Fail':
#store cached evaluation
#NOTE that this will lead to a state being "blacklisted" if it fails once
cached_eval = {'S':wins/iterations,'C':costs/iterations,'winning_world':winning_world,'stuck':stuck == i}
self._cached_subgoal_evaluations[key] = cached_eval
return 0,None,None,costs,stuck == iterations
#store cached evaluation
cached_eval = {'S':wins/iterations,'C':costs/iterations,'winning_world':winning_world,'stuck':stuck == iterations}
self._cached_subgoal_evaluations[key] = cached_eval
return wins/iterations,costs/iterations,winning_world,costs/iterations,stuck == iterations
class Full_Sample_Subgoal_Planning_Agent(Resource_Rational_Subgoal_Planning_Agent):
    """Same as subgoal planning agent, only that we act the entire sequence of subgoals after planning and plan the entire sequence."""

    def __init__(self,
                 world=None,
                 decomposer = None,
                 c_weight = 1000,
                 S_treshold=0.8,
                 S_iterations=1,
                 lower_agent = BFS_Lookahead_Agent(only_improving_actions=True),
                 random_seed = None):
        # Forces maximal lookahead and disables subsequences so the planner
        # only considers complete decompositions.
        super().__init__(world=world, decomposer=decomposer, lookahead=MAX_NUMBER_OF_SUBGOALS, include_subsequences=False, c_weight=c_weight, S_treshold=S_treshold, S_iterations=S_iterations, lower_agent=lower_agent, random_seed=random_seed)

    def act(self, verbose=False):
        """Plans and acts entire sequence"""
        # NOTE(review): this narrows the parent's act(steps=..., verbose=...)
        # signature; callers passing `steps` positionally would bind it to
        # `verbose` here — confirm no such callers exist.
        return super().act(steps=MAX_NUMBER_OF_SUBGOALS, verbose=verbose)
| 56.364017 | 332 | 0.640264 |
e315dda9b801032de69cd57152bebcc968ebd118 | 4,838 | py | Python | scoreboard/tests/validators_test.py | outdex/ctfscoreboard | ab662e26544a49ba2a80cc0ca48924001e14d9d2 | [
"Apache-2.0"
] | 181 | 2016-07-13T17:08:44.000Z | 2021-11-08T10:30:34.000Z | scoreboard/tests/validators_test.py | outdex/ctfscoreboard | ab662e26544a49ba2a80cc0ca48924001e14d9d2 | [
"Apache-2.0"
] | 250 | 2016-06-02T18:11:46.000Z | 2021-06-30T16:34:03.000Z | scoreboard/tests/validators_test.py | outdex/ctfscoreboard | ab662e26544a49ba2a80cc0ca48924001e14d9d2 | [
"Apache-2.0"
] | 78 | 2016-06-18T07:53:27.000Z | 2022-02-15T02:21:45.000Z | # Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scoreboard.tests import base
from scoreboard import errors
from scoreboard import models
from scoreboard import validators
class ChallengeStub(object):
def __init__(self, answer, validator='static_pbkdf2'):
self.answer_hash = answer
self.validator = validator
class StaticValidatorTest(base.BaseTestCase):
def testStaticValidator(self):
chall = ChallengeStub(None)
validator = validators.GetValidatorForChallenge(chall)
self.assertFalse(validator.validate_answer('fooabc', None))
validator.change_answer('fooabc')
self.assertTrue(validator.validate_answer('fooabc', None))
self.assertFalse(validator.validate_answer('abcfoo', None))
class CaseStaticValidatorTest(base.BaseTestCase):
def testCaseStaticValidator(self):
chall = ChallengeStub(None, validator='static_pbkdf2_ci')
validator = validators.GetValidatorForChallenge(chall)
self.assertFalse(validator.validate_answer('foo', None))
validator.change_answer('FooBar')
for test in ('FooBar', 'foobar', 'FOOBAR', 'fooBAR'):
self.assertTrue(
validator.validate_answer(test, None),
msg='Case failed: {}'.format(test))
for test in ('barfoo', 'bar', 'foo', None):
self.assertFalse(
validator.validate_answer(test, None),
msg='Case failed: {}'.format(test))
class RegexValidatorTest(base.BaseTestCase):
def makeValidator(self, regex):
"""Construct a validator."""
chall = ChallengeStub(regex, validator='regex')
return validators.GetValidatorForChallenge(chall)
def testRegexWorks(self):
v = self.makeValidator('[abc]+')
self.assertTrue(v.validate_answer('aaa', None))
self.assertTrue(v.validate_answer('abc', None))
self.assertFalse(v.validate_answer('ddd', None))
self.assertFalse(v.validate_answer('aaad', None))
self.assertFalse(v.validate_answer('AAA', None))
def testRegexChangeWorks(self):
v = self.makeValidator('[abc]+')
self.assertTrue(v.validate_answer('a', None))
self.assertFalse(v.validate_answer('foo', None))
v.change_answer('fo+')
self.assertTrue(v.validate_answer('foo', None))
self.assertFalse(v.validate_answer('a', None))
class RegexCaseValidatorTest(base.BaseTestCase):
def makeValidator(self, regex):
"""Construct a validator."""
chall = ChallengeStub(regex, validator='regex_ci')
return validators.GetValidatorForChallenge(chall)
def testRegexWorks(self):
v = self.makeValidator('[abc]+')
self.assertTrue(v.validate_answer('aaa', None))
self.assertTrue(v.validate_answer('abc', None))
self.assertFalse(v.validate_answer('ddd', None))
self.assertFalse(v.validate_answer('aaad', None))
self.assertTrue(v.validate_answer('AAA', None))
def testRegexChangeWorks(self):
v = self.makeValidator('[abc]+')
self.assertTrue(v.validate_answer('a', None))
self.assertFalse(v.validate_answer('foo', None))
v.change_answer('fo+')
self.assertTrue(v.validate_answer('Foo', None))
self.assertFalse(v.validate_answer('a', None))
class NonceValidatorTest(base.BaseTestCase):
def setUp(self):
super(NonceValidatorTest, self).setUp()
self.chall = models.Challenge.create(
'foo', 'bar', 100, '', unlocked=True,
validator='nonce_166432')
self.validator = validators.GetValidatorForChallenge(self.chall)
self.validator.change_answer('secret123')
self.team = models.Team.create('footeam')
models.commit()
def testNonceValidator_Basic(self):
answer = self.validator.make_answer(1)
self.assertTrue(self.validator.validate_answer(answer, self.team))
def testNonceValidator_Dupe(self):
answer = self.validator.make_answer(5)
self.assertTrue(self.validator.validate_answer(answer, self.team))
models.commit()
self.assertTrue(self.validator.validate_answer(answer, self.team))
self.assertRaises(errors.IntegrityError, models.commit)
| 38.094488 | 74 | 0.678793 |
e4f9d5bc52e49d3f2e971c75ceb67d24905e13fb | 33,180 | py | Python | complaint_search/tests/test_views_search.py | DalavanCloud/ccdb5-api | 9e0c4bc6e0d33bff153c1a205c979b7ecaa847da | [
"CC0-1.0"
] | 1 | 2019-02-25T00:49:18.000Z | 2019-02-25T00:49:18.000Z | complaint_search/tests/test_views_search.py | DalavanCloud/ccdb5-api | 9e0c4bc6e0d33bff153c1a205c979b7ecaa847da | [
"CC0-1.0"
] | null | null | null | complaint_search/tests/test_views_search.py | DalavanCloud/ccdb5-api | 9e0c4bc6e0d33bff153c1a205c979b7ecaa847da | [
"CC0-1.0"
] | null | null | null | from django.core.urlresolvers import reverse
from django.conf import settings
from django.core.cache import cache
from django.http import HttpResponse, StreamingHttpResponse
from rest_framework import status
from rest_framework.test import APITestCase
from unittest import skip
import copy
import mock
from datetime import date, datetime
from elasticsearch import TransportError
from complaint_search.defaults import (
AGG_EXCLUDE_FIELDS,
FORMAT_CONTENT_TYPE_MAP,
PARAMS,
)
from complaint_search.es_interface import search
from complaint_search.serializer import SearchInputSerializer
from complaint_search.throttling import (
SearchAnonRateThrottle,
ExportUIRateThrottle,
ExportAnonRateThrottle,
_CCDB_UI_URL,
)
class SearchTests(APITestCase):

    def setUp(self):
        # Remember the real throttle rates so tearDown can restore them.
        self.orig_search_anon_rate = SearchAnonRateThrottle.rate
        self.orig_export_ui_rate = ExportUIRateThrottle.rate
        self.orig_export_anon_rate = ExportAnonRateThrottle.rate
        # Setting rates to something really big so it doesn't affect testing
        SearchAnonRateThrottle.rate = '2000/min'
        ExportUIRateThrottle.rate = '2000/min'
        ExportAnonRateThrottle.rate = '2000/min'
    def tearDown(self):
        # Clear the throttle-tracking cache so counts don't leak across tests.
        cache.clear()
        # Restore the class-level throttle rates mutated in setUp.
        SearchAnonRateThrottle.rate = self.orig_search_anon_rate
        ExportUIRateThrottle.rate = self.orig_export_ui_rate
        ExportAnonRateThrottle.rate = self.orig_export_anon_rate
def buildDefaultParams(self, overrides):
params = copy.deepcopy(PARAMS)
params.update(overrides)
return params
    @mock.patch('complaint_search.es_interface.search')
    def test_search_no_param(self, mock_essearch):
        """
        Searching with no parameters
        """
        url = reverse('complaint_search:search')
        mock_essearch.return_value = 'OK'
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # With no query params, the view should pass exactly the defaults.
        mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                              **self.buildDefaultParams({}))
        self.assertEqual('OK', response.data)
    @mock.patch('complaint_search.es_interface.search')
    def test_search_cors_headers(self, mock_essearch):
        """
        Make sure the response has CORS headers in debug mode
        """
        # NOTE(review): DEBUG is flipped globally and never restored, which can
        # leak into later tests — consider django's override_settings instead.
        settings.DEBUG = True
        url = reverse('complaint_search:search')
        mock_essearch.return_value = 'OK'
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue(response.has_header('Access-Control-Allow-Origin'))
    @mock.patch('complaint_search.views.datetime')
    @mock.patch('complaint_search.es_interface.search')
    def test_search_with_format(self, mock_essearch, mock_dt):
        """
        Searching with format
        """
        # Python 2 idiom: dict.iteritems does not exist on Python 3.
        for k, v in FORMAT_CONTENT_TYPE_MAP.iteritems():
            url = reverse('complaint_search:search')
            params = {"format": k}
            mock_essearch.return_value = 'OK'
            # Freeze "now" so the attachment filename below is deterministic.
            mock_dt.now.return_value = datetime(2017,1,1,12,0)
            response = self.client.get(url, params)
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertIn(v, response.get('Content-Type'))
            self.assertEqual(response.get('Content-Disposition'), 'attachment; filename="complaints-2017-01-01_12_00.{}"'.format(k))
            self.assertTrue(isinstance(response, StreamingHttpResponse))
        # NOTE(review): Mock has no `has_calls` method — this line silently
        # creates an unused child mock and asserts nothing. The intended call
        # is `assert_has_calls`, but enabling it would require the expected
        # calls to include the full keyword set actually passed to search().
        mock_essearch.has_calls([ mock.call(format=k) for k in FORMAT_CONTENT_TYPE_MAP ], any_order=True)
        self.assertEqual(len(FORMAT_CONTENT_TYPE_MAP), mock_essearch.call_count)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_field__valid(self, mock_essearch):
    """Every declared field choice is accepted and mapped before hitting search()."""
    url = reverse('complaint_search:search')
    for field in SearchInputSerializer.FIELD_CHOICES:
        params = {"field": field[0]}
        mock_essearch.return_value = 'OK'
        response = self.client.get(url, params)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual('OK', response.data)
    # The serializer may rename a field via FIELD_MAP before passing it on.
    calls = [mock.call(agg_exclude=AGG_EXCLUDE_FIELDS,
                       **self.buildDefaultParams({
                           "field": SearchInputSerializer.FIELD_MAP.get(field_pair[0],
                                                                        field_pair[0])}))
             for field_pair in SearchInputSerializer.FIELD_CHOICES]
    mock_essearch.assert_has_calls(calls)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_field__invalid_choice(self, mock_essearch):
    """An unknown ``field`` value is rejected with 400 and never reaches search()."""
    url = reverse('complaint_search:search')
    params = {"field": "invalid_choice"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"field": ["\"invalid_choice\" is not a valid choice."]},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_size__valid(self, mock_essearch):
    """A valid ``size`` is forwarded to es_interface.search unchanged."""
    url = reverse('complaint_search:search')
    params = {"size": 4}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams(params))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_size__valid_zero(self, mock_essearch):
    """``size=0`` is a legal boundary value, not treated as missing."""
    url = reverse('complaint_search:search')
    params = {"size": 0}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams(params))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_size__invalid_type(self, mock_essearch):
    """A non-integer ``size`` is rejected with 400 before search() is called."""
    url = reverse('complaint_search:search')
    params = {"size": "not_integer"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"size": ["A valid integer is required."]},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_size__invalid_smaller_than_min_number(self, mock_essearch):
    """``size`` below the minimum (0) is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"size": -1}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual(
        {"size": ["Ensure this value is greater than or equal to 0."]},
        response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_size__invalid_exceed_max_number(self, mock_essearch):
    """``size`` above the maximum (10,000,000) is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"size": 10000001}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual(
        {"size": ["Ensure this value is less than or equal to 10000000."]},
        response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_frm__valid(self, mock_essearch):
    """A valid ``frm`` offset is forwarded to es_interface.search unchanged."""
    url = reverse('complaint_search:search')
    params = {"frm": 10}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams(params))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_frm__invalid_type(self, mock_essearch):
    """A non-integer ``frm`` is rejected with 400 before search() is called."""
    url = reverse('complaint_search:search')
    params = {"frm": "not_integer"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"frm": ["A valid integer is required."]},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_frm__invalid_smaller_than_min_number(self, mock_essearch):
    """``frm`` below the minimum (0) is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"frm": -1}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual(
        {"frm": ["Ensure this value is greater than or equal to 0."]},
        response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_frm__invalid_exceed_max_number(self, mock_essearch):
    """``frm`` above the maximum (10,000,000) is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"frm": 10000001}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual(
        {"frm": ["Ensure this value is less than or equal to 10000000."]},
        response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_frm__invalid_frm_not_multiples_of_size(self, mock_essearch):
    """``frm`` must be zero or a multiple of ``size``; 4 is not, so 400."""
    url = reverse('complaint_search:search')
    params = {"frm": 4}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual(
        {"non_field_errors": ["frm is not zero or a multiple of size"]},
        response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_sort__valid(self, mock_essearch):
    """Every declared sort choice is accepted and forwarded to search()."""
    url = reverse('complaint_search:search')
    for sort in SearchInputSerializer.SORT_CHOICES:
        params = {"sort": sort[0]}
        mock_essearch.return_value = 'OK'
        response = self.client.get(url, params)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual('OK', response.data)
    calls = [mock.call(agg_exclude=AGG_EXCLUDE_FIELDS,
                       **self.buildDefaultParams({"sort": sort_pair[0]}))
             for sort_pair in SearchInputSerializer.SORT_CHOICES]
    mock_essearch.assert_has_calls(calls)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_sort__invalid_choice(self, mock_essearch):
    """An unknown ``sort`` value is rejected with 400 and never reaches search()."""
    url = reverse('complaint_search:search')
    params = {"sort": "invalid_choice"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"sort": ["\"invalid_choice\" is not a valid choice."]},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_search_term__valid(self, mock_essearch):
    """A free-text ``search_term`` is forwarded to search() unchanged."""
    url = reverse('complaint_search:search')
    params = {"search_term": "FHA Mortgage"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams(params))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_date_received_min__valid(self, mock_essearch):
    """An ISO ``date_received_min`` string is parsed into a date before search()."""
    url = reverse('complaint_search:search')
    params = {"date_received_min": "2017-04-11"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "date_received_min": date(2017, 4, 11)}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_date_received_min__invalid_format(self, mock_essearch):
    """A malformed ``date_received_min`` is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"date_received_min": "not_a_date"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"date_received_min": ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]."]},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_date_received_max__valid(self, mock_essearch):
    """An ISO ``date_received_max`` string is parsed into a date before search()."""
    url = reverse('complaint_search:search')
    params = {"date_received_max": "2017-04-11"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "date_received_max": date(2017, 4, 11)}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_date_received_max__invalid_format(self, mock_essearch):
    """A malformed ``date_received_max`` is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"date_received_max": "not_a_date"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"date_received_max": ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]."]},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_company_received_min__valid(self, mock_essearch):
    """An ISO ``company_received_min`` string is parsed into a date before search()."""
    url = reverse('complaint_search:search')
    params = {"company_received_min": "2017-04-11"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "company_received_min": date(2017, 4, 11)}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_company_received_min__invalid_format(self, mock_essearch):
    """A malformed ``company_received_min`` is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"company_received_min": "not_a_date"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"company_received_min": ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]."]},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_company_received_max__valid(self, mock_essearch):
    """An ISO ``company_received_max`` string is parsed into a date before search()."""
    url = reverse('complaint_search:search')
    params = {"company_received_max": "2017-04-11"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "company_received_max": date(2017, 4, 11)}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_company_received_max__invalid_format(self, mock_essearch):
    """A malformed ``company_received_max`` is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"company_received_max": "not_a_date"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"company_received_max": ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]."]},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_company__valid(self, mock_essearch):
    """Repeated ``company`` query params are collected into a list for search()."""
    url = reverse('complaint_search:search')
    # Repeated params must be in the URL itself; the test client's data dict
    # cannot express duplicates.
    url += "?company=One%20Bank&company=Bank%202"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "company": ["One Bank", "Bank 2"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_product__valid(self, mock_essearch):
    """Repeated ``product`` query params are collected into a list for search()."""
    url = reverse('complaint_search:search')
    # parameter doesn't represent real data, as client has a hard time
    # taking unicode. The original API will use a bullet u2022 in place
    # of the '-'
    url += "?product=Mortgage-FHA%20Mortgage&product=Payday%20Loan"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "product": ["Mortgage-FHA Mortgage", "Payday Loan"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_issue__valid(self, mock_essearch):
    """Repeated ``issue`` query params are collected into a list for search()."""
    url = reverse('complaint_search:search')
    # parameter doesn't represent real data, as client has a hard time
    # taking unicode. The original API will use a bullet u2022 in place
    # of the '-'
    url += "?issue=Communication%20tactics-Frequent%20or%20repeated%20calls" \
        "&issue=Loan%20servicing,%20payments,%20escrow%20account"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray "# -*- coding: utf-8 -*-" cookie pasted mid-method;
    # coding declarations are only meaningful on the first lines of a file)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "issue": ["Communication tactics-Frequent or repeated calls",
                                                        "Loan servicing, payments, escrow account"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_state__valid(self, mock_essearch):
    """Repeated ``state`` query params are collected into a list for search()."""
    url = reverse('complaint_search:search')
    url += "?state=CA&state=FL&state=VA"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "state": ["CA", "FL", "VA"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_zip_code__valid(self, mock_essearch):
    """Repeated ``zip_code`` query params are collected into a list for search()."""
    url = reverse('complaint_search:search')
    url += "?zip_code=94XXX&zip_code=24236&zip_code=23456"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "zip_code": ["94XXX", "24236", "23456"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_timely__valid(self, mock_essearch):
    """Repeated ``timely`` query params are collected into a list for search()."""
    url = reverse('complaint_search:search')
    url += "?timely=YES&timely=NO"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "timely": ["YES", "NO"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_consumer_disputed__valid(self, mock_essearch):
    """Repeated ``consumer_disputed`` query params become a list for search()."""
    url = reverse('complaint_search:search')
    url += "?consumer_disputed=yes&consumer_disputed=no"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "consumer_disputed": ["yes", "no"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_company_response__valid(self, mock_essearch):
    """Repeated ``company_response`` query params become a list for search()."""
    url = reverse('complaint_search:search')
    url += "?company_response=Closed&company_response=No%20response"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "company_response": ["Closed", "No response"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_company_public_response__valid(self, mock_essearch):
    """Repeated ``company_public_response`` params become a list for search()."""
    url = reverse('complaint_search:search')
    url += "?company_public_response=Closed&company_public_response=No%20response"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "company_public_response": ["Closed", "No response"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_consumer_consent_provided__valid(self, mock_essearch):
    """Repeated ``consumer_consent_provided`` params become a list for search()."""
    url = reverse('complaint_search:search')
    url += "?consumer_consent_provided=Yes&consumer_consent_provided=No"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "consumer_consent_provided": ["Yes", "No"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_has_narrative__valid(self, mock_essearch):
    """Repeated ``has_narrative`` query params become a list for search()."""
    url = reverse('complaint_search:search')
    url += "?has_narrative=Yes&has_narrative=No"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "has_narrative": ["Yes", "No"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_submitted_via__valid(self, mock_essearch):
    """Repeated ``submitted_via`` query params become a list for search()."""
    url = reverse('complaint_search:search')
    url += "?submitted_via=Web&submitted_via=Phone"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "submitted_via": ["Web", "Phone"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_tags__valid(self, mock_essearch):
    """Repeated ``tags`` query params become a list for search()."""
    url = reverse('complaint_search:search')
    url += "?tags=Older%20American&tags=Servicemember"
    mock_essearch.return_value = 'OK'
    response = self.client.get(url)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    # (removed a stray mid-method coding cookie comment)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "tags": ["Older American", "Servicemember"]}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_no_aggs__valid(self, mock_essearch):
    """A boolean ``no_aggs`` flag is forwarded to search() unchanged."""
    url = reverse('complaint_search:search')
    params = {"no_aggs": True}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "no_aggs": True}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_no_aggs__invalid_type(self, mock_essearch):
    """A non-boolean ``no_aggs`` value is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"no_aggs": "Not boolean"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"no_aggs": [u'"Not boolean" is not a valid boolean.']},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_no_highlight__valid(self, mock_essearch):
    """A boolean ``no_highlight`` flag is forwarded to search() unchanged."""
    url = reverse('complaint_search:search')
    params = {"no_highlight": True}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_200_OK, response.status_code)
    mock_essearch.assert_called_once_with(agg_exclude=AGG_EXCLUDE_FIELDS,
                                          **self.buildDefaultParams({
                                              "no_highlight": True}))
    self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_no_highlight__invalid_type(self, mock_essearch):
    """A non-boolean ``no_highlight`` value is rejected with 400."""
    url = reverse('complaint_search:search')
    params = {"no_highlight": "Not boolean"}
    mock_essearch.return_value = 'OK'
    response = self.client.get(url, params)
    self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    mock_essearch.assert_not_called()
    self.assertDictEqual({"no_highlight": [u'"Not boolean" is not a valid boolean.']},
                         response.data)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_search_anon_rate_throttle(self, mock_essearch):
    """Anonymous search requests beyond the configured rate get HTTP 429."""
    url = reverse('complaint_search:search')
    mock_essearch.return_value = 'OK'
    # Restore the real throttle rates saved as self.orig_* (presumably the
    # fixture swaps them out elsewhere — confirm against setUp).
    SearchAnonRateThrottle.rate = self.orig_search_anon_rate
    ExportUIRateThrottle.rate = self.orig_export_ui_rate
    ExportAnonRateThrottle.rate = self.orig_export_anon_rate
    # Rates are strings like "20/min"; the numerator is the request limit.
    limit = int(self.orig_search_anon_rate.split('/')[0])
    for _ in range(limit):
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual('OK', response.data)
    # The first request past the limit must be throttled.
    response = self.client.get(url)
    self.assertEqual(response.status_code, status.HTTP_429_TOO_MANY_REQUESTS)
    self.assertIsNotNone(response.data.get('detail'))
    self.assertIn("Request was throttled", response.data.get('detail'))
    self.assertEqual(limit, mock_essearch.call_count)
    self.assertEqual(20, limit)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_search_ui_rate_throttle(self, mock_essearch):
    """Requests with the CCDB UI referrer are NOT anon-throttled past the limit."""
    url = reverse('complaint_search:search')
    mock_essearch.return_value = 'OK'
    # Restore the real throttle rates saved as self.orig_*.
    SearchAnonRateThrottle.rate = self.orig_search_anon_rate
    ExportUIRateThrottle.rate = self.orig_export_ui_rate
    ExportAnonRateThrottle.rate = self.orig_export_anon_rate
    limit = int(self.orig_search_anon_rate.split('/')[0])
    for _ in range(limit):
        response = self.client.get(url, HTTP_REFERER=_CCDB_UI_URL)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual('OK', response.data)
    # One past the anon limit still succeeds for UI traffic.
    # Use the status constant for consistency with the rest of the suite
    # (was a bare 200 literal).
    response = self.client.get(url, HTTP_REFERER=_CCDB_UI_URL)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual('OK', response.data)
    self.assertEqual(limit + 1, mock_essearch.call_count)
    self.assertEqual(20, limit)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_export_anon_rate_throttle(self, mock_essearch):
    """Anonymous CSV exports beyond the configured rate get HTTP 429."""
    url = reverse('complaint_search:search')
    mock_essearch.return_value = 'OK'
    # Restore the real throttle rates saved as self.orig_*.
    SearchAnonRateThrottle.rate = self.orig_search_anon_rate
    ExportUIRateThrottle.rate = self.orig_export_ui_rate
    ExportAnonRateThrottle.rate = self.orig_export_anon_rate
    limit = int(self.orig_export_anon_rate.split('/')[0])
    for i in range(limit):
        response = self.client.get(url, {"format": "csv"})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue(isinstance(response, StreamingHttpResponse))
    # The first export past the limit must be throttled.
    response = self.client.get(url, {"format": "csv"})
    self.assertEqual(response.status_code, status.HTTP_429_TOO_MANY_REQUESTS)
    self.assertIsNotNone(response.data.get('detail'))
    self.assertIn("Request was throttled", response.data.get('detail'))
    self.assertEqual(limit, mock_essearch.call_count)
    self.assertEqual(2, limit)
@mock.patch('complaint_search.es_interface.search')
def test_search_with_export_ui_rate_throttle(self, mock_essearch):
    """UI-referred CSV exports have their own (higher) limit, then 429."""
    url = reverse('complaint_search:search')
    mock_essearch.return_value = 'OK'
    # Restore the real throttle rates saved as self.orig_*.
    SearchAnonRateThrottle.rate = self.orig_search_anon_rate
    ExportUIRateThrottle.rate = self.orig_export_ui_rate
    ExportAnonRateThrottle.rate = self.orig_export_anon_rate
    limit = int(self.orig_export_ui_rate.split('/')[0])
    for _ in range(limit):
        response = self.client.get(url, {"format": "csv"}, HTTP_REFERER=_CCDB_UI_URL)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue(isinstance(response, StreamingHttpResponse))
    # The first UI export past the limit must be throttled.
    response = self.client.get(url, {"format": "csv"}, HTTP_REFERER=_CCDB_UI_URL)
    self.assertEqual(response.status_code, status.HTTP_429_TOO_MANY_REQUESTS)
    self.assertIsNotNone(response.data.get('detail'))
    self.assertIn("Request was throttled", response.data.get('detail'))
    self.assertEqual(limit, mock_essearch.call_count)
    self.assertEqual(6, limit)
@mock.patch('complaint_search.es_interface.search')
def test_search__transport_error(self, mock_essearch):
    """An Elasticsearch TransportError surfaces as 424 with a friendly message."""
    mock_essearch.side_effect = TransportError('N/A', "Error")
    url = reverse('complaint_search:search')
    response = self.client.get(url)
    # 424 Failed Dependency (literal kept; older DRF versions may not
    # expose a status constant for it — confirm before switching).
    self.assertEqual(response.status_code, 424)
    self.assertDictEqual(
        {"error": "There was an error calling Elasticsearch"},
        response.data
    )
@mock.patch('complaint_search.es_interface.search')
def test_search__big_error(self, mock_essearch):
    """An unexpected exception (e.g. MemoryError) surfaces as a generic 500."""
    mock_essearch.side_effect = MemoryError("Out of memory")
    url = reverse('complaint_search:search')
    response = self.client.get(url)
    self.assertEqual(response.status_code, 500)
    self.assertDictEqual(
        {"error": "There was a problem retrieving your request"},
        response.data
    )
| 48.367347 | 132 | 0.687432 |
a331e3adbe63b4d8df8b0c7742e18b5a1f453c64 | 48,663 | py | Python | superannotate/mixp/utils/parsers.py | dskkato/superannotate-python-sdk | 67eece2d7d06375ad2e502c2282e3b29c9b82631 | [
"MIT"
] | null | null | null | superannotate/mixp/utils/parsers.py | dskkato/superannotate-python-sdk | 67eece2d7d06375ad2e502c2282e3b29c9b82631 | [
"MIT"
] | null | null | null | superannotate/mixp/utils/parsers.py | dskkato/superannotate-python-sdk | 67eece2d7d06375ad2e502c2282e3b29c9b82631 | [
"MIT"
] | null | null | null | def get_project_name(project):
project_name = ""
if isinstance(project, dict):
project_name = project['name']
if isinstance(project, str):
if '/' in project:
project_name = project.split('/')[0]
else:
project_name = project
return project_name
def get_team_metadata(*args, **kwargs):
    """Analytics payload for get_team_metadata; no properties are tracked."""
    return dict(event_name="get_team_metadata", properties={})
def invite_contributor_to_team(*args, **kwargs):
    """Analytics payload for invite_contributor_to_team.

    Reports "CUSTOM" when an admin flag was supplied (keyword or second
    positional argument), "DEFAULT" otherwise.
    """
    supplied = kwargs.get("admin", None) or args[1:2]
    role = "CUSTOM" if supplied else "DEFAULT"
    return {
        "event_name": "invite_contributor_to_team",
        "properties": {
            "Admin": role,
        },
    }
def delete_contributor_to_team_invitation(*args, **kwargs):
    """Analytics payload for delete_contributor_to_team_invitation; nothing tracked."""
    return dict(event_name="delete_contributor_to_team_invitation", properties={})
def search_team_contributors(*args, **kwargs):
    """Analytics payload for search_team_contributors.

    Records, per filter, whether it was supplied positionally or by keyword.
    """
    def given(pos, key):
        # Truthy when either the positional slot or the keyword was provided.
        return bool(args[pos:pos + 1] or kwargs.get(key))

    return {
        "event_name": "search_team_contributors",
        "properties": {
            "Email": given(0, "email"),
            "Name": given(1, "first_name"),
            "Surname": given(2, "last_name"),
        },
    }
def search_projects(*args, **kwargs):
    """Analytics payload for search_projects; tracks project name and metadata flag."""
    named = kwargs.get("name")
    if named:
        project_name = get_project_name(named)
    else:
        project_name = None
        positional = args[0:1]
        if positional:
            project_name = get_project_name(positional[0])
    return {
        "event_name": "search_projects",
        "properties": {
            "Metadata": bool(args[2:3] or kwargs.get("return_metadata")),
            "project_name": project_name,
        },
    }
def create_project(*args, **kwargs):
    """Analytics payload for create_project; tracks project type and name."""
    # Keyword wins; otherwise fall back to the positional argument
    # (raising IndexError if neither was supplied, as before).
    project = kwargs.get("project_name") or args[0]
    project_type = kwargs.get("project_type") or args[2]
    return {
        "event_name": "create_project",
        "properties": {
            "Project Type": project_type,
            "project_name": get_project_name(project),
        },
    }
def create_project_from_metadata(*args, **kwargs):
    """Analytics payload for create_project_from_metadata; tracks only project name."""
    project = kwargs.get("project_metadata", None)
    if not project:
        project = args[0]
    return {
        "event_name": "create_project_from_metadata",
        "properties": {
            "project_name": get_project_name(project)
        }
    }
def clone_project(*args, **kwargs):
    """Analytics payload for clone_project; records which copy options were set."""
    project = kwargs.get("project_name") or args[0]

    def opted(pos, key):
        # Truthy when the option was supplied positionally or by keyword.
        return bool(args[pos:pos + 1] or kwargs.get(key))

    return {
        "event_name": "clone_project",
        "properties": {
            "Copy Classes": opted(3, "copy_annotation_classes"),
            "Copy Settings": opted(4, "copy_settings"),
            "Copy Workflow": opted(5, "copy_workflow"),
            "Copy Contributors": opted(6, "copy_contributors"),
            "project_name": get_project_name(project),
        },
    }
def search_images(*args, **kwargs):
    """Analytics payload for search_images; tracks status/metadata flags and project."""
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    return {
        "event_name": "search_images",
        "properties": {
            "Annotation Status": bool(args[2:3] or kwargs.get("annotation_status", None)),
            "Metadata": bool(args[3:4] or kwargs.get("return_metadata", None)),
            "project_name": get_project_name(project)
        }
    }
def upload_images_to_project(*args, **kwargs):
    """Analytics payload for upload_images_to_project.

    Tracks image count, the annotation-status / from-S3 flags and the
    project name.
    """
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    img_paths = kwargs.get("img_paths", [])
    if not img_paths:
        # Build a fresh list instead of "+=" so a caller-supplied (empty)
        # list is never mutated in place.
        img_paths = list(args[1])
    return {
        "event_name": "upload_images_to_project",
        "properties": {
            "Image Count": len(img_paths),
            "Annotation Status": bool(args[2:3] or kwargs.get("annotation_status", None)),
            "From S3": bool(args[3:4] or kwargs.get("from_s3", None)),
            "project_name": get_project_name(project)
        }
    }
def upload_image_to_project(*args, **kwargs):
    """Analytics payload for upload_image_to_project; tracks name/status flags."""
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    return {
        "event_name": "upload_image_to_project",
        "properties": {
            "Image Name": bool(args[2:3] or kwargs.get("image_name", None)),
            "Annotation Status": bool(args[3:4] or kwargs.get("annotation_status", None)),
            "project_name": get_project_name(project)
        }
    }
def upload_images_from_public_urls_to_project(*args, **kwargs):
    """Analytics payload for upload_images_from_public_urls_to_project.

    Tracks URL count, name/status flags and the project name.
    """
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    img_urls = kwargs.get("img_urls", [])
    if not img_urls:
        # Build a fresh list instead of "+=" so a caller-supplied (empty)
        # list is never mutated in place.
        img_urls = list(args[1])
    return {
        "event_name": "upload_images_from_public_urls_to_project",
        "properties": {
            "Image Count": len(img_urls),
            "Image Name": bool(args[2:3] or kwargs.get("img_names", None)),
            "Annotation Status": bool(args[3:4] or kwargs.get("annotation_status", None)),
            "project_name": get_project_name(project)
        }
    }
def upload_images_from_google_cloud_to_project(*args, **kwargs):
    """Analytics payload for upload_images_from_google_cloud_to_project; project only."""
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    return {
        "event_name": "upload_images_from_google_cloud_to_project",
        "properties": {
            "project_name": get_project_name(project)
        }
    }
def upload_images_from_azure_blob_to_project(*args, **kwargs):
    """Analytics payload for upload_images_from_azure_blob_to_project; project only."""
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    return {
        "event_name": "upload_images_from_azure_blob_to_project",
        "properties": {
            "project_name": get_project_name(project)
        }
    }
def upload_video_to_project(*args, **kwargs):
    """Analytics payload for upload_video_to_project; tracks fps/start/end flags."""
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    return {
        "event_name": "upload_video_to_project",
        "properties": {
            "project_name": get_project_name(project),
            "FPS": bool(args[2:3] or kwargs.get("target_fps", None)),
            "Start": bool(args[3:4] or kwargs.get("start_time", None)),
            "End": bool(args[4:5] or kwargs.get("end_time", None))
        }
    }
def attach_image_urls_to_project(*args, **kwargs):
    """Analytics payload for attach_image_urls_to_project; tracks status flag."""
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    return {
        "event_name": "attach_image_urls_to_project",
        "properties": {
            "project_name": get_project_name(project),
            "Annotation Status": bool(args[2:3] or kwargs.get("annotation_status", None))
        }
    }
def set_images_annotation_statuses(*args, **kwargs):
    """Analytics payload for set_images_annotation_statuses.

    Tracks the image count, the new status and the project name.
    """
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    annotation_status = kwargs.get("annotation_status", None)
    if not annotation_status:
        annotation_status = args[2]
    image_names = kwargs.get("image_names", [])
    if not image_names:
        image_names = args[1]
    return {
        "event_name": "set_images_annotation_statuses",
        "properties": {
            "project_name": get_project_name(project),
            "Image Count": len(image_names),
            "Annotation Status": annotation_status
        }
    }
def get_image_annotations(*args, **kwargs):
    """Analytics payload for get_image_annotations; tracks only project name."""
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    return {
        "event_name": "get_image_annotations",
        "properties": {
            "project_name": get_project_name(project),
        }
    }
def get_image_preannotations(*args, **kwargs):
    """Analytics payload for get_image_preannotations; tracks only project name."""
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    return {
        "event_name": "get_image_preannotations",
        "properties": {
            "project_name": get_project_name(project),
        }
    }
def download_image_annotations(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "download_image_annotations",
"properties": {
"project_name": get_project_name(project),
}
}
def download_image_preannotations(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "download_image_preannotations",
"properties": {
"project_name": get_project_name(project),
}
}
def get_image_metadata(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_image_metadata",
"properties": {
"project_name": get_project_name(project),
}
}
def get_image_bytes(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_image_bytes",
"properties": {
"project_name": get_project_name(project),
}
}
def delete_image(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "delete_image",
"properties": {
"project_name": get_project_name(project),
}
}
def add_annotation_comment_to_image(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "add_annotation_comment_to_image",
"properties": {
"project_name": get_project_name(project),
}
}
def delete_annotation_class(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "delete_annotation_class",
"properties": {
"project_name": get_project_name(project),
}
}
def get_annotation_class_metadata(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_annotation_class_metadata",
"properties": {
"project_name": get_project_name(project),
}
}
def download_annotation_classes_json(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "download_annotation_classes_json",
"properties": {
"project_name": get_project_name(project),
}
}
def search_annotation_classes(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "search_annotation_classes",
"properties": {
"project_name": get_project_name(project),
}
}
def unshare_project(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "unshare_project",
"properties": {
"project_name": get_project_name(project),
}
}
def get_project_image_count(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_project_image_count",
"properties": {
"project_name": get_project_name(project),
}
}
def get_project_settings(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_project_settings",
"properties": {
"project_name": get_project_name(project),
}
}
def set_project_settings(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "set_project_settings",
"properties": {
"project_name": get_project_name(project),
}
}
def get_project_default_image_quality_in_editor(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_project_default_image_quality_in_editor",
"properties": {
"project_name": get_project_name(project),
}
}
def get_project_metadata(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_project_metadata",
"properties": {
"project_name": get_project_name(project),
}
}
def delete_project(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "delete_project",
"properties": {
"project_name": get_project_name(project),
}
}
def rename_project(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "rename_project",
"properties": {
"project_name": get_project_name(project),
}
}
def get_project_workflow(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_project_workflow",
"properties": {
"project_name": get_project_name(project),
}
}
def set_project_workflow(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "set_project_workflow",
"properties": {
"project_name": get_project_name(project),
}
}
def create_folder(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "create_folder",
"properties": {
"project_name": get_project_name(project),
}
}
def get_folder_metadata(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_folder_metadata",
"properties": {
"project_name": get_project_name(project),
}
}
def get_project_and_folder_metadata(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "get_project_and_folder_metadata",
"properties": {
"project_name": get_project_name(project),
}
}
def rename_folder(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "rename_folder",
"properties": {
"project_name": get_project_name(project),
}
}
def stop_model_training(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "stop_model_training",
"properties": {
"project_name": get_project_name(project),
}
}
def download_model(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "download_model",
"properties": {
"project_name": get_project_name(project),
}
}
def plot_model_metrics(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "plot_model_metrics",
"properties": {
"project_name": get_project_name(project),
}
}
def delete_model(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "delete_model",
"properties": {
"project_name": get_project_name(project),
}
}
def convert_project_type(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "convert_project_type",
"properties": {
"project_name": get_project_name(project),
}
}
def convert_json_version(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "convert_json_version",
"properties": {
"project_name": get_project_name(project),
}
}
def df_to_annotations(*args, **kwargs):
project = kwargs.get("project", None)
if not project:
project = args[0]
return {
"event_name": "df_to_annotations",
"properties": {
"project_name": get_project_name(project),
}
}
def upload_image_annotations(*args, **kwargs):
    """Build the analytics payload for an ``upload_image_annotations`` call."""
    project = kwargs.get("project") or args[0]
    return {
        "event_name": "upload_image_annotations",
        "properties": {
            "project_name": get_project_name(project),
            # True when a pixel mask was supplied, positionally or by keyword.
            "Pixel": bool(len(args) > 3 or "mask" in kwargs),
        }
    }


def download_image(*args, **kwargs):
    """Build the analytics payload for a ``download_image`` call."""
    project = kwargs.get("project") or args[0]
    optional_flags = (
        ("Download Annotations", 3, "include_annotations"),
        ("Download Fuse", 4, "include_fuse"),
        ("Download Overlay", 5, "include_overlay"),
    )
    properties = {"project_name": get_project_name(project)}
    for label, index, keyword in optional_flags:
        properties[label] = bool(len(args) > index or keyword in kwargs)
    return {"event_name": "download_image", "properties": properties}


def copy_image(*args, **kwargs):
    """Build the analytics payload for a ``copy_image`` call."""
    project = kwargs.get("source_project") or args[0]
    return {
        "event_name": "copy_image",
        "properties": {
            "project_name": get_project_name(project),
            "Copy Annotations": bool(len(args) > 3 or "include_annotations" in kwargs),
            "Copy Annotation Status": bool(len(args) > 4 or "copy_annotation_status" in kwargs),
            "Copy Pin": bool(len(args) > 5 or "copy_pin" in kwargs),
        }
    }
def run_prediction(*args, **kwargs):
    """Build the analytics payload for a ``run_prediction`` call."""
    project = kwargs.get("project") or args[0]
    from superannotate.db.projects import get_project_metadata as sa_get_project_metadata
    project_metadata = sa_get_project_metadata(get_project_name(project))
    image_list = kwargs.get("images_list") or args[1]
    return {
        "event_name": "run_prediction",
        "properties": {
            "Project Type": project_metadata['type'],
            "Image Count": len(image_list)
        }
    }


def run_segmentation(*args, **kwargs):
    """Build the analytics payload for a ``run_segmentation`` call."""
    project = kwargs.get("project") or args[0]
    from superannotate.db.projects import get_project_metadata as sa_get_project_metadata
    project_metadata = sa_get_project_metadata(get_project_name(project))
    image_list = kwargs.get("images_list") or args[1]
    model = kwargs.get("model") or args[2]
    return {
        "event_name": "run_segmentation",
        "properties": {
            "Project Type": project_metadata['type'],
            "Image Count": len(image_list),
            "Model": model
        }
    }
def upload_videos_from_folder_to_project(*args, **kwargs):
    """Build the analytics payload for an ``upload_videos_from_folder_to_project`` call.

    Counts every directory entry in ``folder_path`` (non-recursive), the
    same ``*`` glob the upload itself would consider.
    """
    folder_path = kwargs.get("folder_path") or args[1]
    from pathlib import Path
    entry_count = sum(1 for _ in Path(folder_path).glob('*'))
    return {
        "event_name": "upload_videos_from_folder_to_project",
        "properties": {
            "Video Count": entry_count,
        }
    }
def export_annotation(*args, **kwargs):
    """Build the analytics payload for an ``export_annotation`` call.

    ``project_type`` and ``task`` fall back to their positional slots when
    present, otherwise to the SDK defaults.
    """
    dataset_format = kwargs.get("dataset_format") or args[2]
    project_type = kwargs.get("project_type")
    if not project_type:
        project_type = args[4] if len(args) > 4 else 'Vector'
    task = kwargs.get("task")
    if not task:
        task = args[5] if len(args) > 5 else "object_detection"
    return {
        "event_name": "export_annotation",
        "properties": {
            "Format": dataset_format,
            "Project Type": project_type,
            "Task": task
        }
    }


def import_annotation(*args, **kwargs):
    """Build the analytics payload for an ``import_annotation`` call.

    Mirrors :func:`export_annotation`, including the same positional
    fallbacks and SDK defaults.
    """
    dataset_format = kwargs.get("dataset_format") or args[2]
    project_type = kwargs.get("project_type")
    if not project_type:
        project_type = args[4] if len(args) > 4 else 'Vector'
    task = kwargs.get("task")
    if not task:
        task = args[5] if len(args) > 5 else "object_detection"
    return {
        "event_name": "import_annotation",
        "properties": {
            "Format": dataset_format,
            "Project Type": project_type,
            "Task": task
        }
    }
def move_images(*args, **kwargs):
    """Build the analytics payload for a ``move_images`` call.

    ``False`` is the "keyword omitted" sentinel for ``image_names``; an
    explicit ``None`` means "every image in the project" and is expanded
    via a search, matching the convention used by ``copy_images``.
    """
    project = kwargs.get("source_project", None)
    if not project:
        project = args[0]
    image_names = kwargs.get("image_names", False)
    if image_names is False:
        # BUG FIX: the positional image_names slot is args[1]; args[0] is
        # the source project (see copy_images, which shares this layout).
        # The old code read args[0] and counted the project instead.
        image_names = args[1]
    if image_names is None:
        from superannotate.db.images import search_images as sa_search_images
        image_names = sa_search_images(project)
    return {
        "event_name": "move_images",
        "properties":
            {
                "project_name": get_project_name(project),
                "Image Count": len(image_names),
                "Copy Annotations": bool(args[3:4] or ("include_annotations" in kwargs)),
                "Copy Annotation Status": bool(args[4:5] or ("copy_annotation_status" in kwargs)),
                "Copy Pin": bool(args[5:6] or ("copy_pin" in kwargs)),
            }
    }
def copy_images(*args, **kwargs):
    """Build the analytics payload for a ``copy_images`` call."""
    project = kwargs.get("source_project") or args[0]
    # ``False`` is the "keyword omitted" sentinel; an explicit ``None``
    # means "every image in the project" and is expanded via a search.
    image_names = kwargs.get("image_names", False)
    if image_names == False:
        image_names = args[1]
    if image_names == None:
        from superannotate.db.images import search_images as sa_search_images
        image_names = sa_search_images(project)
    properties = {
        "project_name": get_project_name(project),
        "Image Count": len(image_names),
        "Copy Annotations": bool(len(args) > 3 or "include_annotations" in kwargs),
        "Copy Annotation Status": bool(len(args) > 4 or "copy_annotation_status" in kwargs),
        "Copy Pin": bool(len(args) > 5 or "copy_pin" in kwargs),
    }
    return {"event_name": "copy_images", "properties": properties}
def consensus(*args, **kwargs):
    """Build the analytics payload for a ``consensus`` call.

    Positional layout of the wrapped SDK call:
    ``(project, folder_names, export_root, image_list, annot_type,
    show_plots)``.  The string ``"empty"`` is the "keyword not supplied"
    sentinel so an explicit ``None``/``False`` keyword is still honoured.
    """
    project = kwargs.get("source_project", None)
    if not project:
        project = args[0]
    folder_names = kwargs.get("folder_names", None)
    if not folder_names:
        folder_names = args[1]
    image_list = kwargs.get("image_list", "empty")
    if image_list == "empty":
        # BUG FIX: image_list is the 4th positional argument (args[3]);
        # args[4] is annot_type, which the old code read here as well
        # (compare benchmark, whose slots are shifted by one extra arg).
        image_list = args[3:4]
        if image_list:
            if image_list[0] is None:
                # An explicit None means "every image in the project".
                from superannotate.db.images import search_images as sa_search_images
                image_list = sa_search_images(project)
            else:
                image_list = image_list[0]
    annot_type = kwargs.get("annot_type", "empty")
    if annot_type == 'empty':
        annot_type = args[4:5]
        if not annot_type:
            annot_type = "bbox"
        else:
            annot_type = args[4]
    show_plots = kwargs.get("show_plots", "empty")
    if show_plots == "empty":
        show_plots = args[5:6]
        if not show_plots:
            show_plots = False
        else:
            show_plots = args[5]
    return {
        "event_name": "consensus",
        "properties":
            {
                "Folder Count": len(folder_names),
                "Image Count": len(image_list),
                "Annotation Type": annot_type,
                "Plot": show_plots
            }
    }
def benchmark(*args, **kwargs):
    """Build the analytics payload for a ``benchmark`` call.

    Positional layout of the wrapped SDK call:
    ``(project, gt_folder, folder_names, export_root, image_list,
    annot_type, show_plots)``.  ``"empty"`` is the "keyword not supplied"
    sentinel so explicit falsy keywords are still honoured.
    """
    project = kwargs.get("source_project") or args[0]
    folder_names = kwargs.get("folder_names") or args[2]
    image_list = kwargs.get("image_list", "empty")
    if image_list == "empty":
        image_list = args[4:5]
        if image_list:
            if image_list[0] is None:
                # An explicit None means "every image in the project".
                from superannotate.db.images import search_images as sa_search_images
                image_list = sa_search_images(project)
            else:
                image_list = image_list[0]
    annot_type = kwargs.get("annot_type", "empty")
    if annot_type == 'empty':
        annot_type = args[5] if len(args) > 5 else "bbox"
    show_plots = kwargs.get("show_plots", "empty")
    if show_plots == "empty":
        show_plots = args[6] if len(args) > 6 else False
    return {
        "event_name": "benchmark",
        "properties": {
            "Folder Count": len(folder_names),
            "Image Count": len(image_list),
            "Annotation Type": annot_type,
            "Plot": show_plots
        }
    }
def upload_annotations_from_folder_to_project(*args, **kwargs):
    """Build the analytics payload for an ``upload_annotations_from_folder_to_project`` call."""
    # BUG FIX: this SDK call's first parameter is ``project`` (there is no
    # ``source_project`` keyword here), so a keyword-only call used to miss
    # the lookup and fall through to args[0], raising IndexError.
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    from superannotate.db.projects import get_project_metadata as sa_get_project_metadata
    project_name = get_project_name(project)
    project_metadata = sa_get_project_metadata(project_name)
    folder_path = kwargs.get("folder_path", None)
    if not folder_path:
        folder_path = args[1]
    from pathlib import Path
    # Count the *.json files the upload would consider (non-recursive).
    glob_iterator = Path(folder_path).glob('*.json')
    return {
        "event_name": "upload_annotations_from_folder_to_project",
        "properties":
            {
                "Annotation Count": sum(1 for _ in glob_iterator),
                "Project Type": project_metadata['type'],
                "From S3": bool(args[2:3] or ("from_s3_bucket" in kwargs))
            }
    }
def upload_preannotations_from_folder_to_project(*args, **kwargs):
    """Build the analytics payload for an ``upload_preannotations_from_folder_to_project`` call."""
    # BUG FIX: this SDK call's first parameter is ``project`` (there is no
    # ``source_project`` keyword here), so a keyword-only call used to miss
    # the lookup and fall through to args[0], raising IndexError.
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    from superannotate.db.projects import get_project_metadata as sa_get_project_metadata
    project_name = get_project_name(project)
    project_metadata = sa_get_project_metadata(project_name)
    folder_path = kwargs.get("folder_path", None)
    if not folder_path:
        folder_path = args[1]
    from pathlib import Path
    # Count the *.json files the upload would consider (non-recursive).
    glob_iterator = Path(folder_path).glob('*.json')
    return {
        "event_name": "upload_preannotations_from_folder_to_project",
        "properties":
            {
                "Annotation Count": sum(1 for _ in glob_iterator),
                "Project Type": project_metadata['type'],
                "From S3": bool(args[2:3] or ("from_s3_bucket" in kwargs))
            }
    }
def upload_images_from_folder_to_project(*args, **kwargs):
    """Build the analytics payload for an ``upload_images_from_folder_to_project`` call.

    Re-runs the same file discovery as the SDK call (one glob per
    extension, then exclude-pattern filtering) so the reported image count
    matches what the upload actually considered.
    """
    from pathlib import Path
    import os

    _missing = object()

    def _positional(index):
        """Return args[index] when it was supplied, else the _missing sentinel."""
        window = args[index:index + 1]
        return window[0] if window else _missing

    project = kwargs.get("project") or args[0]
    folder_path = kwargs.get("folder_path") or args[1]

    recursive_subfolders = kwargs.get("recursive_subfolders")
    if not recursive_subfolders:
        recursive_subfolders = _positional(6)
        if recursive_subfolders is _missing:
            recursive_subfolders = False

    extensions = kwargs.get("extensions")
    if not extensions:
        extensions = _positional(2)
        if extensions is _missing:
            from ...common import DEFAULT_IMAGE_EXTENSIONS
            extensions = DEFAULT_IMAGE_EXTENSIONS

    exclude_file_patterns = kwargs.get("exclude_file_patterns")
    if not exclude_file_patterns:
        exclude_file_patterns = _positional(5)
        if exclude_file_patterns is _missing:
            from ...common import DEFAULT_FILE_EXCLUDE_PATTERNS
            exclude_file_patterns = DEFAULT_FILE_EXCLUDE_PATTERNS

    globber = Path(folder_path).rglob if recursive_subfolders else Path(folder_path).glob
    paths = []
    for extension in extensions:
        paths += list(globber(f'*.{extension.lower()}'))
        if os.name != "nt":
            # Case-sensitive filesystems need the upper-case variant too.
            paths += list(globber(f'*.{extension.upper()}'))

    filtered_paths = [
        path for path in paths
        if not any(pattern in Path(path).name for pattern in exclude_file_patterns)
    ]

    return {
        "event_name": "upload_images_from_folder_to_project",
        "properties":
            {
                # NOTE: the misspelled keys below ("Extentions", "Patters")
                # are preserved verbatim — they are the analytics schema.
                "Image Count": len(filtered_paths),
                "Custom Extentions": bool(len(args) > 2 or kwargs.get("extensions")),
                "Annotation Status": bool(len(args) > 3 or kwargs.get("annotation_status")),
                "From S3": bool(len(args) > 4 or kwargs.get("from_s3_bucket")),
                "Custom Exclude Patters": bool(len(args) > 5 or kwargs.get("exclude_file_patterns"))
            }
    }
def upload_images_from_s3_bucket_to_project(*args, **kwargs):
    """Build the analytics payload for an ``upload_images_from_s3_bucket_to_project`` call."""
    project = kwargs.get("project") or args[0]
    return {
        "event_name": "upload_images_from_s3_bucket_to_project",
        "properties": {
            "project_name": get_project_name(project)
        }
    }


def prepare_export(*args, **kwargs):
    """Build the analytics payload for a ``prepare_export`` call.

    Each flag records only whether the caller supplied the corresponding
    optional argument, never its value.
    """
    project = kwargs.get("project") or args[0]
    optional_flags = (
        ("Folder Count", 1, "folder_names"),
        ("Annotation Statuses", 2, "annotation_statuses"),
        ("Include Fuse", 3, "include_fuse"),
        ("Only Pinned", 4, "only_pinned"),
    )
    properties = {"project_name": get_project_name(project)}
    for label, index, keyword in optional_flags:
        properties[label] = bool(len(args) > index or kwargs.get(keyword))
    return {"event_name": "prepare_export", "properties": properties}


def download_export(*args, **kwargs):
    """Build the analytics payload for a ``download_export`` call."""
    project = kwargs.get("project") or args[0]
    return {
        "event_name": "download_export",
        "properties": {
            "project_name": get_project_name(project),
            "to_s3_bucket": bool(len(args) > 4 or kwargs.get("to_s3_bucket")),
        }
    }
def dicom_to_rgb_sequence(*args, **kwargs):
    """Build the analytics payload for a ``dicom_to_rgb_sequence`` call (no tracked properties)."""
    return {
        "event_name": "dicom_to_rgb_sequence",
        "properties": {}
    }


def coco_split_dataset(*args, **kwargs):
    """Build the analytics payload for a ``coco_split_dataset`` call."""
    ratio_list = kwargs.get("ratio_list") or args[4]
    return {
        "event_name": "coco_split_dataset",
        "properties": {
            # Stringified so the analytics backend gets a flat value.
            "ratio_list": str(ratio_list)
        }
    }
def run_training(*args, **kwargs):
    """Build the analytics payload for a ``run_training`` call."""
    task = kwargs.get("task") or args[2]
    log = kwargs.get("log", "empty")
    if log == "empty":
        log = args[7] if len(args) > 7 else False
    train_data = kwargs.get("train_data") or args[4]
    test_data = kwargs.get("test_data") or args[5]
    # A "/" in any data path means folder-level (not whole-project) training.
    if any("/" in path for path in train_data + test_data):
        data_structure = "Folder"
    else:
        data_structure = "Project"
    from superannotate.db.projects import get_project_metadata as sa_get_project_metadata
    project_metadata = sa_get_project_metadata(get_project_name(train_data[0]))
    return {
        "event_name": "run_training",
        "properties": {
            "Project Type": project_metadata['type'],
            "Task": task,
            "Data Structure": data_structure,
            "Log": log
        }
    }
def assign_images(*args, **kwargs):
    """Build the analytics payload for an ``assign_images`` call."""
    project = kwargs.get("project") or args[0]
    image_names = kwargs.get("image_names") or args[1]
    user = kwargs.get("user") or args[2]
    from superannotate.db.users import get_team_contributor_metadata
    contributor = get_team_contributor_metadata(user)
    # Numeric role ids from the backend: 3 -> annotator, 4 -> QA,
    # anything else is reported as admin.
    user_role = {3: 'ANNOTATOR', 4: 'QA'}.get(contributor['user_role'], "ADMIN")
    from superannotate.db.project_api import get_project_and_folder_metadata
    project, folder = get_project_and_folder_metadata(project)
    return {
        "event_name": "assign_images",
        "properties": {
            "project_name": get_project_name(project),
            # True when assigning at the project root (no sub-folder).
            "Assign Folder": not folder,
            "Image Count": len(image_names),
            "User Role": user_role,
        }
    }
def move_image(*args, **kwargs):
    """Build the analytics payload for a ``move_image`` call."""
    project = kwargs.get("source_project") or args[0]
    return {
        "event_name": "move_image",
        "properties": {
            "project_name": get_project_name(project),
            "Move Annotations": bool(len(args) > 3 or "include_annotations" in kwargs),
            "Move Annotation Status": bool(len(args) > 4 or "copy_annotation_status" in kwargs),
            "Move Pin": bool(len(args) > 5 or "copy_pin" in kwargs),
        }
    }


def pin_image(*args, **kwargs):
    """Build the analytics payload for a ``pin_image`` call."""
    project = kwargs.get("project") or args[0]
    return {
        "event_name": "pin_image",
        "properties": {
            "project_name": get_project_name(project),
            "Pin": bool(len(args) > 2 or "pin" in kwargs),
        }
    }


def create_fuse_image(*args, **kwargs):
    """Build the analytics payload for a ``create_fuse_image`` call."""
    project = kwargs.get("project") or args[0]
    project_type = kwargs.get("project_type") or args[2]
    return {
        "event_name": "create_fuse_image",
        "properties": {
            "project_name": get_project_name(project),
            "Project Type": project_type,
            "Overlay": bool(len(args) > 4 or "output_overlay" in kwargs),
        }
    }


def set_image_annotation_status(*args, **kwargs):
    """Build the analytics payload for a ``set_image_annotation_status`` call."""
    project = kwargs.get("project") or args[0]
    return {
        "event_name": "set_image_annotation_status",
        "properties": {
            "project_name": get_project_name(project),
            "Annotation Status": bool(len(args) > 2 or "annotation_status" in kwargs),
        }
    }
def _add_annotation_event(event_name, args, kwargs, attributes_index):
    """Shared payload builder for the ``add_annotation_*_to_image`` wrappers.

    All of those SDK calls take the project first and differ only in the
    positional slot of their optional ``annotation_class_attributes``
    argument (``attributes_index``); the optional ``error`` argument always
    occupies the slot directly after it.  This helper replaces seven
    near-identical copies of the same body.
    """
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    return {
        "event_name": event_name,
        "properties":
            {
                "project_name": get_project_name(project),
                "Attributes": bool(
                    args[attributes_index:attributes_index + 1]
                    or ("annotation_class_attributes" in kwargs)
                ),
                "Error": bool(
                    args[attributes_index + 1:attributes_index + 2]
                    or ("error" in kwargs)
                ),
            }
    }


def add_annotation_bbox_to_image(*args, **kwargs):
    return _add_annotation_event("add_annotation_bbox_to_image", args, kwargs, 4)


def add_annotation_polygon_to_image(*args, **kwargs):
    return _add_annotation_event("add_annotation_polygon_to_image", args, kwargs, 4)


def add_annotation_polyline_to_image(*args, **kwargs):
    return _add_annotation_event("add_annotation_polyline_to_image", args, kwargs, 4)


def add_annotation_point_to_image(*args, **kwargs):
    return _add_annotation_event("add_annotation_point_to_image", args, kwargs, 4)


def add_annotation_ellipse_to_image(*args, **kwargs):
    return _add_annotation_event("add_annotation_ellipse_to_image", args, kwargs, 4)


def add_annotation_template_to_image(*args, **kwargs):
    # Template annotations take an extra positional argument, shifting the
    # optional attributes/error slots one position to the right.
    return _add_annotation_event("add_annotation_template_to_image", args, kwargs, 5)


def add_annotation_cuboid_to_image(*args, **kwargs):
    return _add_annotation_event("add_annotation_cuboid_to_image", args, kwargs, 4)
def create_annotation_class(*args, **kwargs):
    """Build the analytics payload for a ``create_annotation_class`` call."""
    project = kwargs.get("project") or args[0]
    return {
        "event_name": "create_annotation_class",
        "properties": {
            "project_name": get_project_name(project),
            "Attributes": bool(len(args) > 3 or "attribute_groups" in kwargs),
        }
    }


def create_annotation_classes_from_classes_json(*args, **kwargs):
    """Build the analytics payload for a ``create_annotation_classes_from_classes_json`` call."""
    project = kwargs.get("project") or args[0]
    return {
        "event_name": "create_annotation_classes_from_classes_json",
        "properties": {
            "project_name": get_project_name(project),
            "From S3": bool(len(args) > 2 or "from_s3_bucket" in kwargs),
        }
    }
def class_distribution(*args, **kwargs):
    """Build the analytics payload for a ``class_distribution`` call."""
    return {
        "event_name": "class_distribution",
        "properties": {
            # Records only whether visualization was requested at all.
            "Plot": bool(len(args) > 2 or "visualize" in kwargs),
        }
    }


def attribute_distribution(*args, **kwargs):
    """Build the analytics payload for an ``attribute_distribution`` call."""
    return {
        "event_name": "attribute_distribution",
        "properties": {
            "Plot": bool(len(args) > 2 or "visualize" in kwargs),
        }
    }
def share_project(*args, **kwargs):
    """Build the analytics payload for a ``share_project`` call."""
    project = kwargs.get("project") or args[0]
    user_role = kwargs.get("user_role") or args[2]
    return {
        "event_name": "share_project",
        "properties": {
            "project_name": get_project_name(project),
            "User Role": user_role
        }
    }


def set_project_default_image_quality_in_editor(*args, **kwargs):
    """Build the analytics payload for a ``set_project_default_image_quality_in_editor`` call."""
    project = kwargs.get("project") or args[0]
    image_quality_in_editor = kwargs.get("image_quality_in_editor") or args[1]
    return {
        "event_name": "set_project_default_image_quality_in_editor",
        "properties": {
            "project_name": get_project_name(project),
            "Image Quality": image_quality_in_editor
        }
    }


def get_exports(*args, **kwargs):
    """Build the analytics payload for a ``get_exports`` call."""
    project = kwargs.get("project") or args[0]
    return {
        "event_name": "get_exports",
        "properties": {
            "project_name": get_project_name(project),
            "Metadata": bool(len(args) > 1 or "return_metadata" in kwargs),
        }
    }


def search_folders(*args, **kwargs):
    """Build the analytics payload for a ``search_folders`` call."""
    project = kwargs.get("project") or args[0]
    return {
        "event_name": "search_folders",
        "properties": {
            "project_name": get_project_name(project),
            "Metadata": bool(len(args) > 2 or "return_metadata" in kwargs),
        }
    }
def filter_images_by_tags(*args, **kwargs):
    """Build the analytics payload for a ``filter_images_by_tags`` call."""
    return {
        "event_name": "filter_images_by_tags",
        "properties": {
            "Include": bool(len(args) > 1 or "include" in kwargs),
            "Exclude": bool(len(args) > 2 or "exclude" in kwargs)
        }
    }


def filter_images_by_comments(*args, **kwargs):
    """Build the analytics payload for a ``filter_images_by_comments`` call."""
    return {
        "event_name": "filter_images_by_comments",
        "properties": {
            "Include Unresolved Comments": bool(
                len(args) > 1 or "include_unresolved_comments" in kwargs
            ),
            "Include Resolved Comments": bool(
                len(args) > 2 or "include_resolved_comments" in kwargs
            )
        }
    }


def filter_annotation_instances(*args, **kwargs):
    """Build the analytics payload for a ``filter_annotation_instances`` call."""
    return {
        "event_name": "filter_annotation_instances",
        "properties": {
            "Include": bool(len(args) > 1 or "include" in kwargs),
            "Exclude": bool(len(args) > 2 or "exclude" in kwargs)
        }
    }
def aggregate_annotations_as_df(*args, **kwargs):
    """Build the analytics payload for an ``aggregate_annotations_as_df`` call.

    ``"empty"`` is the "keyword not supplied" sentinel so an explicit
    ``folder_names=None`` (meaning the project root) is honoured.
    """
    folder_names = kwargs.get("folder_names", "empty")
    if folder_names == "empty":
        folder_names = args[5] if len(args) > 5 else ()
    if folder_names is None:
        # None means the project root: no sub-folders involved.
        folder_names = []
    return {
        "event_name": "aggregate_annotations_as_df",
        "properties": {
            "Folder Count": len(folder_names),
        }
    }
def delete_folders(*args, **kwargs):
    """Build the analytics payload for a ``delete_folders`` call."""
    project = kwargs.get("project") or args[0]
    folder_names = kwargs.get("folder_names") or args[1]
    return {
        "event_name": "delete_folders",
        "properties": {
            "project_name": get_project_name(project),
            "Folder Count": len(folder_names),
        }
    }
def delete_images(*args, **kwargs):
    """Build the analytics payload for a ``delete_images`` call.

    ``False`` is the "keyword omitted" sentinel for ``image_names``; an
    explicit ``None`` means "every image in the project" and is expanded
    via a search — the same convention ``copy_images``/``move_images`` use.
    """
    project = kwargs.get("project", None)
    if not project:
        project = args[0]
    image_names = kwargs.get("image_names", False)
    if image_names is False:
        # BUG FIX: only fall back to the positional slot when the keyword
        # was truly omitted.  The old ``if not image_names`` also fired for
        # an explicit ``image_names=None`` keyword and raised IndexError
        # on keyword-only calls.
        image_names = args[1]
    if image_names is None:
        from superannotate.db.images import search_images as sa_search_images
        image_names = sa_search_images(project)
    return {
        "event_name": "delete_images",
        "properties":
            {
                "project_name": get_project_name(project),
                "Image Count": len(image_names),
            }
    }
def unassign_folder(*args, **kwargs):
    """Build the analytics payload for an ``unassign_folder`` call (no tracked properties)."""
    return {
        "event_name": "unassign_folder",
        "properties": {}
    }


def assign_folder(*args, **kwargs):
    """Build the analytics payload for an ``assign_folder`` call."""
    users = kwargs.get("users") or args[2]
    return {
        "event_name": "assign_folder",
        "properties": {
            "User Count": len(users),
        }
    }
def unassign_images(*args, **kwargs):
    """Build the analytics payload for an ``unassign_images`` call."""
    image_names = kwargs.get("image_names") or args[1]
    project = kwargs.get("project") or args[0]
    from superannotate.db.project_api import get_project_and_folder_metadata
    project, folder = get_project_and_folder_metadata(project)
    return {
        "event_name": "unassign_images",
        "properties": {
            # True when unassigning at the project root (no sub-folder).
            "Assign Folder": not folder,
            "Image Count": len(image_names)
        }
    }
| 28.441262 | 89 | 0.548219 |
5421a06ac37ae4a02d53cac0c7665a1cb2c2345b | 1,789 | py | Python | tests/test_callable_object_proxy.py | mgorny/wrapt | 264c06fd3850bd0cda6917ca3e87417b573e023f | [
"BSD-2-Clause"
] | 1,579 | 2015-01-01T09:30:58.000Z | 2022-03-31T18:58:00.000Z | tests/test_callable_object_proxy.py | mgorny/wrapt | 264c06fd3850bd0cda6917ca3e87417b573e023f | [
"BSD-2-Clause"
] | 174 | 2015-02-18T05:13:54.000Z | 2022-03-30T22:09:07.000Z | tests/test_callable_object_proxy.py | mgorny/wrapt | 264c06fd3850bd0cda6917ca3e87417b573e023f | [
"BSD-2-Clause"
] | 264 | 2015-01-23T07:46:46.000Z | 2022-03-10T22:53:48.000Z | from __future__ import print_function
import unittest
import wrapt
class TestPartialCallableObjectProxy(unittest.TestCase):
def test_no_arguments(self):
def func0():
return ((), {})
partial0 = wrapt.PartialCallableObjectProxy(func0)
args, kwargs = (), {}
self.assertEqual(partial0(), (args, kwargs))
def test_empty_arguments(self):
def func0(*args, **kwargs):
return (args, kwargs)
args, kwargs = (), {}
partial0 = wrapt.PartialCallableObjectProxy(func0, *args, **kwargs)
self.assertEqual(partial0(), (args, kwargs))
def test_1_positional_argument(self):
def func0(*args, **kwargs):
return (args, kwargs)
args, kwargs = (1,), {}
partial0 = wrapt.PartialCallableObjectProxy(func0, *args)
self.assertEqual(partial0(), (args, kwargs))
def test_1_keyword_argument(self):
def func0(*args, **kwargs):
return (args, kwargs)
args, kwargs = (), {'k1': 1}
partial0 = wrapt.PartialCallableObjectProxy(func0, **kwargs)
self.assertEqual(partial0(), (args, kwargs))
def test_multiple_positional_arguments(self):
def func0(*args, **kwargs):
return (args, kwargs)
args, kwargs = (1, 2, 3), {}
partial0 = wrapt.PartialCallableObjectProxy(func0, *args)
self.assertEqual(partial0(), (args, kwargs))
def test_multiple_keyword_arguments(self):
def func0(*args, **kwargs):
return (args, kwargs)
args, kwargs = (), {'k1': 1, 'k2': 2, 'k3': 3}
partial0 = wrapt.PartialCallableObjectProxy(func0, **kwargs)
self.assertEqual(partial0(), (args, kwargs))
if __name__ == '__main__':
unittest.main()
| 25.197183 | 75 | 0.606484 |
123f25ab7099ee5022edf9663aa10798da4a9aa8 | 6,265 | py | Python | midas.py | ShengCN/MiDaS | 49072369de6f7ae6e95ac15cf7ca71de225d1983 | [
"MIT"
] | null | null | null | midas.py | ShengCN/MiDaS | 49072369de6f7ae6e95ac15cf7ca71de225d1983 | [
"MIT"
] | null | null | null | midas.py | ShengCN/MiDaS | 49072369de6f7ae6e95ac15cf7ca71de225d1983 | [
"MIT"
] | null | null | null | """Compute depth maps for images in the input folder.
"""
import os
from os.path import join
import glob
import torch
import utils
import cv2
import argparse
from tqdm import tqdm
import pandas as pd
from torchvision.transforms import Compose
from midas.dpt_depth import DPTDepthModel
from midas.midas_net import MidasNet
from midas.midas_net_custom import MidasNet_small
from midas.transforms import Resize, NormalizeImage, PrepareForNet
def run(csv_path, output_path, model_path, model_type="large", optimize=True):
    """Run MonoDepthNN to compute depth maps for every image listed in a CSV.

    Args:
        csv_path (str): path to a CSV with an 'rgb' column of image paths
            (relative to the CSV's directory); a 'midas' column with the
            output paths is written back into the same file.
        output_path (str): folder where depth PNGs are written
        model_path (str): path to saved model weights
        model_type (str): one of dpt_large, dpt_hybrid, midas_v21,
            midas_v21_small; anything else aborts via assert
        optimize (bool): on CUDA, use channels_last memory format and
            half precision for both model and inputs
    """
    print("initialize")
    input_df = pd.read_csv(csv_path)
    # select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: %s" % device)
    # load network: each branch sets the model, its native input size,
    # the resize policy and the input normalization it was trained with
    if model_type == "dpt_large": # DPT-Large
        model = DPTDepthModel(
            path=model_path,
            backbone="vitl16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    elif model_type == "dpt_hybrid": #DPT-Hybrid
        model = DPTDepthModel(
            path=model_path,
            backbone="vitb_rn50_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode="minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    elif model_type == "midas_v21":
        model = MidasNet(model_path, non_negative=True)
        net_w, net_h = 384, 384
        resize_mode="upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )
    elif model_type == "midas_v21_small":
        model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, non_negative=True, blocks={'expand': True})
        net_w, net_h = 256, 256
        resize_mode="upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )
    else:
        # NOTE(review): assert is stripped under python -O; a raise would be
        # more robust, but callers currently see an AssertionError here.
        print(f"model_type '{model_type}' not implemented, use: --model_type large")
        assert False
    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method=resize_mode,
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )
    model.eval()
    if optimize==True:
        # rand_example = torch.rand(1, 3, net_h, net_w)
        # model(rand_example)
        # traced_script_module = torch.jit.trace(model, rand_example)
        # model = traced_script_module
        if device == torch.device("cuda"):
            model = model.to(memory_format=torch.channels_last)
            model = model.half()
    model.to(device)
    # get input
    # img_names = glob.glob(os.path.join(input_path, "*"))
    # num_images = len(img_names)
    # img_names
    # create output folder
    os.makedirs(output_path, exist_ok=True)
    print("start processing")
    # import pdb; pdb.set_trace()
    # image paths in the CSV are resolved relative to the CSV's directory
    root = os.path.dirname(csv_path)
    # for ind, img_name in enumerate(tqdm(img_names)):
    for ind, v in tqdm(input_df.iterrows(), total=len(input_df), desc='MiDaS'):
        img_name = input_df.at[ind, 'rgb']
        # output path: <stem>_midas.png; recorded in the CSV even when the
        # depth map already exists, so the CSV stays complete on re-runs
        filename = os.path.join(
            output_path, '{}_midas'.format(os.path.splitext(os.path.basename(img_name))[0])
        )
        input_df.at[ind, 'midas'] = os.path.relpath(filename, root) + '.png'
        if os.path.exists(filename + '.png'):
            continue
        # input
        img = utils.read_image(join(root, img_name))
        img_input = transform({"image": img})["image"]
        # compute: run the net, then upsample the prediction back to the
        # original image resolution
        with torch.no_grad():
            sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
            if optimize==True and device == torch.device("cuda"):
                sample = sample.to(memory_format=torch.channels_last)
                sample = sample.half()
            prediction = model.forward(sample)
            prediction = (
                torch.nn.functional.interpolate(
                    prediction.unsqueeze(1),
                    size=img.shape[:2],
                    mode="bicubic",
                    align_corners=False,
                )
                .squeeze()
                .cpu()
                .numpy()
            )
        utils.write_depth(filename, prediction, bits=2)
    # persist the new 'midas' column next to the input data
    input_df.to_csv(csv_path, index=False)
    print("finished")
if __name__ == "__main__":
    # Command-line entry point: parse arguments, pick default weights for
    # the chosen model type, then run the depth-estimation pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output_path',
        default='output',
        help='folder for output images'
    )
    parser.add_argument('-m', '--model_weights',
        default=None,
        help='path to the trained weights of model'
    )
    parser.add_argument('-t', '--model_type',
        default='dpt_large',
        help='model type: dpt_large, dpt_hybrid, midas_v21_large or midas_v21_small'
    )
    parser.add_argument('--optimize', dest='optimize', action='store_true')
    parser.add_argument('--no-optimize', dest='optimize', action='store_false')
    parser.add_argument('--csv', type=str, help='csv')
    parser.set_defaults(optimize=True)
    args = parser.parse_args()
    # default weight files shipped per model type; used when -m is omitted
    default_models = {
        "midas_v21_small": "weights/midas_v21_small-70d6b9c8.pt",
        "midas_v21": "weights/midas_v21-f6b98070.pt",
        "dpt_large": "weights/dpt_large-midas-2f21e586.pt",
        "dpt_hybrid": "weights/dpt_hybrid-midas-501f0c75.pt",
    }
    if args.model_weights is None:
        args.model_weights = default_models[args.model_type]
    # set torch options
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # compute depth maps
    run(args.csv, args.output_path, args.model_weights, args.model_type, args.optimize)
| 31.80203 | 147 | 0.602873 |
4118f7b82e488212c3f466b796cbf44fc0e0bea8 | 1,370 | py | Python | hydra/main.py | machineko/hydra | 4ba24129fff5ad274611ccd63a07532ff8d44585 | [
"MIT"
] | 1 | 2021-09-29T06:24:36.000Z | 2021-09-29T06:24:36.000Z | hydra/main.py | machineko/hydra | 4ba24129fff5ad274611ccd63a07532ff8d44585 | [
"MIT"
] | null | null | null | hydra/main.py | machineko/hydra | 4ba24129fff5ad274611ccd63a07532ff8d44585 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
from typing import Any, Callable, Optional
from omegaconf import DictConfig
from ._internal.utils import _run_hydra, get_args_parser
from .types import TaskFunction
def main(
    config_path: Optional[str] = None,
    config_name: Optional[str] = None,
) -> Callable[[TaskFunction], Any]:
    """
    Decorator factory turning a task function into a Hydra entry point.
    :param config_path: the config path, a directory relative to the declaring python file.
    :param config_name: the name of the config (usually the file name without the .yaml extension)
    """
    def main_decorator(task_function: TaskFunction) -> Callable[[], None]:
        @functools.wraps(task_function)
        def decorated_main(cfg_passthrough: Optional[DictConfig] = None) -> Any:
            # A config handed in directly bypasses Hydra entirely.
            if cfg_passthrough is not None:
                return task_function(cfg_passthrough)
            arg_parser = get_args_parser()
            # no return value from run_hydra() as it may sometime actually run
            # the task_function multiple times (--multirun)
            _run_hydra(
                args_parser=arg_parser,
                task_function=task_function,
                config_path=config_path,
                config_name=config_name,
            )
        return decorated_main
    return main_decorator
| 35.128205 | 100 | 0.643796 |
d3a4abfcf331f2228adb653c26f1fa62be2aa774 | 760 | py | Python | apps/tests/test_utilities.py | Zadigo/scrappers | eb5219c8363ac6d0229c0579de9b9e55cfa9fe3d | [
"MIT"
] | null | null | null | apps/tests/test_utilities.py | Zadigo/scrappers | eb5219c8363ac6d0229c0579de9b9e55cfa9fe3d | [
"MIT"
] | 114 | 2020-09-21T06:15:01.000Z | 2022-03-28T14:02:44.000Z | apps/tests/test_utilities.py | Zadigo/scrappers | eb5219c8363ac6d0229c0579de9b9e55cfa9fe3d | [
"MIT"
] | null | null | null | import unittest
from scrappers.scrappers.config.utilities import new_filename, prepare_values
class TestUtilities(unittest.TestCase):
    """Unit tests for scrappers' filename generator and value decorator."""
    def test_new_filename(self):
        # Expected shape: <first>_<last>_<year>_<month>_<hex token>.<ext>,
        # e.g. eugenie_bouchard_2019_7_1356e0.json
        generated = new_filename('eugenie_bouchard')
        generic_pattern = r'\w+\_\w+\_\d{4}\_\d+\_[a-z0-9]+\.\w+'
        specific_pattern = r'eugenie\_bouchard\_\d{4}\_\d+\_[a-z0-9]+\.\w+'
        self.assertRegex(generated, generic_pattern)
        self.assertRegex(generated, specific_pattern)
    def test_decorator(self):
        class Dummy:
            @prepare_values
            def save(self):
                return ['a', 'b', 'c']
        # prepare_values replaces save with prepare(self, celibrity=None)
        self.assertEqual(Dummy().save.__name__, 'prepare')
if __name__ == "__main__":
    # Run the tests when this module is executed directly.
    unittest.main()
| 30.4 | 82 | 0.610526 |
a5d1db331d07c7b97c77a0929526bceaab6c372b | 655 | py | Python | main.py | andreas-bylund/thesis_generate_graphs | e6ebabba5269c163a531c140fb11fdaf27fb34c1 | [
"MIT"
] | null | null | null | main.py | andreas-bylund/thesis_generate_graphs | e6ebabba5269c163a531c140fb11fdaf27fb34c1 | [
"MIT"
] | null | null | null | main.py | andreas-bylund/thesis_generate_graphs | e6ebabba5269c163a531c140fb11fdaf27fb34c1 | [
"MIT"
] | null | null | null | from graphics import Graphics as gh
from gather_data_helper import Gather_data_helper
from t_test import T_test as T_test
import time
if __name__ == "__main__":
    # Timed batch job: read every model's logs and report the average
    # validation accuracy.
    start_time = time.time()
    dh = Gather_data_helper()
    # Where is Custom_logs folder located? (hard-coded local Windows path)
    custom_logs_folder_path = r"C:\Users\Andreas Bylund\Desktop\Backup - Tränade modeller och logs\Custom_logs"
    # Get folder and file information
    data = dh.gather_folder_information(custom_logs_folder_path)
    # Get average result from all models
    result = dh.get_average_validation_acc(data)
    print(result)
    print("--- %s seconds ---" % (time.time() - start_time))
| 27.291667 | 111 | 0.735878 |
95cd914025dc5a48a1db1225b115daf845723b63 | 52,017 | py | Python | scapy/layers/tls/handshake.py | kosciolek/J-Tracert | 89ed802f700e02600138ad7132e6a856463620dd | [
"MIT"
] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z | scapy/layers/tls/handshake.py | kosciolek/J-Tracert | 89ed802f700e02600138ad7132e6a856463620dd | [
"MIT"
] | null | null | null | scapy/layers/tls/handshake.py | kosciolek/J-Tracert | 89ed802f700e02600138ad7132e6a856463620dd | [
"MIT"
] | 1 | 2018-11-15T12:37:04.000Z | 2018-11-15T12:37:04.000Z | ## This file is part of Scapy
## Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
## 2015, 2016, 2017 Maxence Tury
## This program is published under a GPLv2 license
"""
TLS handshake fields & logic.
This module covers the handshake TLS subprotocol, except for the key exchange
mechanisms which are addressed with keyexchange.py.
"""
from __future__ import absolute_import
import math
from scapy.error import log_runtime, warning
from scapy.fields import *
from scapy.compat import *
from scapy.packet import Packet, Raw, Padding
from scapy.utils import repr_hex
from scapy.layers.x509 import OCSP_Response
from scapy.layers.tls.cert import Cert, PrivKey, PubKey
from scapy.layers.tls.basefields import (_tls_version, _TLSVersionField,
_TLSClientVersionField)
from scapy.layers.tls.extensions import (_ExtensionsLenField, _ExtensionsField,
_cert_status_type, TLS_Ext_SupportedVersions)
from scapy.layers.tls.keyexchange import (_TLSSignature, _TLSServerParamsField,
_TLSSignatureField, ServerRSAParams,
SigAndHashAlgsField, _tls_hash_sig,
SigAndHashAlgsLenField)
from scapy.layers.tls.keyexchange_tls13 import TicketField
from scapy.layers.tls.session import (_GenericTLSSessionInheritance,
readConnState, writeConnState)
from scapy.layers.tls.crypto.compression import (_tls_compression_algs,
_tls_compression_algs_cls,
Comp_NULL, _GenericComp,
_GenericCompMetaclass)
from scapy.layers.tls.crypto.suites import (_tls_cipher_suites,
_tls_cipher_suites_cls,
_GenericCipherSuite,
_GenericCipherSuiteMetaclass)
###############################################################################
### Generic TLS Handshake message ###
###############################################################################
_tls_handshake_type = { 0: "hello_request", 1: "client_hello",
2: "server_hello", 3: "hello_verify_request",
4: "session_ticket", 6: "hello_retry_request",
8: "encrypted_extensions", 11: "certificate",
12: "server_key_exchange", 13: "certificate_request",
14: "server_hello_done", 15: "certificate_verify",
16: "client_key_exchange", 20: "finished",
21: "certificate_url", 22: "certificate_status",
23: "supplemental_data", 24: "key_update" }
class _TLSHandshake(_GenericTLSSessionInheritance):
    """
    Inherited by other Handshake classes to get post_build().
    Also used as a fallback for unknown TLS Handshake packets.
    """
    name = "TLS Handshake Generic message"
    fields_desc = [ ByteEnumField("msgtype", None, _tls_handshake_type),
                    ThreeBytesField("msglen", None),
                    StrLenField("msg", "",
                                length_from=lambda pkt: pkt.msglen) ]
    def post_build(self, p, pay):
        # Fill in the 3-byte msglen (payload length without the 4-byte
        # msgtype/msglen header), keeping the msgtype byte untouched.
        l = len(p)
        if self.msglen is None:
            l2 = l - 4
            p = struct.pack("!I", (orb(p[0]) << 24) | l2) + p[4:]
        return p + pay
    def guess_payload_class(self, p):
        # Any trailing bytes are treated as padding, not a new layer.
        return conf.padding_layer
    def tls_session_update(self, msg_str):
        """
        Covers both post_build- and post_dissection- context updates.
        """
        self.tls_session.handshake_messages.append(msg_str)
        self.tls_session.handshake_messages_parsed.append(self)
###############################################################################
### HelloRequest ###
###############################################################################
class TLSHelloRequest(_TLSHandshake):
    """HelloRequest: header-only handshake message (no body fields)."""
    name = "TLS Handshake - Hello Request"
    fields_desc = [ ByteEnumField("msgtype", 0, _tls_handshake_type),
                    ThreeBytesField("msglen", None) ]
    def tls_session_update(self, msg_str):
        """
        Message should not be added to the list of handshake messages
        that will be hashed in the finished and certificate verify messages.
        """
        return
###############################################################################
### ClientHello fields ###
###############################################################################
class _GMTUnixTimeField(UTCTimeField):
    """
    "The current time and date in standard UNIX 32-bit format (seconds since
    the midnight starting Jan 1, 1970, GMT, ignoring leap seconds)."
    """
    def i2h(self, pkt, x):
        # An unset timestamp is rendered as the epoch rather than None.
        return 0 if x is None else x
class _TLSRandomBytesField(StrFixedLenField):
    def i2repr(self, pkt, x):
        # Display raw random bytes as a readable hex string.
        return repr(x) if x is None else repr_hex(self.i2h(pkt, x))
class _SessionIDField(StrLenField):
    """
    opaque SessionID<0..32>; section 7.4.1.2 of RFC 4346
    """
    # Plain variable-length byte string; distinct class kept for readability.
    pass
class _CipherSuitesField(StrLenField):
    """
    A list of fixed-size (2-byte by default) cipher suite identifiers,
    with bidirectional translation between numeric values and the suite
    names found in the provided dictionary.
    """
    __slots__ = ["itemfmt", "itemsize", "i2s", "s2i"]
    islist = 1
    def __init__(self, name, default, dico, length_from=None, itemfmt="!H"):
        StrLenField.__init__(self, name, default, length_from=length_from)
        self.itemfmt = itemfmt
        self.itemsize = struct.calcsize(itemfmt)
        # Build value<->name lookup tables from the provided dictionary.
        i2s = self.i2s = {}
        s2i = self.s2i = {}
        for k in six.iterkeys(dico):
            i2s[k] = dico[k]
            s2i[dico[k]] = k
    def any2i_one(self, pkt, x):
        # Unwrap suite classes/instances down to their numeric value.
        if (isinstance(x, _GenericCipherSuite) or
            isinstance(x, _GenericCipherSuiteMetaclass)):
            x = x.val
        # Accept suite names as well. The former bytes-only check never
        # matched on Python 3, where the s2i keys are str names.
        if isinstance(x, (bytes, str)):
            x = self.s2i[x]
        return x
    def i2repr_one(self, pkt, x):
        fmt = "0x%%0%dx" % self.itemsize
        return self.i2s.get(x, fmt % x)
    def any2i(self, pkt, x):
        if x is None:
            return None
        if not isinstance(x, list):
            x = [x]
        return [self.any2i_one(pkt, z) for z in x]
    def i2repr(self, pkt, x):
        if x is None:
            return "None"
        l = [self.i2repr_one(pkt, z) for z in x]
        if len(l) == 1:
            l = l[0]
        else:
            l = "[%s]" % ", ".join(l)
        return l
    def i2m(self, pkt, val):
        if val is None:
            val = []
        return b"".join(struct.pack(self.itemfmt, x) for x in val)
    def m2i(self, pkt, m):
        # Consume the wire string itemsize bytes at a time.
        res = []
        itemlen = struct.calcsize(self.itemfmt)
        while m:
            res.append(struct.unpack(self.itemfmt, m[:itemlen])[0])
            m = m[itemlen:]
        return res
    def i2len(self, pkt, i):
        if i is None:
            return 0
        return len(i)*self.itemsize
class _CompressionMethodsField(_CipherSuitesField):
    def any2i_one(self, pkt, x):
        # Accept compression algorithm classes/instances and names alike.
        if isinstance(x, (_GenericComp, _GenericCompMetaclass)):
            x = x.val
        if isinstance(x, str):
            x = self.s2i[x]
        return x
###############################################################################
### ClientHello ###
###############################################################################
class TLSClientHello(_TLSHandshake):
    """
    TLS ClientHello, with abilities to handle extensions.
    The Random structure follows the RFC 5246: while it is 32-byte long,
    many implementations use the first 4 bytes as a gmt_unix_time, and then
    the remaining 28 bytes should be completely random. This was designed in
    order to (sort of) mitigate broken RNGs. If you prefer to show the full
    32 random bytes without any GMT time, just comment in/out the lines below.
    """
    name = "TLS Handshake - Client Hello"
    fields_desc = [ ByteEnumField("msgtype", 1, _tls_handshake_type),
                    ThreeBytesField("msglen", None),
                    _TLSClientVersionField("version", None, _tls_version),
                    #_TLSRandomBytesField("random_bytes", None, 32),
                    _GMTUnixTimeField("gmt_unix_time", None),
                    _TLSRandomBytesField("random_bytes", None, 28),
                    FieldLenField("sidlen", None, fmt="B", length_of="sid"),
                    _SessionIDField("sid", "",
                                    length_from=lambda pkt:pkt.sidlen),
                    FieldLenField("cipherslen", None, fmt="!H",
                                  length_of="ciphers"),
                    _CipherSuitesField("ciphers", None,
                                       _tls_cipher_suites, itemfmt="!H",
                                       length_from=lambda pkt: pkt.cipherslen),
                    FieldLenField("complen", None, fmt="B", length_of="comp"),
                    _CompressionMethodsField("comp", [0],
                                             _tls_compression_algs,
                                             itemfmt="B",
                                             length_from=
                                                 lambda pkt: pkt.complen),
                    _ExtensionsLenField("extlen", None, length_of="ext"),
                    _ExtensionsField("ext", None,
                                     length_from=lambda pkt: (pkt.msglen -
                                                              (pkt.sidlen or 0) -
                                                              (pkt.cipherslen or 0) -
                                                              (pkt.complen or 0) -
                                                              40)) ]
    def post_build(self, p, pay):
        if self.random_bytes is None:
            p = p[:10] + randstring(28) + p[10+28:]
        # if no ciphersuites were provided, we add a few usual, supported
        # ciphersuites along with the appropriate extensions
        if self.ciphers is None:
            cipherstart = 39 + (self.sidlen or 0)
            s = b"001ac02bc023c02fc027009e0067009c003cc009c0130033002f000a"
            # s is a hex dump (2-byte 0x001a length followed by 13 suites):
            # it must be decoded with hex_bytes (a2b_hex). The previous code
            # used bytes_hex (b2a_hex), which re-encoded the ASCII dump into
            # 112 bytes instead of the intended 28-byte field.
            p = p[:cipherstart] + hex_bytes(s) + p[cipherstart+2:]
        if self.ext is None:
            ext_len = b'\x00\x2c'
            ext_reneg = b'\xff\x01\x00\x01\x00'
            ext_sn = b'\x00\x00\x00\x0f\x00\r\x00\x00\nsecdev.org'
            ext_sigalg = b'\x00\r\x00\x08\x00\x06\x04\x03\x04\x01\x02\x01'
            ext_supgroups = b'\x00\n\x00\x04\x00\x02\x00\x17'
            p += ext_len + ext_reneg + ext_sn + ext_sigalg + ext_supgroups
        return super(TLSClientHello, self).post_build(p, pay)
    def tls_session_update(self, msg_str):
        """
        Either for parsing or building, we store the client_random
        along with the raw string representing this handshake message.
        """
        super(TLSClientHello, self).tls_session_update(msg_str)
        self.tls_session.advertised_tls_version = self.version
        self.random_bytes = msg_str[10:38]
        self.tls_session.client_random = (struct.pack('!I',
                                                      self.gmt_unix_time) +
                                          self.random_bytes)
        if self.ext:
            for e in self.ext:
                if isinstance(e, TLS_Ext_SupportedVersions):
                    if self.tls_session.tls13_early_secret is None:
                        # this is not recomputed if there was a TLS 1.3 HRR
                        self.tls_session.compute_tls13_early_secrets()
                    break
###############################################################################
### ServerHello ###
###############################################################################
class TLSServerHello(TLSClientHello):
    """
    TLS ServerHello, with abilities to handle extensions.
    The Random structure follows the RFC 5246: while it is 32-byte long,
    many implementations use the first 4 bytes as a gmt_unix_time, and then
    the remaining 28 byts should be completely random. This was designed in
    order to (sort of) mitigate broken RNGs. If you prefer to show the full
    32 random bytes without any GMT time, just comment in/out the lines below.
    """
    name = "TLS Handshake - Server Hello"
    fields_desc = [ ByteEnumField("msgtype", 2, _tls_handshake_type),
                    ThreeBytesField("msglen", None),
                    _TLSVersionField("version", None, _tls_version),
                    #_TLSRandomBytesField("random_bytes", None, 32),
                    _GMTUnixTimeField("gmt_unix_time", None),
                    _TLSRandomBytesField("random_bytes", None, 28),
                    FieldLenField("sidlen", None, length_of="sid", fmt="B"),
                    _SessionIDField("sid", "",
                                    length_from = lambda pkt: pkt.sidlen),
                    EnumField("cipher", None, _tls_cipher_suites),
                    _CompressionMethodsField("comp", [0],
                                             _tls_compression_algs,
                                             itemfmt="B",
                                             length_from=lambda pkt: 1),
                    _ExtensionsLenField("extlen", None, length_of="ext"),
                    _ExtensionsField("ext", None,
                                     length_from=lambda pkt: (pkt.msglen -
                                                              (pkt.sidlen or 0) -
                                                              38)) ]
                                                              #40)) ]
    @classmethod
    def dispatch_hook(cls, _pkt=None, *args, **kargs):
        # Dispatch to the TLS 1.3 variant based on the advertised version
        # (0x0304, or a draft version above 0x7f00).
        if _pkt and len(_pkt) >= 6:
            version = struct.unpack("!H", _pkt[4:6])[0]
            if version == 0x0304 or version > 0x7f00:
                return TLS13ServerHello
        return TLSServerHello
    def post_build(self, p, pay):
        # Generate the 28 random bytes when none were supplied.
        if self.random_bytes is None:
            p = p[:10] + randstring(28) + p[10+28:]
        return super(TLSClientHello, self).post_build(p, pay)
    def tls_session_update(self, msg_str):
        """
        Either for parsing or building, we store the server_random
        along with the raw string representing this handshake message.
        We also store the session_id, the cipher suite (if recognized),
        the compression method, and finally we instantiate the pending write
        and read connection states. Usually they get updated later on in the
        negotiation when we learn the session keys, and eventually they
        are committed once a ChangeCipherSpec has been sent/received.
        """
        super(TLSClientHello, self).tls_session_update(msg_str)
        self.tls_session.tls_version = self.version
        self.random_bytes = msg_str[10:38]
        self.tls_session.server_random = (struct.pack('!I',
                                                      self.gmt_unix_time) +
                                          self.random_bytes)
        self.tls_session.sid = self.sid
        cs_cls = None
        if self.cipher:
            cs_val = self.cipher
            if cs_val not in _tls_cipher_suites_cls:
                warning("Unknown cipher suite %d from ServerHello" % cs_val)
                # we do not try to set a default nor stop the execution
            else:
                cs_cls = _tls_cipher_suites_cls[cs_val]
        comp_cls = Comp_NULL
        if self.comp:
            comp_val = self.comp[0]
            if comp_val not in _tls_compression_algs_cls:
                err = "Unknown compression alg %d from ServerHello" % comp_val
                warning(err)
                comp_val = 0
            comp_cls = _tls_compression_algs_cls[comp_val]
        connection_end = self.tls_session.connection_end
        self.tls_session.pwcs = writeConnState(ciphersuite=cs_cls,
                                               compression_alg=comp_cls,
                                               connection_end=connection_end,
                                               tls_version=self.version)
        self.tls_session.prcs = readConnState(ciphersuite=cs_cls,
                                              compression_alg=comp_cls,
                                              connection_end=connection_end,
                                              tls_version=self.version)
class TLS13ServerHello(TLSClientHello):
    """ TLS 1.3 ServerHello """
    name = "TLS 1.3 Handshake - Server Hello"
    fields_desc = [ ByteEnumField("msgtype", 2, _tls_handshake_type),
                    ThreeBytesField("msglen", None),
                    _TLSVersionField("version", None, _tls_version),
                    _TLSRandomBytesField("random_bytes", None, 32),
                    EnumField("cipher", None, _tls_cipher_suites),
                    _ExtensionsLenField("extlen", None, length_of="ext"),
                    _ExtensionsField("ext", None,
                                     length_from=lambda pkt: (pkt.msglen -
                                                              38)) ]
    def tls_session_update(self, msg_str):
        """
        Either for parsing or building, we store the server_random along with
        the raw string representing this handshake message. We also store the
        cipher suite (if recognized), and finally we instantiate the write and
        read connection states.
        """
        super(TLSClientHello, self).tls_session_update(msg_str)
        s = self.tls_session
        s.tls_version = self.version
        s.server_random = self.random_bytes
        cs_cls = None
        if self.cipher:
            cs_val = self.cipher
            if cs_val not in _tls_cipher_suites_cls:
                warning("Unknown cipher suite %d from ServerHello" % cs_val)
                # we do not try to set a default nor stop the execution
            else:
                cs_cls = _tls_cipher_suites_cls[cs_val]
        connection_end = s.connection_end
        # In TLS 1.3 both connection states switch to handshake keys right
        # after the ServerHello, hence the immediate commit triggers.
        s.pwcs = writeConnState(ciphersuite=cs_cls,
                                connection_end=connection_end,
                                tls_version=self.version)
        s.triggered_pwcs_commit = True
        s.prcs = readConnState(ciphersuite=cs_cls,
                               connection_end=connection_end,
                               tls_version=self.version)
        s.triggered_prcs_commit = True
        if self.tls_session.tls13_early_secret is None:
            # In case the connState was not pre-initialized, we could not
            # compute the early secrets at the ClientHello, so we do it here.
            self.tls_session.compute_tls13_early_secrets()
        s.compute_tls13_handshake_secrets()
###############################################################################
### HelloRetryRequest ###
###############################################################################
class TLSHelloRetryRequest(_TLSHandshake):
    """TLS 1.3 HelloRetryRequest: version plus extensions only."""
    name = "TLS 1.3 Handshake - Hello Retry Request"
    fields_desc = [ ByteEnumField("msgtype", 6, _tls_handshake_type),
                    ThreeBytesField("msglen", None),
                    _TLSVersionField("version", None, _tls_version),
                    _ExtensionsLenField("extlen", None, length_of="ext"),
                    _ExtensionsField("ext", None,
                                     length_from=lambda pkt: pkt.msglen - 4) ]
###############################################################################
### EncryptedExtensions ###
###############################################################################
class TLSEncryptedExtensions(_TLSHandshake):
    """TLS 1.3 EncryptedExtensions: an extensions list and nothing else."""
    name = "TLS 1.3 Handshake - Encrypted Extensions"
    fields_desc = [ ByteEnumField("msgtype", 8, _tls_handshake_type),
                    ThreeBytesField("msglen", None),
                    _ExtensionsLenField("extlen", None, length_of="ext"),
                    _ExtensionsField("ext", None,
                                     length_from=lambda pkt: pkt.msglen - 2) ]
###############################################################################
### Certificate ###
###############################################################################
#XXX It might be appropriate to rewrite this mess with basic 3-byte FieldLenField.
class _ASN1CertLenField(FieldLenField):
    """
    This is mostly a 3-byte FieldLenField.
    """
    def __init__(self, name, default, length_of=None, adjust=lambda pkt, x: x):
        self.length_of = length_of
        self.adjust = adjust
        # Stored as a 4-byte int internally; only 3 bytes go on the wire.
        Field.__init__(self, name, default, fmt="!I")
    def i2m(self, pkt, x):
        # Compute the length from the referenced field when unset.
        if x is None:
            if self.length_of is not None:
                fld,fval = pkt.getfield_and_val(self.length_of)
                f = fld.i2len(pkt, fval)
                x = self.adjust(pkt, f)
        return x
    def addfield(self, pkt, s, val):
        # Drop the leading byte of the 4-byte packed value: 3 bytes on wire.
        return s + struct.pack(self.fmt, self.i2m(pkt,val))[1:4]
    def getfield(self, pkt, s):
        # Read 3 bytes, zero-extended to 4 for unpacking.
        return s[3:], self.m2i(pkt, struct.unpack(self.fmt, b"\x00" + s[:3])[0])
class _ASN1CertListField(StrLenField):
    # A list of (3-byte length, DER certificate) entries.
    islist = 1
    def i2len(self, pkt, i):
        if i is None:
            return 0
        return len(self.i2m(pkt, i))
    def getfield(self, pkt, s):
        """
        Extract Certs in a loop.
        XXX We should provide safeguards when trying to parse a Cert.
        """
        l = None
        if self.length_from is not None:
            l = self.length_from(pkt)
        lst = []
        ret = b""
        m = s
        if l is not None:
            m, ret = s[:l], s[l:]
        while m:
            # 3-byte big-endian length prefix, then the DER certificate.
            clen = struct.unpack("!I", b'\x00' + m[:3])[0]
            lst.append((clen, Cert(m[3:3 + clen])))
            m = m[3 + clen:]
        return m + ret, lst
    def i2m(self, pkt, i):
        def i2m_one(i):
            # Accept raw strings, Cert objects, or (length, cert) pairs.
            if isinstance(i, str):
                return i
            if isinstance(i, Cert):
                s = i.der
                l = struct.pack("!I", len(s))[1:4]
                return l + s
            (l, s) = i
            if isinstance(s, Cert):
                s = s.der
            return struct.pack("!I", l)[1:4] + s
        if i is None:
            return b""
        if isinstance(i, str):
            return i
        if isinstance(i, Cert):
            i = [i]
        return b"".join(i2m_one(x) for x in i)
    def any2i(self, pkt, x):
        return x
class _ASN1CertField(StrLenField):
    # A single (3-byte length, DER certificate) entry.
    def i2len(self, pkt, i):
        if i is None:
            return 0
        return len(self.i2m(pkt, i))
    def getfield(self, pkt, s):
        l = None
        if self.length_from is not None:
            l = self.length_from(pkt)
        ret = b""
        m = s
        if l is not None:
            m, ret = s[:l], s[l:]
        # 3-byte big-endian length prefix, then the DER certificate.
        clen = struct.unpack("!I", b'\x00' + m[:3])[0]
        len_cert = (clen, Cert(m[3:3 + clen]))
        m = m[3 + clen:]
        return m + ret, len_cert
    def i2m(self, pkt, i):
        def i2m_one(i):
            # Accept raw strings, Cert objects, or (length, cert) pairs.
            if isinstance(i, str):
                return i
            if isinstance(i, Cert):
                s = i.der
                l = struct.pack("!I", len(s))[1:4]
                return l + s
            (l, s) = i
            if isinstance(s, Cert):
                s = s.der
            return struct.pack("!I", l)[1:4] + s
        if i is None:
            return b""
        return i2m_one(i)
    def any2i(self, pkt, x):
        return x
class TLSCertificate(_TLSHandshake):
    """
    XXX We do not support RFC 5081, i.e. OpenPGP certificates.
    """
    name = "TLS Handshake - Certificate"
    fields_desc = [ ByteEnumField("msgtype", 11, _tls_handshake_type),
                    ThreeBytesField("msglen", None),
                    _ASN1CertLenField("certslen", None, length_of="certs"),
                    _ASN1CertListField("certs", [],
                                       length_from = lambda pkt: pkt.certslen) ]
    @classmethod
    def dispatch_hook(cls, _pkt=None, *args, **kargs):
        # Use the TLS 1.3 Certificate format when the session negotiated
        # version 0x0304 or higher.
        if _pkt:
            tls_session = kargs.get("tls_session", None)
            if tls_session and (tls_session.tls_version or 0) >= 0x0304:
                return TLS13Certificate
        return TLSCertificate
    def post_dissection_tls_session_update(self, msg_str):
        # Record the peer's certificate chain in the session: received by a
        # client, these are server certs; received by a server, client certs.
        self.tls_session_update(msg_str)
        connection_end = self.tls_session.connection_end
        if connection_end == "client":
            self.tls_session.server_certs = [x[1] for x in self.certs]
        else:
            self.tls_session.client_certs = [x[1] for x in self.certs]
class _ASN1CertAndExt(_GenericTLSSessionInheritance):
    # TLS 1.3 CertificateEntry: one certificate plus its own extensions.
    name = "Certificate and Extensions"
    fields_desc = [ _ASN1CertField("cert", ""),
                    FieldLenField("extlen", None, length_of="ext"),
                    _ExtensionsField("ext", [],
                                     length_from=lambda pkt: pkt.extlen) ]
    def extract_padding(self, s):
        # Remaining bytes belong to the next entry, not to this packet.
        return b"", s
class _ASN1CertAndExtListField(PacketListField):
    def m2i(self, pkt, m):
        # Propagate the TLS session so nested entries share its context.
        entry_cls = self.cls
        return entry_cls(m, tls_session=pkt.tls_session)
class TLS13Certificate(_TLSHandshake):
    """TLS 1.3 Certificate: request context plus a list of cert entries."""
    name = "TLS 1.3 Handshake - Certificate"
    fields_desc = [ ByteEnumField("msgtype", 11, _tls_handshake_type),
                    ThreeBytesField("msglen", None),
                    FieldLenField("cert_req_ctxt_len", None, fmt="B",
                                  length_of="cert_req_ctxt"),
                    StrLenField("cert_req_ctxt", "",
                                length_from=lambda pkt: pkt.cert_req_ctxt_len),
                    _ASN1CertLenField("certslen", None, length_of="certs"),
                    _ASN1CertAndExtListField("certs", [], _ASN1CertAndExt,
                                length_from=lambda pkt: pkt.certslen) ]
    def post_dissection_tls_session_update(self, msg_str):
        # Record the peer's certificate chain in the session, mirroring the
        # TLS 1.2 TLSCertificate behavior.
        self.tls_session_update(msg_str)
        connection_end = self.tls_session.connection_end
        if connection_end == "client":
            if self.certs:
                sc = [x.cert[1] for x in self.certs]
                self.tls_session.server_certs = sc
        else:
            if self.certs:
                cc = [x.cert[1] for x in self.certs]
                self.tls_session.client_certs = cc
###############################################################################
### ServerKeyExchange ###
###############################################################################
class TLSServerKeyExchange(_TLSHandshake):
    """
    ServerKeyExchange: ephemeral key exchange parameters plus (unless the
    key exchange is anonymous) a signature over them.
    """
    name = "TLS Handshake - Server Key Exchange"
    fields_desc = [ ByteEnumField("msgtype", 12, _tls_handshake_type),
                    ThreeBytesField("msglen", None),
                    _TLSServerParamsField("params", None,
                        length_from=lambda pkt: pkt.msglen),
                    _TLSSignatureField("sig", None,
                        length_from=lambda pkt: pkt.msglen - len(pkt.params)) ]
    def build(self, *args, **kargs):
        """
        We overload build() method in order to provide a valid default value
        for params based on TLS session if not provided. This cannot be done by
        overriding i2m() because the method is called on a copy of the packet.
        The 'params' field is built according to key_exchange.server_kx_msg_cls
        which should have been set after receiving a cipher suite in a
        previous ServerHello. Usual cases are:
        - None: for RSA encryption or fixed FF/ECDH. This should never happen,
          as no ServerKeyExchange should be generated in the first place.
        - ServerDHParams: for ephemeral FFDH. In that case, the parameter to
          server_kx_msg_cls does not matter.
        - ServerECDH*Params: for ephemeral ECDH. There are actually three
          classes, which are dispatched by _tls_server_ecdh_cls_guess on
          the first byte retrieved. The default here is b"\x03", which
          corresponds to ServerECDHNamedCurveParams (implicit curves).
        When the Server*DHParams are built via .fill_missing(), the session
        server_kx_privkey will be updated accordingly.
        """
        fval = self.getfieldval("params")
        if fval is None:
            s = self.tls_session
            if s.pwcs:
                if s.pwcs.key_exchange.export:
                    cls = ServerRSAParams(tls_session=s)
                else:
                    cls = s.pwcs.key_exchange.server_kx_msg_cls(b"\x03")
                    cls = cls(tls_session=s)
                try:
                    cls.fill_missing()
                except Exception:
                    # Best-effort: missing parameters are tolerated here. A
                    # bare except would also have swallowed SystemExit and
                    # KeyboardInterrupt, hence the narrowed clause.
                    pass
            else:
                cls = Raw()
            self.params = cls
        fval = self.getfieldval("sig")
        if fval is None:
            s = self.tls_session
            if s.pwcs:
                if not s.pwcs.key_exchange.anonymous:
                    p = self.params
                    if p is None:
                        p = b""
                    # Signature covers both randoms and the raw parameters.
                    m = s.client_random + s.server_random + raw(p)
                    cls = _TLSSignature(tls_session=s)
                    cls._update_sig(m, s.server_key)
                else:
                    cls = Raw()
            else:
                cls = Raw()
            self.sig = cls
        return _TLSHandshake.build(self, *args, **kargs)
    def post_dissection(self, pkt):
        """
        While previously dissecting Server*DHParams, the session
        server_kx_pubkey should have been updated.
        XXX Add a 'fixed_dh' OR condition to the 'anonymous' test.
        """
        s = self.tls_session
        if s.prcs and s.prcs.key_exchange.no_ske:
            pkt_info = pkt.firstlayer().summary()
            log_runtime.info("TLS: useless ServerKeyExchange [%s]", pkt_info)
        if (s.prcs and
            not s.prcs.key_exchange.anonymous and
            s.client_random and s.server_random and
            s.server_certs and len(s.server_certs) > 0):
            m = s.client_random + s.server_random + raw(self.params)
            sig_test = self.sig._verify_sig(m, s.server_certs[0])
            if not sig_test:
                pkt_info = pkt.firstlayer().summary()
                log_runtime.info("TLS: invalid ServerKeyExchange signature [%s]", pkt_info)
###############################################################################
### CertificateRequest ###
###############################################################################
_tls_client_certificate_types = { 1: "rsa_sign",
2: "dss_sign",
3: "rsa_fixed_dh",
4: "dss_fixed_dh",
5: "rsa_ephemeral_dh_RESERVED",
6: "dss_ephemeral_dh_RESERVED",
20: "fortezza_dms_RESERVED",
64: "ecdsa_sign",
65: "rsa_fixed_ecdh",
66: "ecdsa_fixed_ecdh" }
class _CertTypesField(_CipherSuitesField):
    # Certificate types are encoded exactly like cipher suites (a list of
    # fixed-size integers with symbolic names), so the whole implementation
    # is inherited unchanged from _CipherSuitesField.
    pass
class _CertAuthoritiesField(StrLenField):
    """
    List of acceptable certificate authorities: a sequence of DER-encoded
    distinguished names, each prefixed with a 2-byte big-endian length.

    XXX Rework this with proper ASN.1 parsing.
    """
    islist = 1

    def getfield(self, pkt, s):
        field_len = self.length_from(pkt)
        return s[field_len:], self.m2i(pkt, s[:field_len])

    def m2i(self, pkt, m):
        # Split the buffer into (length, dn) tuples; a truncated trailing
        # entry is kept as-is together with its advertised length.
        authorities = []
        while len(m) > 1:
            dn_len = struct.unpack("!H", m[:2])[0]
            if len(m) < dn_len + 2:
                authorities.append((dn_len, m[2:]))
                break
            authorities.append((dn_len, m[2:2 + dn_len]))
            m = m[2 + dn_len:]
        return authorities

    def i2m(self, pkt, i):
        # Re-emit each entry as its 2-byte length followed by the DN bytes.
        return b"".join(struct.pack("!H", dn_len) + dn for dn_len, dn in i)

    def addfield(self, pkt, s, val):
        return s + self.i2m(pkt, val)

    def i2len(self, pkt, val):
        return 0 if val is None else len(self.i2m(pkt, val))
class TLSCertificateRequest(_TLSHandshake):
    # Server-initiated request for a client certificate (RFC 5246 7.4.4):
    # acceptable certificate types, signature/hash algorithm pairs and an
    # optional list of acceptable certificate authorities.
    name = "TLS Handshake - Certificate Request"
    fields_desc = [ByteEnumField("msgtype", 13, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   FieldLenField("ctypeslen", None, fmt="B",
                                 length_of="ctypes"),
                   # Defaults advertise rsa_sign and ecdsa_sign.
                   _CertTypesField("ctypes", [1, 64],
                                   _tls_client_certificate_types,
                                   itemfmt="!B",
                                   length_from=lambda pkt: pkt.ctypeslen),
                   SigAndHashAlgsLenField("sig_algs_len", None,
                                          length_of="sig_algs"),
                   # Defaults: ecdsa+sha256, rsa+sha256, rsa+sha1.
                   SigAndHashAlgsField("sig_algs", [0x0403, 0x0401, 0x0201],
                                       EnumField("hash_sig", None, _tls_hash_sig),
                                       length_from=lambda pkt: pkt.sig_algs_len),
                   FieldLenField("certauthlen", None, fmt="!H",
                                 length_of="certauth"),
                   _CertAuthoritiesField("certauth", [],
                                         length_from=lambda pkt: pkt.certauthlen)]
###############################################################################
### ServerHelloDone ###
###############################################################################
class TLSServerHelloDone(_TLSHandshake):
    # Empty message marking the end of the server's hello flight.
    name = "TLS Handshake - Server Hello Done"
    fields_desc = [ByteEnumField("msgtype", 14, _tls_handshake_type),
                   ThreeBytesField("msglen", None)]
###############################################################################
### CertificateVerify ###
###############################################################################
class TLSCertificateVerify(_TLSHandshake):
    """
    Proof of possession of the private key matching the sender's
    certificate: a signature over the handshake transcript (with the
    TLS 1.3 context-string construction of RFC 8446 section 4.4.3 when
    the negotiated version is >= 0x0304).
    """
    name = "TLS Handshake - Certificate Verify"
    fields_desc = [ByteEnumField("msgtype", 15, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   _TLSSignatureField("sig", None,
                                      length_from=lambda pkt: pkt.msglen)]

    def build(self, *args, **kargs):
        """Compute the signature over the transcript if none was provided."""
        sig = self.getfieldval("sig")
        if sig is None:
            s = self.tls_session
            m = b"".join(s.handshake_messages)
            if s.tls_version >= 0x0304:
                # BUGFIX: these context strings were previously str
                # literals, which made the bytes concatenation below raise
                # a TypeError on Python 3; they must be bytes, as in
                # post_dissection().
                if s.connection_end == "client":
                    context_string = b"TLS 1.3, client CertificateVerify"
                elif s.connection_end == "server":
                    context_string = b"TLS 1.3, server CertificateVerify"
                # RFC 8446 4.4.3: 64 spaces | context string | 0x00 | hash.
                m = b"\x20"*64 + context_string + b"\x00" + s.wcs.hash.digest(m)
            self.sig = _TLSSignature(tls_session=s)
            if s.connection_end == "client":
                self.sig._update_sig(m, s.client_key)
            elif s.connection_end == "server":
                # should be TLS 1.3 only
                self.sig._update_sig(m, s.server_key)
        return _TLSHandshake.build(self, *args, **kargs)

    def post_dissection(self, pkt):
        """Verify the received signature against the peer's certificate."""
        s = self.tls_session
        m = b"".join(s.handshake_messages)
        if s.tls_version >= 0x0304:
            # The context string names the *sender* of the message, i.e.
            # the peer of our connection_end.
            if s.connection_end == "client":
                context_string = b"TLS 1.3, server CertificateVerify"
            elif s.connection_end == "server":
                context_string = b"TLS 1.3, client CertificateVerify"
            m = b"\x20"*64 + context_string + b"\x00" + s.rcs.hash.digest(m)

        if s.connection_end == "server":
            if s.client_certs and len(s.client_certs) > 0:
                sig_test = self.sig._verify_sig(m, s.client_certs[0])
                if not sig_test:
                    pkt_info = pkt.firstlayer().summary()
                    log_runtime.info("TLS: invalid CertificateVerify signature [%s]", pkt_info)
        elif s.connection_end == "client":
            # should be TLS 1.3 only
            if s.server_certs and len(s.server_certs) > 0:
                sig_test = self.sig._verify_sig(m, s.server_certs[0])
                if not sig_test:
                    pkt_info = pkt.firstlayer().summary()
                    log_runtime.info("TLS: invalid CertificateVerify signature [%s]", pkt_info)
###############################################################################
### ClientKeyExchange ###
###############################################################################
class _TLSCKExchKeysField(PacketField):
    __slots__ = ["length_from"]
    holds_packet = 1

    def __init__(self, name, length_from=None, remain=0):
        self.length_from = length_from
        PacketField.__init__(self, name, None, None, remain=remain)

    def m2i(self, pkt, m):
        """
        The client_kx_msg may be either None, EncryptedPreMasterSecret
        (for RSA encryption key exchange), ClientDiffieHellmanPublic,
        or ClientECDiffieHellmanPublic. When either one of them gets
        dissected, the session context is updated accordingly.
        """
        field_len = self.length_from(pkt)
        payload, remainder = m[:field_len], m[field_len:]

        session = pkt.tls_session
        kx_cls = None
        if session.prcs and session.prcs.key_exchange:
            kx_cls = session.prcs.key_exchange.client_kx_msg_cls
        if kx_cls is None:
            # No known key exchange negotiated: keep the bytes raw.
            return Raw(payload) / Padding(remainder)
        return kx_cls(payload, tls_session=session) / Padding(remainder)
class TLSClientKeyExchange(_TLSHandshake):
    """
    This class mostly works like TLSServerKeyExchange and its 'params' field.
    """
    name = "TLS Handshake - Client Key Exchange"
    fields_desc = [ByteEnumField("msgtype", 16, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   _TLSCKExchKeysField("exchkeys",
                                       length_from=lambda pkt: pkt.msglen)]

    def build(self, *args, **kargs):
        # When no key material was provided, derive the proper client kx
        # message from the session's pending read state (Raw as fallback).
        if self.getfieldval("exchkeys") is None:
            session = self.tls_session
            if session.prcs:
                kx_cls = session.prcs.key_exchange.client_kx_msg_cls
                self.exchkeys = kx_cls(tls_session=session)
            else:
                self.exchkeys = Raw()
        return _TLSHandshake.build(self, *args, **kargs)
###############################################################################
### Finished ###
###############################################################################
class _VerifyDataField(StrLenField):
    def getfield(self, pkt, s):
        # The verify_data length depends on the protocol version:
        # 36 bytes for SSLv3, the transcript-hash length for TLS 1.3,
        # and 12 bytes for TLS 1.0-1.2.
        version = pkt.tls_session.tls_version
        if version == 0x0300:
            boundary = 36
        elif version >= 0x0304:
            boundary = pkt.tls_session.rcs.hash.hash_len
        else:
            boundary = 12
        return s[boundary:], s[:boundary]
class TLSFinished(_TLSHandshake):
    # Final handshake message: verify_data authenticating the whole
    # transcript. Building fills vdata from the session secrets;
    # dissection checks the received value against a recomputation.
    name = "TLS Handshake - Finished"
    fields_desc = [ByteEnumField("msgtype", 20, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   _VerifyDataField("vdata", None)]

    def build(self, *args, **kargs):
        # Compute verify_data over the handshake transcript when the
        # caller did not set it explicitly.
        fval = self.getfieldval("vdata")
        if fval is None:
            s = self.tls_session
            handshake_msg = b"".join(s.handshake_messages)
            con_end = s.connection_end
            if s.tls_version < 0x0304:
                # Pre-1.3: PRF over the transcript keyed by master_secret.
                ms = s.master_secret
                self.vdata = s.wcs.prf.compute_verify_data(con_end, "write",
                                                           handshake_msg, ms)
            else:
                # TLS 1.3: HMAC construction handled by the session.
                self.vdata = s.compute_tls13_verify_data(con_end, "write")
        return _TLSHandshake.build(self, *args, **kargs)

    def post_dissection(self, pkt):
        # Recompute the peer's verify_data and flag a mismatch; skipped
        # on frozen sessions (e.g. when replaying without secrets).
        s = self.tls_session
        if not s.frozen:
            handshake_msg = b"".join(s.handshake_messages)
            if s.tls_version < 0x0304 and s.master_secret is not None:
                ms = s.master_secret
                con_end = s.connection_end
                verify_data = s.rcs.prf.compute_verify_data(con_end, "read",
                                                            handshake_msg, ms)
                if self.vdata != verify_data:
                    pkt_info = pkt.firstlayer().summary()
                    log_runtime.info("TLS: invalid Finished received [%s]", pkt_info)
            elif s.tls_version >= 0x0304:
                con_end = s.connection_end
                verify_data = s.compute_tls13_verify_data(con_end, "read")
                if self.vdata != verify_data:
                    pkt_info = pkt.firstlayer().summary()
                    log_runtime.info("TLS: invalid Finished received [%s]", pkt_info)

    def post_build_tls_session_update(self, msg_str):
        # After sending a TLS 1.3 Finished, stage the next write state and
        # derive the application traffic secrets (the exact derivation
        # depends on which side we are).
        self.tls_session_update(msg_str)
        s = self.tls_session
        if s.tls_version >= 0x0304:
            s.pwcs = writeConnState(ciphersuite=type(s.wcs.ciphersuite),
                                    connection_end=s.connection_end,
                                    tls_version=s.tls_version)
            s.triggered_pwcs_commit = True
            if s.connection_end == "server":
                s.compute_tls13_traffic_secrets()
            elif s.connection_end == "client":
                s.compute_tls13_traffic_secrets_end()
                s.compute_tls13_resumption_secret()

    def post_dissection_tls_session_update(self, msg_str):
        # Mirror of post_build_tls_session_update for a received Finished:
        # stage the next read state and derive the matching secrets.
        self.tls_session_update(msg_str)
        s = self.tls_session
        if s.tls_version >= 0x0304:
            s.prcs = readConnState(ciphersuite=type(s.rcs.ciphersuite),
                                   connection_end=s.connection_end,
                                   tls_version=s.tls_version)
            s.triggered_prcs_commit = True
            if s.connection_end == "client":
                s.compute_tls13_traffic_secrets()
            elif s.connection_end == "server":
                s.compute_tls13_traffic_secrets_end()
                s.compute_tls13_resumption_secret()
## Additional handshake messages
###############################################################################
### HelloVerifyRequest ###
###############################################################################
class TLSHelloVerifyRequest(_TLSHandshake):
    """
    Defined for DTLS, see RFC 6347.
    """
    name = "TLS Handshake - Hello Verify Request"
    # BUGFIX: hello_verify_request is HandshakeType 3 (RFC 6347 section
    # 4.3.2), and the _tls_handshake_cls dispatch table below maps 3 to
    # this class. The previous default of 21 collided with
    # certificate_url (TLSCertificateURL), so built messages would have
    # been dissected as the wrong type.
    fields_desc = [ByteEnumField("msgtype", 3, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   FieldLenField("cookielen", None,
                                 fmt="B", length_of="cookie"),
                   StrLenField("cookie", "",
                               length_from=lambda pkt: pkt.cookielen)]
###############################################################################
### CertificateURL ###
###############################################################################
_tls_cert_chain_types = { 0: "individual_certs",
1: "pkipath" }
class URLAndOptionalHash(Packet):
    # url_and_hash_list entry of the RFC 4366 CertificateURL message:
    # a URL optionally followed by a 20-byte SHA-1 hash of the referenced
    # certificate data.
    name = "URLAndOptionHash structure for TLSCertificateURL"
    fields_desc = [FieldLenField("urllen", None, length_of="url"),
                   StrLenField("url", "",
                               length_from=lambda pkt: pkt.urllen),
                   # 1 when a hash follows, 0 otherwise (the adjust maps
                   # the 0- or 20-byte hash length onto that flag).
                   FieldLenField("hash_present", None,
                                 fmt="B", length_of="hash",
                                 adjust=lambda pkt, x: int(math.ceil(x/20.))),
                   StrLenField("hash", "",
                               length_from=lambda pkt: 20*pkt.hash_present)]

    def guess_payload_class(self, p):
        # Entries are stacked in a PacketListField: trailing bytes belong
        # to the next entry, not to a payload of this one.
        return Padding
class TLSCertificateURL(_TLSHandshake):
    """
    Defined in RFC 4366. PkiPath structure of section 8 is not implemented yet.
    """
    name = "TLS Handshake - Certificate URL"
    fields_desc = [ByteEnumField("msgtype", 21, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   ByteEnumField("certchaintype", None, _tls_cert_chain_types),
                   FieldLenField("uahlen", None, length_of="uah"),
                   # List of URLAndOptionalHash entries.
                   PacketListField("uah", [], URLAndOptionalHash,
                                   length_from=lambda pkt: pkt.uahlen)]
###############################################################################
### CertificateStatus ###
###############################################################################
class ThreeBytesLenField(FieldLenField):
    # A FieldLenField carried on the wire as a 3-byte big-endian integer:
    # values are packed/unpacked through a 4-byte "!I" with the high byte
    # dropped (on output) or zero-padded (on input).
    def __init__(self, name, default, length_of=None, adjust=lambda pkt, x: x):
        FieldLenField.__init__(self, name, default, length_of=length_of,
                               fmt='!I', adjust=adjust)

    def i2repr(self, pkt, x):
        # Note: keeps the historical behaviour of returning the int 0
        # (not a string) when the value is unset.
        return 0 if x is None else repr(self.i2h(pkt, x))

    def addfield(self, pkt, s, val):
        packed = struct.pack(self.fmt, self.i2m(pkt, val))
        return s + packed[1:4]

    def getfield(self, pkt, s):
        value = struct.unpack(self.fmt, b"\x00" + s[:3])[0]
        return s[3:], self.m2i(pkt, value)
# Maps CertificateStatusType (1 = "ocsp") to the class used to dissect
# the corresponding response body in TLSCertificateStatus.
_cert_status_cls = { 1: OCSP_Response }
class _StatusField(PacketField):
    def m2i(self, pkt, m):
        # Dissect the response with the class registered for the message's
        # status_type, falling back to this field's default class.
        status_cls = _cert_status_cls.get(pkt.status_type, self.cls)
        return status_cls(m)
class TLSCertificateStatus(_TLSHandshake):
    # RFC 6066 status_request answer, typically a stapled OCSP response.
    name = "TLS Handshake - Certificate Status"
    fields_desc = [ByteEnumField("msgtype", 22, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   ByteEnumField("status_type", 1, _cert_status_type),
                   ThreeBytesLenField("responselen", None,
                                      length_of="response"),
                   # Parsed according to status_type (see _StatusField).
                   _StatusField("response", None, Raw)]
###############################################################################
### SupplementalData ###
###############################################################################
class SupDataEntry(Packet):
    # Generic supp_data_entry of RFC 4680: type, length and opaque data.
    name = "Supplemental Data Entry - Generic"
    fields_desc = [ShortField("sdtype", None),
                   FieldLenField("len", None, length_of="data"),
                   StrLenField("data", "",
                               length_from=lambda pkt: pkt.len)]

    def guess_payload_class(self, p):
        # Entries are stacked in a PacketListField; remaining bytes are
        # the next entry, not a payload.
        return Padding
class UserMappingData(Packet):
    # UserMappingData entry of RFC 4681: version byte plus opaque data.
    name = "User Mapping Data"
    fields_desc = [ByteField("version", None),
                   FieldLenField("len", None, length_of="data"),
                   StrLenField("data", "",
                               length_from=lambda pkt: pkt.len)]

    def guess_payload_class(self, p):
        return Padding
class SupDataEntryUM(Packet):
    # user_mapping_data supplemental data entry (RFC 4681): the outer
    # length covers the 2-byte inner length field plus the data itself,
    # hence the +2 adjust.
    name = "Supplemental Data Entry - User Mapping"
    fields_desc = [ShortField("sdtype", None),
                   FieldLenField("len", None, length_of="data",
                                 adjust=lambda pkt, x: x+2),
                   FieldLenField("dlen", None, length_of="data"),
                   PacketListField("data", [], UserMappingData,
                                   length_from=lambda pkt: pkt.dlen)]

    def guess_payload_class(self, p):
        return Padding
class TLSSupplementalData(_TLSHandshake):
    # RFC 4680 SupplementalData handshake message: a list of entries.
    name = "TLS Handshake - Supplemental Data"
    fields_desc = [ByteEnumField("msgtype", 23, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   ThreeBytesLenField("sdatalen", None, length_of="sdata"),
                   PacketListField("sdata", [], SupDataEntry,
                                   length_from=lambda pkt: pkt.sdatalen)]
###############################################################################
### NewSessionTicket ###
###############################################################################
class TLSNewSessionTicket(_TLSHandshake):
    """
    Pre-1.3 NewSessionTicket (RFC 5077).

    XXX When knowing the right secret, we should be able to read the ticket.
    """
    name = "TLS Handshake - New Session Ticket"
    fields_desc = [ByteEnumField("msgtype", 4, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   IntField("lifetime", 0xffffffff),
                   FieldLenField("ticketlen", None, length_of="ticket"),
                   StrLenField("ticket", "",
                               length_from=lambda pkt: pkt.ticketlen)]

    @classmethod
    def dispatch_hook(cls, _pkt=None, *args, **kargs):
        # TLS 1.3 redefined this message entirely; route dissection to the
        # 1.3 class based on the session's negotiated version.
        s = kargs.get("tls_session", None)
        if s and s.tls_version >= 0x0304:
            return TLS13NewSessionTicket
        return TLSNewSessionTicket

    def post_dissection_tls_session_update(self, msg_str):
        self.tls_session_update(msg_str)
        # Only the client stores the ticket, for later resumption.
        if self.tls_session.connection_end == "client":
            self.tls_session.client_session_ticket = self.ticket
class TLS13NewSessionTicket(_TLSHandshake):
    """
    TLS 1.3 NewSessionTicket.

    Uncomment the TicketField line for parsing a RFC 5077 ticket.
    """
    name = "TLS Handshake - New Session Ticket"
    fields_desc = [ByteEnumField("msgtype", 4, _tls_handshake_type),
                   ThreeBytesField("msglen", None),
                   IntField("ticket_lifetime", 0xffffffff),
                   IntField("ticket_age_add", 0),
                   FieldLenField("ticketlen", None, length_of="ticket"),
                   #TicketField("ticket", "",
                   StrLenField("ticket", "",
                               length_from=lambda pkt: pkt.ticketlen),
                   _ExtensionsLenField("extlen", None, length_of="ext"),
                   # Extensions fill whatever msglen leaves after the two
                   # lifetime ints, the two length fields and the ticket
                   # (12 fixed bytes in total).
                   _ExtensionsField("ext", None,
                                    length_from=lambda pkt: (pkt.msglen -
                                                             (pkt.ticketlen or 0) -
                                                             12))]

    def post_dissection_tls_session_update(self, msg_str):
        self.tls_session_update(msg_str)
        # Only the client stores the ticket, for later resumption.
        if self.tls_session.connection_end == "client":
            self.tls_session.client_session_ticket = self.ticket
###############################################################################
### All handshake messages defined in this module ###
###############################################################################
# Dispatch table: HandshakeType value -> message class, used to pick the
# dissector for each handshake message.
_tls_handshake_cls = { 0: TLSHelloRequest, 1: TLSClientHello,
                       2: TLSServerHello, 3: TLSHelloVerifyRequest,
                       4: TLSNewSessionTicket, 6: TLSHelloRetryRequest,
                       8: TLSEncryptedExtensions, 11: TLSCertificate,
                       12: TLSServerKeyExchange, 13: TLSCertificateRequest,
                       14: TLSServerHelloDone, 15: TLSCertificateVerify,
                       16: TLSClientKeyExchange, 20: TLSFinished,
                       21: TLSCertificateURL, 22: TLSCertificateStatus,
                       23: TLSSupplementalData }
| 42.25589 | 95 | 0.503739 |
af77dbcd1db49c1299cf54d2a17a85919806e715 | 493 | py | Python | packages/python/plotly/plotly/validators/histogram2dcontour/contours/_type.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/histogram2dcontour/contours/_type.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/histogram2dcontour/contours/_type.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the `type` property of `histogram2dcontour.contours`."""

    def __init__(self, plotly_name="type",
                 parent_name="histogram2dcontour.contours", **kwargs):
        # Pop the overridable defaults before forwarding the rest.
        edit_type = kwargs.pop("edit_type", "calc")
        values = kwargs.pop("values", ["levels", "constraint"])
        super(TypeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
| 32.866667 | 85 | 0.64503 |
08e78f7db525b0b2e56cac96c283c182bd39875d | 1,315 | py | Python | build/dart/label_to_package_name.py | dahlia-os/fuchsia-pine64-pinephone | 57aace6f0b0bd75306426c98ab9eb3ff4524a61d | [
"BSD-3-Clause"
] | 14 | 2020-10-25T05:48:36.000Z | 2021-09-20T02:46:20.000Z | build/dart/label_to_package_name.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | 1 | 2022-01-14T23:38:40.000Z | 2022-01-14T23:38:40.000Z | build/dart/label_to_package_name.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | 4 | 2020-12-28T17:04:45.000Z | 2022-03-12T03:20:44.000Z | #!/usr/bin/env python3.8
# Copyright 2016 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
# TODO(abarth): Base these paths on the sdk_dirs variable in gn.
_SDK_DIRS = [
"garnet/public/",
"topaz/public/",
]
# Strip the sdk dirs from the given label, if necessary.
def _remove_sdk_dir(label):
for prefix in _SDK_DIRS:
if label.startswith(prefix):
return label[len(prefix):]
return label
# For target //foo/bar:blah, the package name will be foo.bar._blah.
# For default targets //foo/bar:bar, the package name will be foo.bar.
def convert(label):
    """Map a GN label to a Dart package name.

    On malformed input, writes a diagnostic to stderr and returns 1
    (historical behaviour kept for compatibility).
    """
    if not label.startswith("//"):
        sys.stderr.write("expected label to start with //, got %s\n" % label)
        return 1
    stripped = _remove_sdk_dir(label[2:])
    colon = stripped.rfind(":")
    if colon < 0:
        sys.stderr.write("could not find target name in label %s\n" % label)
        return 1
    path_parts = stripped[:colon].split("/")
    target = stripped[colon + 1:]
    if path_parts[-1] == target:
        # Default target: //foo/bar:bar -> foo.bar
        return ".".join(path_parts)
    # Non-default target: //foo/bar:blah -> foo.bar._blah
    return "%s._%s" % (".".join(path_parts), target)
def main():
    # Convert the label given as the sole CLI argument and print it.
    print(convert(sys.argv[1]))


if __name__ == '__main__':
    sys.exit(main())
| 27.395833 | 77 | 0.642586 |
9e37393eee26fe8eebf92b54b78ba193cac7d484 | 510 | py | Python | graphics/lines_graphic.py | marcioaug/mutants-dm | 7c241f6c9073b557769906055ee61efb05363c8b | [
"MIT"
] | null | null | null | graphics/lines_graphic.py | marcioaug/mutants-dm | 7c241f6c9073b557769906055ee61efb05363c8b | [
"MIT"
] | null | null | null | graphics/lines_graphic.py | marcioaug/mutants-dm | 7c241f6c9073b557769906055ee61efb05363c8b | [
"MIT"
] | null | null | null | from matplotlib import pyplot
# Illustrative bias/variance trade-off data: variance doubles while the
# squared bias halves at each model-complexity step.
variance = [1, 2, 4, 8, 16, 32, 64, 128, 256]
bias_squared = [256, 128, 64, 32, 16, 8, 4, 2, 1]
total_error = [v + b for v, b in zip(variance, bias_squared)]
xs = range(len(variance))

# One line series per curve, each with its own color/style and legend label.
pyplot.plot(xs, variance, 'g-', label='variance')
pyplot.plot(xs, bias_squared, 'r-.', label='bias^2')
pyplot.plot(xs, total_error, 'b:', label='total error')
pyplot.legend(loc=9)
pyplot.xlabel("complexidade do modelo")
pyplot.title("Compromisso entre Polarização e Variância")
pyplot.show() | 30 | 61 | 0.692157 |
e163d4601d8050c9b9bb02a37341ef7b7d849acc | 4,071 | py | Python | project/assets_cartographer/parser.py | 4383/recalbox-manager | 4da0f10d6b7c47d40b6550b3e9f96c6ee90a53bb | [
"MIT"
] | 61 | 2016-11-07T02:16:48.000Z | 2021-09-22T19:32:14.000Z | project/assets_cartographer/parser.py | DOCK-PI3/MasOS-Manager | ea5e2e6a20dc7853b5c1eeeb7f5dd358bff6d4b7 | [
"MIT"
] | 36 | 2015-12-05T10:24:27.000Z | 2019-06-25T10:50:29.000Z | project/assets_cartographer/parser.py | DOCK-PI3/MasOS-Manager | ea5e2e6a20dc7853b5c1eeeb7f5dd358bff6d4b7 | [
"MIT"
] | 33 | 2017-01-02T16:07:25.000Z | 2022-02-08T05:09:02.000Z | """
Manifest parsers
The assets manifest is a JSON file like this : ::
{
"stylesheets": {
"css/recalbox.min.css": [
"css/app.css"
]
},
"javascripts": {
"js/modernizr.min.js": [
"js/foundation5/vendor/modernizr.js"
],
}
}
This should be usable with grunt/gulp but without "glob" patterns.
Asset package key name must be the filepath to the package file and
contain a list of asset file to package.
Note also that each path is relative to static directories, for
gulp/grunt you would have to prepend them with the path to the project static dir (not
the app static dirs, as they would not be reachable from Grung/Gulp)
This would eventually not work with static files through S3/etc..
"""
import os
from django.conf import settings
from django.template import Context
from django.template.loader import get_template as loader_get_template
from django.contrib.staticfiles import finders
class AssetMapError(Exception):
pass
class StaticfileAssetNotFound(Exception):
pass
class AssetTagsManagerBase(object):
"""
Base for management assets using given asset map
Just take assets map to get its files and render their HTML "loader" fragment
This does not intend to compress/minify/uglify asset, just rendering their tags to
load them from your template
@assets_map: file maps for an asset kind (not the full asset map)
"""
def __init__(self, assets_map):
self.assets_map = assets_map
def render_fragment(self, template, context=None):
"""
Render fragment using given django template
"""
return template.render(context)
def static_url(self, filepath):
"""
Have to raise a custom exception instead of output print
Check if given relative file path exists in any static directory but
only is ASSETS_STRICT is enabled.
Finally if there is not exception, return the static file url
"""
if settings.ASSETS_STRICT:
if not finders.find(filepath):
raise StaticfileAssetNotFound("Asset file cannot be finded in any static directory: {}".format(filepath))
return os.path.join(settings.STATIC_URL, filepath)
def get_files(self, name):
"""
Find and return asset file url given package name
"""
try:
file_paths = self.assets_map[name]
except KeyError:
if settings.ASSETS_STRICT:
raise AssetMapError("Asset key '{}' does not exists in your asset map".format(name))
else:
if settings.ASSETS_PACKAGED:
return [self.static_url(name)]
else:
return [self.static_url(item) for item in file_paths]
return []
def render(self, names, template):
"""
Return rendered given template for each asset files of each package names
"""
tags = []
for name in names:
asset_files = self.get_files(name)
for item in filter(None, asset_files):
tags.append( self.render_fragment(template, context=Context({"ASSET_URL": item})) )
return '\n'.join(tags)
class AssetTagsManagerFromManifest(AssetTagsManagerBase):
"""
Override AssetTagsManagerBase to implement management from the whole
manifest
"""
def __init__(self, manifest):
self.manifest = manifest # full asset map from settings
self.templates = self.get_templates()
def get_templates(self):
"""
Render fragment using given django template
"""
templates = {}
for k,v in settings.ASSETS_TAG_TEMPLATES.items():
templates[k] = loader_get_template(v)
return templates
def render_for_kind(self, names, kind):
self.assets_map = self.manifest[kind]
return self.render(names, self.templates[kind])
| 31.315385 | 121 | 0.633505 |
86ebacc47652992ecf7a326b77b60b8e9787fdc1 | 3,447 | py | Python | src/production_cost.py | Smeaol22/usda_market_data | 9743c352642643b80c1a7513e3a115e6b1b22f01 | [
"BSD-2-Clause"
] | null | null | null | src/production_cost.py | Smeaol22/usda_market_data | 9743c352642643b80c1a7513e3a115e6b1b22f01 | [
"BSD-2-Clause"
] | null | null | null | src/production_cost.py | Smeaol22/usda_market_data | 9743c352642643b80c1a7513e3a115e6b1b22f01 | [
"BSD-2-Clause"
] | null | null | null | import pandas as pd
import requests
from src.conf import ReportType, USDA_MARKET_BASE_URL
from src.error import ErrorCode, UsdaMarketRequestError
from src.main import retrieve_published_reports_by_criteria
from src.utils import extract_element_from_inline_bytes
class ProductionCostStates:
    """Slug codes identifying state-level USDA production-cost reports.

    Pass one of these attributes (e.g. ``ProductionCostStates.illinois``)
    as the ``production_cost_state`` argument of
    ``get_cost_production_historic``.

    NOTE(review): this class previously inherited from the builtin
    ``enumerate`` (an iterator type) — almost certainly a mistaken
    stand-in for ``enum.Enum``. The attributes are only ever used as
    plain string constants, so a simple namespace class preserves all
    caller-visible behaviour while removing the bogus base class.
    """
    illinois = 'GX_GR210'
    north_carolina = 'RA_GR210'
    # NOTE(review): trailing space kept as-is — confirm the slug really
    # contains it before changing.
    south_carolina = 'CO_GR210 '
    iowa = 'NW_GR210'
def get_cost_production_historic(production_cost_state, start_date=None, end_date=None):
    """Collect every published production-cost report into one dataframe.

    Args:
        production_cost_state (str): ProductionCostStates.{state} see ProductionCostStates
        start_date (timestamp): start date to retrieve report (if None search all report in past)
        end_date (timestamp): last date to retrieve report (if None search until today)

    Returns:
        (dataframe): concatenation of one parsed dataframe per report
    """
    reports = retrieve_published_reports_by_criteria(
        ReportType.PRODUCTION_COST,
        field_slug_title_value=production_cost_state,
        start_date=start_date,
        end_date=end_date)
    # One parsed dataframe per (date, document-url) pair.
    frames = [
        extract_cost_report_to_df(USDA_MARKET_BASE_URL + url, report_date)
        for report_date, url in zip(reports['Report Date'], reports['Document'])
    ]
    return pd.concat(frames, ignore_index=True)
def extract_cost_report_to_df(report_url, date, columns=None,
                              start_signal=None, end_signal="source"):
    """Download a USDA cost-production report and parse its table.

    Args:
        report_url (str): report url
        date (timestamp): date of the reports
        columns (list): column list for the output dataframe
        start_signal (list): list of elements to detect begin of the table
        end_signal (str): string signaling table line after ending

    Returns:
        (dataframe): dataframe obtain from the usda report table

    Raises:
        UsdaMarketRequestError: when the HTTP status is not a 2xx.
    """
    if columns is None:
        columns = ['product', 'offer', 'average', 'date']
    if start_signal is None:
        start_signal = ["product", "price"]

    response = requests.get(report_url)
    if not 200 <= response.status_code < 300:
        raise UsdaMarketRequestError(
            f"Request failed to retrieve published reports with code {response.status_code}",
            ErrorCode.REQUEST_SUBMISSION_ERROR)

    table = {label: [] for label in columns}
    in_table = False
    for raw_line in response.content.split(b'\n'):
        elt = extract_element_from_inline_bytes(raw_line)
        if in_table and elt[0].lower().startswith(end_signal):
            # End-of-table marker reached: stop scanning.
            break
        if in_table and elt[0] not in ['\\r', 'Change\\r']:
            # Data row: map positional elements onto the columns, with the
            # 'date' column filled from the report date instead.
            for index, label in enumerate(columns):
                if label == 'date':
                    table[label].append(date)
                else:
                    table[label].append(elt[index])
        if not in_table and elt[0].lower() in start_signal:
            # Header row found: subsequent lines belong to the table.
            in_table = True
    return pd.DataFrame(table, columns=columns)
| 43.0875 | 107 | 0.67537 |
afd6506029c8f1b8325c4d3628550175e7558f5b | 1,560 | py | Python | src/sol/handle_metaplex.py | dting/staketaxcsv | f25005c0c732dd6e19d2e46c6d602ab78db65868 | [
"MIT"
] | 1 | 2022-02-05T05:37:08.000Z | 2022-02-05T05:37:08.000Z | src/sol/handle_metaplex.py | dting/staketaxcsv | f25005c0c732dd6e19d2e46c6d602ab78db65868 | [
"MIT"
] | null | null | null | src/sol/handle_metaplex.py | dting/staketaxcsv | f25005c0c732dd6e19d2e46c6d602ab78db65868 | [
"MIT"
] | 1 | 2022-02-05T05:37:11.000Z | 2022-02-05T05:37:11.000Z | from common.make_tx import make_swap_tx
from sol.handle_simple import handle_unknown_detect_transfers
def handle_metaplex(exporter, txinfo):
transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net
if len(transfers_in) == 1 and len(transfers_out) == 1:
sent_amount, sent_currency, _, _ = transfers_out[0]
received_amount, received_currency, _, _ = transfers_in[0]
row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)
exporter.ingest_row(row)
else:
handle_unknown_detect_transfers(exporter, txinfo)
def is_nft_mint(txinfo):
    # Heuristic: a mint instruction with exactly one outgoing transfer,
    # and either no incoming transfer or a single incoming transfer of
    # amount 1 (the freshly minted NFT token).
    instructions = txinfo.log_instructions
    incoming, outgoing, _ = txinfo.transfers_net

    if "MintTo" not in instructions or len(outgoing) != 1:
        return False
    if not incoming:
        return True
    return len(incoming) == 1 and incoming[0][0] == 1
def handle_nft_mint(exporter, txinfo):
    # Mirror of handle_metaplex: one-in/one-out is a swap, everything
    # else is delegated to generic transfer detection.
    incoming, outgoing, _ = txinfo.transfers_net

    if len(incoming) == 1 and len(outgoing) == 1:
        out_amount, out_currency, _, _ = outgoing[0]
        in_amount, in_currency, _, _ = incoming[0]
        exporter.ingest_row(
            make_swap_tx(txinfo, out_amount, out_currency, in_amount, in_currency))
        return

    handle_unknown_detect_transfers(exporter, txinfo)
| 34.666667 | 98 | 0.712821 |
8befd25821c196d0483a5d630838de46109d5ac0 | 254 | py | Python | gcloud_notebook_training/notebook_utils.py | gclouduniverse/notebook_training | 40dbffb970925ebc0528d5042dbd4fda1c26bf3d | [
"MIT"
] | 7 | 2020-03-18T01:17:29.000Z | 2021-11-11T00:35:09.000Z | gcloud_notebook_training/notebook_utils.py | gclouduniverse/notebook_training | 40dbffb970925ebc0528d5042dbd4fda1c26bf3d | [
"MIT"
] | 5 | 2020-11-17T07:05:16.000Z | 2021-04-30T21:05:01.000Z | gcloud_notebook_training/notebook_utils.py | gclouduniverse/notebook_training | 40dbffb970925ebc0528d5042dbd4fda1c26bf3d | [
"MIT"
] | 1 | 2021-02-25T17:28:49.000Z | 2021-02-25T17:28:49.000Z | import json
def get_container_uri(notebook_path):
with open(notebook_path, "r") as read_file:
data = json.load(read_file)
try:
uri = data["metadata"]["environment"]["uri"]
except KeyError:
return None
return uri; | 23.090909 | 52 | 0.637795 |
7ff337ef05d92cfb69dbf4d6cf775f9f61fd0f44 | 4,493 | py | Python | tempest/api/object_storage/test_container_quotas.py | azorge/tempest | 549dfc93fb7e3d6d8566064a60a6069deae5c8eb | [
"Apache-2.0"
] | 1 | 2021-05-21T08:24:02.000Z | 2021-05-21T08:24:02.000Z | tempest/api/object_storage/test_container_quotas.py | azorge/tempest | 549dfc93fb7e3d6d8566064a60a6069deae5c8eb | [
"Apache-2.0"
] | null | null | null | tempest/api/object_storage/test_container_quotas.py | azorge/tempest | 549dfc93fb7e3d6d8566064a60a6069deae5c8eb | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
QUOTA_BYTES = 10
QUOTA_COUNT = 3
class ContainerQuotasTest(base.BaseObjectTest):
    """Attempts to test the perfect behavior of quotas in a container."""

    def setUp(self):
        """Creates and sets a container with quotas.

        Quotas are set by adding meta values to the container,
        and are validated when set:
        - X-Container-Meta-Quota-Bytes:
                     Maximum size of the container, in bytes.
        - X-Container-Meta-Quota-Count:
                     Maximum object count of the container.
        """
        super(ContainerQuotasTest, self).setUp()
        self.container_name = self.create_container()
        metadata = {"quota-bytes": str(QUOTA_BYTES),
                    "quota-count": str(QUOTA_COUNT), }
        self.container_client.update_container_metadata(
            self.container_name, metadata)

    def tearDown(self):
        """Cleans the container of any object after each test."""
        self.delete_containers()
        super(ContainerQuotasTest, self).tearDown()

    @decorators.idempotent_id('9a0fb034-86af-4df0-86fa-f8bd7db21ae0')
    @test.requires_ext(extension='container_quotas', service='object')
    @test.attr(type="smoke")
    def test_upload_valid_object(self):
        """Attempts to upload an object smaller than the bytes quota."""
        object_name = data_utils.rand_name(name="TestObject")
        data = data_utils.arbitrary_string(QUOTA_BYTES)

        nbefore = self._get_bytes_used()
        resp, _ = self.object_client.create_object(
            self.container_name, object_name, data)
        self.assertHeaders(resp, 'Object', 'PUT')

        # The container's byte usage must grow by exactly the object size.
        nafter = self._get_bytes_used()
        self.assertEqual(nbefore + len(data), nafter)

    @decorators.idempotent_id('22eeeb2b-3668-4160-baef-44790f65a5a0')
    @test.requires_ext(extension='container_quotas', service='object')
    @test.attr(type="smoke")
    def test_upload_large_object(self):
        """Attempts to upload an object larger than the bytes quota."""
        object_name = data_utils.rand_name(name="TestObject")
        data = data_utils.arbitrary_string(QUOTA_BYTES + 1)

        nbefore = self._get_bytes_used()
        # The oversized upload must be rejected with 413 (OverLimit)...
        self.assertRaises(lib_exc.OverLimit,
                          self.object_client.create_object,
                          self.container_name, object_name, data)

        # ...and leave the byte usage untouched.
        nafter = self._get_bytes_used()
        self.assertEqual(nbefore, nafter)

    @decorators.idempotent_id('3a387039-697a-44fc-a9c0-935de31f426b')
    @test.requires_ext(extension='container_quotas', service='object')
    @test.attr(type="smoke")
    def test_upload_too_many_objects(self):
        """Attempts to upload many objects that exceeds the count limit."""
        # Fill the container exactly up to the object-count quota.
        for _ in range(QUOTA_COUNT):
            name = data_utils.rand_name(name="TestObject")
            self.object_client.create_object(self.container_name, name, "")

        nbefore = self._get_object_count()
        self.assertEqual(nbefore, QUOTA_COUNT)

        # One more object must be rejected and the count stay unchanged.
        self.assertRaises(lib_exc.OverLimit,
                          self.object_client.create_object,
                          self.container_name, "OverQuotaObject", "")

        nafter = self._get_object_count()
        self.assertEqual(nbefore, nafter)

    def _get_container_metadata(self):
        # HEAD the container and return its response headers.
        resp, _ = self.container_client.list_container_metadata(
            self.container_name)
        return resp

    def _get_object_count(self):
        resp = self._get_container_metadata()
        return int(resp["x-container-object-count"])

    def _get_bytes_used(self):
        resp = self._get_container_metadata()
        return int(resp["x-container-bytes-used"])
| 38.732759 | 78 | 0.674828 |
821f95673b14b42b3262bcb651b1d66d62f52858 | 9,079 | py | Python | convolutional_attention/conv_attentional_learner.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | [
"BSD-3-Clause"
] | null | null | null | convolutional_attention/conv_attentional_learner.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | [
"BSD-3-Clause"
] | null | null | null | convolutional_attention/conv_attentional_learner.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | [
"BSD-3-Clause"
] | null | null | null | import cPickle
import logging
from math import ceil
import sys
import os
import time
import numpy as np
# from experimenter import ExperimentLogger
from convolutional_attention.conv_attentional_model import ConvolutionalAttentionalModel
from convolutional_attention.f1_evaluator import F1Evaluator
from convolutional_attention.token_naming_data import TokenCodeNamingData
class ConvolutionalAttentionalLearner:
    """Learner wrapper around ConvolutionalAttentionalModel: handles data
    extraction, minibatch training with early stopping, prediction and
    (de)serialization. (Python 2 code: print statements, xrange, cPickle.)"""
    def __init__(self, hyperparameters):
        # Number of previous name subtokens used as prediction context.
        self.name_cx_size = hyperparameters["name_cx_size"]
        self.hyperparameters = hyperparameters
        self.naming_data = None
        # Each convolution window of size w shrinks the sequence by w - 1,
        # so pad by the summed shrinkage of the three layers (sum(w_i) - 3).
        self.padding_size = self.hyperparameters["layer1_window_size"] + self.hyperparameters["layer2_window_size"] + self.hyperparameters["layer3_window_size"] - 3
        # Best parameter values found by train(); None until trained.
        self.parameters = None
    def train(self, input_file, patience=5, max_epochs=1000, minibatch_size=500):
        """Train on input_file, keeping the parameters with the best
        validation log-likelihood and stopping after `patience` epochs
        without improvement."""
        assert self.parameters is None, "Model is already trained"
        print "Extracting data..."
        # Get data (train, validation); 0.92 is presumably the training
        # fraction of the split -- TODO confirm against TokenCodeNamingData.
        train_data, validation_data, self.naming_data = TokenCodeNamingData.get_data_in_convolution_format_with_validation(input_file, self.name_cx_size, .92, self.padding_size)
        train_name_targets, train_name_contexts, train_code_sentences, train_original_name_ids = train_data
        val_name_targets, val_name_contexts, val_code_sentences, val_original_name_ids = validation_data
        # Create theano model and train
        model = ConvolutionalAttentionalModel(self.hyperparameters, len(self.naming_data.all_tokens_dictionary), len(self.naming_data.name_dictionary),
                                              self.naming_data.name_empirical_dist)
        def compute_validation_score_names():
            # Validation objective: log-likelihood of the held-out names.
            return model.log_prob_with_targets(val_name_contexts, val_code_sentences, val_name_targets)
        best_params = [p.get_value() for p in model.train_parameters]
        best_name_score = float('-inf')
        # Accumulates the per-parameter values returned by grad_step()
        # (presumably update-magnitude ratios -- TODO confirm).
        ratios = np.zeros(len(model.train_parameters))
        n_batches = 0
        epochs_not_improved = 0
        print "[%s] Starting training..." % time.asctime()
        for i in xrange(max_epochs):
            start_time = time.time()
            # Visit training names in a fresh random order every epoch.
            name_ordering = np.arange(len(train_name_targets), dtype=np.int32)
            np.random.shuffle(name_ordering)
            sys.stdout.write(str(i))
            num_minibatches = min(int(ceil(float(len(train_name_targets)) / minibatch_size))-1, 25)  # Clump minibatches
            for j in xrange(num_minibatches):
                name_batch_ids = name_ordering[j * minibatch_size:(j + 1) * minibatch_size]
                batch_code_sentences = train_code_sentences[name_batch_ids]
                # Gradients are accumulated sample-by-sample, then applied
                # once per minibatch by grad_step().
                for k in xrange(len(name_batch_ids)):
                    out = model.grad_accumulate(train_name_contexts[name_batch_ids[k]], batch_code_sentences[k], train_name_targets[name_batch_ids[k]])
                assert len(name_batch_ids) > 0
                ratios += model.grad_step()
                n_batches += 1
            sys.stdout.write("|")
            if i % 1 == 0:
                # Validate every epoch (i % 1 is always 0) and snapshot the
                # parameters whenever the validation score improves.
                name_ll = compute_validation_score_names()
                if name_ll > best_name_score:
                    best_name_score = name_ll
                    best_params = [p.get_value() for p in model.train_parameters]
                    print "At %s validation: name_ll=%s [best so far]" % (i, name_ll)
                    epochs_not_improved = 0
                else:
                    print "At %s validation: name_ll=%s" % (i, name_ll)
                    epochs_not_improved += 1
                for k in xrange(len(model.train_parameters)):
                    print "%s: %.0e" % (model.train_parameters[k].name, ratios[k] / n_batches)
                n_batches = 0
                ratios = np.zeros(len(model.train_parameters))
                if epochs_not_improved >= patience:
                    print "Not improved for %s epochs. Stopping..." % patience
                    break
            elapsed = int(time.time() - start_time)
            print "Epoch elapsed %sh%sm%ss" % ((elapsed / 60 / 60) % 60, (elapsed / 60) % 60, elapsed % 60)
        print "[%s] Training Finished..." % time.asctime()
        # Keep the best snapshot rather than the final-epoch weights.
        self.parameters = best_params
        model.restore_parameters(best_params)
        self.model = model
    def predict_name(self, code_features):
        """Return name suggestions for the given code features."""
        assert self.parameters is not None, "Model is not trained"
        next_name_log_probs = lambda cx: self.model.log_prob(cx, code_features)
        return self.naming_data.get_suggestions_given_name_prefix(next_name_log_probs, self.name_cx_size)
    def save(self, filename):
        """Pickle this learner to `filename`; the theano model is detached
        before pickling and re-attached afterwards (load() rebuilds it)."""
        model_tmp = self.model
        del self.model
        with open(filename, 'wb') as f:
            cPickle.dump(self, f, cPickle.HIGHEST_PROTOCOL)
        self.model = model_tmp
    @staticmethod
    def load(filename):
        """
        Unpickle a learner, rebuild its model from the stored
        hyperparameters and restore the trained parameter values.

        :type filename: str
        :rtype: ConvolutionalAttentionalLearner
        """
        with open(filename, 'rb') as f:
            learner = cPickle.load(f)
        learner.model = ConvolutionalAttentionalModel(learner.hyperparameters, len(learner.naming_data.all_tokens_dictionary),
                                                      len(learner.naming_data.name_dictionary), learner.naming_data.name_empirical_dist)
        learner.model.restore_parameters(learner.parameters)
        return learner
    def get_attention_vector(self, name_cx, code_toks):
        """Return the model's attention weights over the code tokens for
        the given name context."""
        attention_vector = self.model.attention_weights(name_cx,
                                                        code_toks.astype(np.int32))
        return attention_vector
def run_from_config(params, *args):
if len(args) < 2:
print "No input file or test file given: %s:%s" % (args, len(args))
sys.exit(-1)
input_file = args[0]
test_file = args[1]
if len(args) > 2:
num_epochs = int(args[2])
else:
num_epochs = 1000
params["D"] = 2 ** params["logD"]
params["conv_layer1_nfilters"] = 2 ** params["log_conv_layer1_nfilters"]
params["conv_layer2_nfilters"] = 2 ** params["log_conv_layer2_nfilters"]
model = ConvolutionalAttentionalLearner(params)
model.train(input_file, max_epochs=num_epochs)
test_data, original_names = model.naming_data.get_data_in_convolution_format(test_file, model.name_cx_size, model.padding_size)
test_name_targets, test_name_contexts, test_code_sentences, test_original_name_ids = test_data
ids, unique_idx = np.unique(test_original_name_ids, return_index=True)
eval = F1Evaluator(model)
point_suggestion_eval = eval.compute_names_f1(test_code_sentences[unique_idx], original_names,
model2.naming_data.all_tokens_dictionary.get_all_names())
return -point_suggestion_eval.get_f1_at_all_ranks()[1]
if __name__ == "__main__":
    # Command line: <input_file> <max_num_epochs> <D> <test_file>
    if len(sys.argv) < 5:
        print 'Usage <input_file> <max_num_epochs> d <test_file>'
        sys.exit(-1)
    logging.basicConfig(level=logging.INFO)
    input_file = sys.argv[1]
    max_num_epochs = int(sys.argv[2])
    # Fixed hyperparameters; *_init_scale and learning rate are log-values.
    params = {
        "D": int(sys.argv[3]),
        "name_cx_size": 1,
        "conv_layer1_nfilters": 64,
        "conv_layer2_nfilters": 16,
        "layer1_window_size": 6,
        "layer2_window_size": 15,
        "layer3_window_size": 14,
        "log_code_rep_init_scale": -1.34,
        "log_name_rep_init_scale": -4.9,
        "log_layer1_init_scale": -1,
        "log_layer2_init_scale": -3.4,
        "log_layer3_init_scale": -1.8,
        "log_name_cx_init_scale": -1.3,
        "log_learning_rate": -2.95,
        "rmsprop_rho": .98,
        "momentum": 0.9,
        "dropout_rate": 0.25,
        "grad_clip":1
    }
    params["train_file"] = input_file
    params["test_file"] = sys.argv[4]
    # with ExperimentLogger("ConvolutionalAttentionalLearner", params) as experiment_log:
    # Train and pickle the learner, then reload the pickle and evaluate the
    # reloaded copy on the test file.
    model = ConvolutionalAttentionalLearner(params)
    model.train(input_file, max_epochs=max_num_epochs)
    model.save("convolutional_att_model" + os.path.basename(params["train_file"]) + ".pkl")
    model2 = ConvolutionalAttentionalLearner.load("convolutional_att_model" + os.path.basename(params["train_file"]) + ".pkl")
    test_data, original_names = model2.naming_data.get_data_in_convolution_format(sys.argv[4], model2.name_cx_size, model2.padding_size)
    test_name_targets, test_name_contexts, test_code_sentences, test_original_name_ids = test_data
    name_ll = model2.model.log_prob_with_targets(test_name_contexts, test_code_sentences, test_name_targets)
    print "Test name_ll=%s" % name_ll
    # Score F1 once per distinct original name.
    ids, unique_idx = np.unique(test_original_name_ids, return_index=True)
    # NOTE(review): 'eval' shadows the builtin of the same name.
    eval = F1Evaluator(model2)
    point_suggestion_eval = eval.compute_names_f1(test_code_sentences[unique_idx], original_names,
                                                  model2.naming_data.all_tokens_dictionary.get_all_names())
    print point_suggestion_eval
    results = point_suggestion_eval.get_f1_at_all_ranks()
    print results
    # experiment_log.record_results({"f1_at_rank1": results[0], "f1_at_rank5":results[1]})
09b4422cd126c943b564f95775aff0b850731054 | 196 | py | Python | Language Proficiency/Python/Itertools/itertools.product()/cartesian_product.py | xuedong/hacker-rank | ce8a60f80c2c6935b427f9409d7e826ee0d26a89 | [
"MIT"
] | 1 | 2021-02-22T17:37:45.000Z | 2021-02-22T17:37:45.000Z | Language Proficiency/Python/Itertools/itertools.product()/cartesian_product.py | xuedong/hacker-rank | ce8a60f80c2c6935b427f9409d7e826ee0d26a89 | [
"MIT"
] | null | null | null | Language Proficiency/Python/Itertools/itertools.product()/cartesian_product.py | xuedong/hacker-rank | ce8a60f80c2c6935b427f9409d7e826ee0d26a89 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from itertools import product
if __name__ == "__main__":
    # Read two integer lists A and B from stdin (one per line) and print
    # their Cartesian product as space-separated tuples.
    A = list(map(int, input().split()))
    B = list(map(int, input().split()))
    # product() already yields the tuples; the list() the original wrapped
    # around it before unpacking was redundant.
    print(*product(A, B))
| 17.818182 | 39 | 0.612245 |
bdc011026c5f52049bb33f5ae96599e2af6abeec | 21,295 | py | Python | aes.py | DanijelStrbad/Python_AES | 27ab34967d4ca9512da8f1e13c01589263c42860 | [
"MIT"
] | null | null | null | aes.py | DanijelStrbad/Python_AES | 27ab34967d4ca9512da8f1e13c01589263c42860 | [
"MIT"
] | null | null | null | aes.py | DanijelStrbad/Python_AES | 27ab34967d4ca9512da8f1e13c01589263c42860 | [
"MIT"
] | null | null | null | # CBC
import numpy as np
gflt2 = [0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5]
gflt3 = [0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a]
gflt9 = [0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
0x90, 0x99, 0x82, 0x8b, 0xb4, 0xbd, 0xa6, 0xaf, 0xd8, 0xd1, 0xca, 0xc3, 0xfc, 0xf5, 0xee, 0xe7,
0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04, 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
0xab, 0xa2, 0xb9, 0xb0, 0x8f, 0x86, 0x9d, 0x94, 0xe3, 0xea, 0xf1, 0xf8, 0xc7, 0xce, 0xd5, 0xdc,
0x76, 0x7f, 0x64, 0x6d, 0x52, 0x5b, 0x40, 0x49, 0x3e, 0x37, 0x2c, 0x25, 0x1a, 0x13, 0x08, 0x01,
0xe6, 0xef, 0xf4, 0xfd, 0xc2, 0xcb, 0xd0, 0xd9, 0xae, 0xa7, 0xbc, 0xb5, 0x8a, 0x83, 0x98, 0x91,
0x4d, 0x44, 0x5f, 0x56, 0x69, 0x60, 0x7b, 0x72, 0x05, 0x0c, 0x17, 0x1e, 0x21, 0x28, 0x33, 0x3a,
0xdd, 0xd4, 0xcf, 0xc6, 0xf9, 0xf0, 0xeb, 0xe2, 0x95, 0x9c, 0x87, 0x8e, 0xb1, 0xb8, 0xa3, 0xaa,
0xec, 0xe5, 0xfe, 0xf7, 0xc8, 0xc1, 0xda, 0xd3, 0xa4, 0xad, 0xb6, 0xbf, 0x80, 0x89, 0x92, 0x9b,
0x7c, 0x75, 0x6e, 0x67, 0x58, 0x51, 0x4a, 0x43, 0x34, 0x3d, 0x26, 0x2f, 0x10, 0x19, 0x02, 0x0b,
0xd7, 0xde, 0xc5, 0xcc, 0xf3, 0xfa, 0xe1, 0xe8, 0x9f, 0x96, 0x8d, 0x84, 0xbb, 0xb2, 0xa9, 0xa0,
0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71, 0x78, 0x0f, 0x06, 0x1d, 0x14, 0x2b, 0x22, 0x39, 0x30,
0x9a, 0x93, 0x88, 0x81, 0xbe, 0xb7, 0xac, 0xa5, 0xd2, 0xdb, 0xc0, 0xc9, 0xf6, 0xff, 0xe4, 0xed,
0x0a, 0x03, 0x18, 0x11, 0x2e, 0x27, 0x3c, 0x35, 0x42, 0x4b, 0x50, 0x59, 0x66, 0x6f, 0x74, 0x7d,
0xa1, 0xa8, 0xb3, 0xba, 0x85, 0x8c, 0x97, 0x9e, 0xe9, 0xe0, 0xfb, 0xf2, 0xcd, 0xc4, 0xdf, 0xd6,
0x31, 0x38, 0x23, 0x2a, 0x15, 0x1c, 0x07, 0x0e, 0x79, 0x70, 0x6b, 0x62, 0x5d, 0x54, 0x4f, 0x46]
gflt11 = [0x00, 0x0b, 0x16, 0x1d, 0x2c, 0x27, 0x3a, 0x31, 0x58, 0x53, 0x4e, 0x45, 0x74, 0x7f, 0x62, 0x69,
0xb0, 0xbb, 0xa6, 0xad, 0x9c, 0x97, 0x8a, 0x81, 0xe8, 0xe3, 0xfe, 0xf5, 0xc4, 0xcf, 0xd2, 0xd9,
0x7b, 0x70, 0x6d, 0x66, 0x57, 0x5c, 0x41, 0x4a, 0x23, 0x28, 0x35, 0x3e, 0x0f, 0x04, 0x19, 0x12,
0xcb, 0xc0, 0xdd, 0xd6, 0xe7, 0xec, 0xf1, 0xfa, 0x93, 0x98, 0x85, 0x8e, 0xbf, 0xb4, 0xa9, 0xa2,
0xf6, 0xfd, 0xe0, 0xeb, 0xda, 0xd1, 0xcc, 0xc7, 0xae, 0xa5, 0xb8, 0xb3, 0x82, 0x89, 0x94, 0x9f,
0x46, 0x4d, 0x50, 0x5b, 0x6a, 0x61, 0x7c, 0x77, 0x1e, 0x15, 0x08, 0x03, 0x32, 0x39, 0x24, 0x2f,
0x8d, 0x86, 0x9b, 0x90, 0xa1, 0xaa, 0xb7, 0xbc, 0xd5, 0xde, 0xc3, 0xc8, 0xf9, 0xf2, 0xef, 0xe4,
0x3d, 0x36, 0x2b, 0x20, 0x11, 0x1a, 0x07, 0x0c, 0x65, 0x6e, 0x73, 0x78, 0x49, 0x42, 0x5f, 0x54,
0xf7, 0xfc, 0xe1, 0xea, 0xdb, 0xd0, 0xcd, 0xc6, 0xaf, 0xa4, 0xb9, 0xb2, 0x83, 0x88, 0x95, 0x9e,
0x47, 0x4c, 0x51, 0x5a, 0x6b, 0x60, 0x7d, 0x76, 0x1f, 0x14, 0x09, 0x02, 0x33, 0x38, 0x25, 0x2e,
0x8c, 0x87, 0x9a, 0x91, 0xa0, 0xab, 0xb6, 0xbd, 0xd4, 0xdf, 0xc2, 0xc9, 0xf8, 0xf3, 0xee, 0xe5,
0x3c, 0x37, 0x2a, 0x21, 0x10, 0x1b, 0x06, 0x0d, 0x64, 0x6f, 0x72, 0x79, 0x48, 0x43, 0x5e, 0x55,
0x01, 0x0a, 0x17, 0x1c, 0x2d, 0x26, 0x3b, 0x30, 0x59, 0x52, 0x4f, 0x44, 0x75, 0x7e, 0x63, 0x68,
0xb1, 0xba, 0xa7, 0xac, 0x9d, 0x96, 0x8b, 0x80, 0xe9, 0xe2, 0xff, 0xf4, 0xc5, 0xce, 0xd3, 0xd8,
0x7a, 0x71, 0x6c, 0x67, 0x56, 0x5d, 0x40, 0x4b, 0x22, 0x29, 0x34, 0x3f, 0x0e, 0x05, 0x18, 0x13,
0xca, 0xc1, 0xdc, 0xd7, 0xe6, 0xed, 0xf0, 0xfb, 0x92, 0x99, 0x84, 0x8f, 0xbe, 0xb5, 0xa8, 0xa3]
gflt13 = [0x00, 0x0d, 0x1a, 0x17, 0x34, 0x39, 0x2e, 0x23, 0x68, 0x65, 0x72, 0x7f, 0x5c, 0x51, 0x46, 0x4b,
0xd0, 0xdd, 0xca, 0xc7, 0xe4, 0xe9, 0xfe, 0xf3, 0xb8, 0xb5, 0xa2, 0xaf, 0x8c, 0x81, 0x96, 0x9b,
0xbb, 0xb6, 0xa1, 0xac, 0x8f, 0x82, 0x95, 0x98, 0xd3, 0xde, 0xc9, 0xc4, 0xe7, 0xea, 0xfd, 0xf0,
0x6b, 0x66, 0x71, 0x7c, 0x5f, 0x52, 0x45, 0x48, 0x03, 0x0e, 0x19, 0x14, 0x37, 0x3a, 0x2d, 0x20,
0x6d, 0x60, 0x77, 0x7a, 0x59, 0x54, 0x43, 0x4e, 0x05, 0x08, 0x1f, 0x12, 0x31, 0x3c, 0x2b, 0x26,
0xbd, 0xb0, 0xa7, 0xaa, 0x89, 0x84, 0x93, 0x9e, 0xd5, 0xd8, 0xcf, 0xc2, 0xe1, 0xec, 0xfb, 0xf6,
0xd6, 0xdb, 0xcc, 0xc1, 0xe2, 0xef, 0xf8, 0xf5, 0xbe, 0xb3, 0xa4, 0xa9, 0x8a, 0x87, 0x90, 0x9d,
0x06, 0x0b, 0x1c, 0x11, 0x32, 0x3f, 0x28, 0x25, 0x6e, 0x63, 0x74, 0x79, 0x5a, 0x57, 0x40, 0x4d,
0xda, 0xd7, 0xc0, 0xcd, 0xee, 0xe3, 0xf4, 0xf9, 0xb2, 0xbf, 0xa8, 0xa5, 0x86, 0x8b, 0x9c, 0x91,
0x0a, 0x07, 0x10, 0x1d, 0x3e, 0x33, 0x24, 0x29, 0x62, 0x6f, 0x78, 0x75, 0x56, 0x5b, 0x4c, 0x41,
0x61, 0x6c, 0x7b, 0x76, 0x55, 0x58, 0x4f, 0x42, 0x09, 0x04, 0x13, 0x1e, 0x3d, 0x30, 0x27, 0x2a,
0xb1, 0xbc, 0xab, 0xa6, 0x85, 0x88, 0x9f, 0x92, 0xd9, 0xd4, 0xc3, 0xce, 0xed, 0xe0, 0xf7, 0xfa,
0xb7, 0xba, 0xad, 0xa0, 0x83, 0x8e, 0x99, 0x94, 0xdf, 0xd2, 0xc5, 0xc8, 0xeb, 0xe6, 0xf1, 0xfc,
0x67, 0x6a, 0x7d, 0x70, 0x53, 0x5e, 0x49, 0x44, 0x0f, 0x02, 0x15, 0x18, 0x3b, 0x36, 0x21, 0x2c,
0x0c, 0x01, 0x16, 0x1b, 0x38, 0x35, 0x22, 0x2f, 0x64, 0x69, 0x7e, 0x73, 0x50, 0x5d, 0x4a, 0x47,
0xdc, 0xd1, 0xc6, 0xcb, 0xe8, 0xe5, 0xf2, 0xff, 0xb4, 0xb9, 0xae, 0xa3, 0x80, 0x8d, 0x9a, 0x97]
gflt14 = [0x00, 0x0e, 0x1c, 0x12, 0x38, 0x36, 0x24, 0x2a, 0x70, 0x7e, 0x6c, 0x62, 0x48, 0x46, 0x54, 0x5a,
0xe0, 0xee, 0xfc, 0xf2, 0xd8, 0xd6, 0xc4, 0xca, 0x90, 0x9e, 0x8c, 0x82, 0xa8, 0xa6, 0xb4, 0xba,
0xdb, 0xd5, 0xc7, 0xc9, 0xe3, 0xed, 0xff, 0xf1, 0xab, 0xa5, 0xb7, 0xb9, 0x93, 0x9d, 0x8f, 0x81,
0x3b, 0x35, 0x27, 0x29, 0x03, 0x0d, 0x1f, 0x11, 0x4b, 0x45, 0x57, 0x59, 0x73, 0x7d, 0x6f, 0x61,
0xad, 0xa3, 0xb1, 0xbf, 0x95, 0x9b, 0x89, 0x87, 0xdd, 0xd3, 0xc1, 0xcf, 0xe5, 0xeb, 0xf9, 0xf7,
0x4d, 0x43, 0x51, 0x5f, 0x75, 0x7b, 0x69, 0x67, 0x3d, 0x33, 0x21, 0x2f, 0x05, 0x0b, 0x19, 0x17,
0x76, 0x78, 0x6a, 0x64, 0x4e, 0x40, 0x52, 0x5c, 0x06, 0x08, 0x1a, 0x14, 0x3e, 0x30, 0x22, 0x2c,
0x96, 0x98, 0x8a, 0x84, 0xae, 0xa0, 0xb2, 0xbc, 0xe6, 0xe8, 0xfa, 0xf4, 0xde, 0xd0, 0xc2, 0xcc,
0x41, 0x4f, 0x5d, 0x53, 0x79, 0x77, 0x65, 0x6b, 0x31, 0x3f, 0x2d, 0x23, 0x09, 0x07, 0x15, 0x1b,
0xa1, 0xaf, 0xbd, 0xb3, 0x99, 0x97, 0x85, 0x8b, 0xd1, 0xdf, 0xcd, 0xc3, 0xe9, 0xe7, 0xf5, 0xfb,
0x9a, 0x94, 0x86, 0x88, 0xa2, 0xac, 0xbe, 0xb0, 0xea, 0xe4, 0xf6, 0xf8, 0xd2, 0xdc, 0xce, 0xc0,
0x7a, 0x74, 0x66, 0x68, 0x42, 0x4c, 0x5e, 0x50, 0x0a, 0x04, 0x16, 0x18, 0x32, 0x3c, 0x2e, 0x20,
0xec, 0xe2, 0xf0, 0xfe, 0xd4, 0xda, 0xc8, 0xc6, 0x9c, 0x92, 0x80, 0x8e, 0xa4, 0xaa, 0xb8, 0xb6,
0x0c, 0x02, 0x10, 0x1e, 0x34, 0x3a, 0x28, 0x26, 0x7c, 0x72, 0x60, 0x6e, 0x44, 0x4a, 0x58, 0x56,
0x37, 0x39, 0x2b, 0x25, 0x0f, 0x01, 0x13, 0x1d, 0x47, 0x49, 0x5b, 0x55, 0x7f, 0x71, 0x63, 0x6d,
0xd7, 0xd9, 0xcb, 0xc5, 0xef, 0xe1, 0xf3, 0xfd, 0xa7, 0xa9, 0xbb, 0xb5, 0x9f, 0x91, 0x83, 0x8d]
sBox = [0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16]
invBox = [0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D]
# rcon = [0x00000000, 0x01000000, 0x02000000,
# 0x04000000, 0x08000000, 0x10000000,
# 0x20000000, 0x40000000, 0x80000000,
# 0x1b000000, 0x36000000]
rcon = [0, 1, 2, 4, 8, 16, 32, 64, 128, 27, 54, 108, 216, 171, 77, 154, 47, 94, 188, 99, 198, 151, 53, 106, 212, 179,
125, 250, 239, 197, 145, 57, 114, 228, 211, 189, 97, 194, 159, 37, 74, 148, 51, 102, 204, 131, 29, 58, 116, 232,
203, 141, 1, 2, 4, 8, 16, 32, 64, 128, 27, 54, 108, 216, 171, 77, 154, 47, 94, 188, 99, 198, 151, 53, 106, 212,
179, 125, 250, 239, 197, 145, 57, 114, 228, 211, 189, 97, 194, 159, 37, 74, 148, 51, 102, 204, 131, 29, 58, 116,
232, 203, 141, 1, 2, 4, 8, 16, 32, 64, 128, 27, 54, 108, 216, 171, 77, 154, 47, 94, 188, 99, 198, 151, 53, 106,
212, 179, 125, 250, 239, 197, 145, 57, 114, 228, 211, 189, 97, 194, 159, 37, 74, 148, 51, 102, 204, 131, 29, 58,
116, 232, 203, 141, 1, 2, 4, 8, 16, 32, 64, 128, 27, 54, 108, 216, 171, 77, 154, 47, 94, 188, 99, 198, 151, 53,
106, 212, 179, 125, 250, 239, 197, 145, 57, 114, 228, 211, 189, 97, 194, 159, 37, 74, 148, 51, 102, 204, 131,
29, 58, 116, 232, 203, 141, 1, 2, 4, 8, 16, 32, 64, 128, 27, 54, 108, 216, 171, 77, 154, 47, 94, 188, 99, 198,
151, 53, 106, 212, 179, 125, 250, 239, 197, 145, 57, 114, 228, 211, 189, 97, 194, 159, 37, 74, 148, 51, 102,
204, 131, 29, 58, 116, 232, 203, 141]
Nb = 4 # 4 [Blocks] = 16 [Bytes]
Nk = 4 # 4/6/8 [Blocks] = 16 [Bytes] (AES - 128/192/256)
Nr = 10 # 10/12/14 [Rounds] (AES - 128/192/256)
# ==================================================
def inputToMatrix(in1):
    """Reshape a flat 16-entry block into a 4x4 state matrix (row-major)."""
    rows = []
    for start in (0, 4, 8, 12):
        rows.append(list(in1[start:start + 4]))
    return rows
def matrixToOutput(mat1):
    """Flatten a 4x4 state matrix back into a flat list of 16 values.

    Replaces the original numpy array round-trip: a plain comprehension
    needs no dependency and keeps the elements native ints instead of
    numpy scalars.
    """
    return [value for row in mat1 for value in row]
def addRoundKey(state1, word1):
    """XOR a 16-byte round key into the 4x4 state, in place, and return it.

    BUG FIX: the original iterated `range(3)` and therefore never keyed the
    fourth state row; AES requires the round key to cover all 16 bytes.
    """
    for i in range(4):
        for j in range(4):
            state1[i][j] ^= word1[4 * i + j]
    return state1
def subBytes(state):
    """Apply the AES forward S-box to every byte of the state, in place."""
    for row in state:
        for j, value in enumerate(row):
            row[j] = sBox[value]
    return state
def invSubBytes(state):
    """Apply the AES inverse S-box to every byte of the state, in place."""
    for row in state:
        for j, value in enumerate(row):
            row[j] = invBox[value]
    return state
def shiftRows(s):
    """Rotate state "columns" 1..3 upward by their index, in place.

    With this implementation's row-major layout, s[i][j] receives the value
    previously held by s[(i + j) % 4][j]; column 0 is untouched.
    """
    for j in (1, 2, 3):
        column = [s[i][j] for i in range(4)]
        for i in range(4):
            s[i][j] = column[(i + j) % 4]
    return s
def invShiftRows(s):
    """Inverse of shiftRows: rotate "columns" 1..3 downward by their index.

    s[i][j] receives the value previously held by s[(i - j) % 4][j]; column 0
    is untouched. Operates in place and returns the state.
    """
    for j in (1, 2, 3):
        column = [s[i][j] for i in range(4)]
        for i in range(4):
            s[i][j] = column[(i - j) % 4]
    return s
def mixColumns(state):
    """Forward MixColumns via the {02}/{03} GF(2^8) lookup tables, in place.

    Each 4-byte group is multiplied by the fixed circulant matrix
    [2 3 1 1; 1 2 3 1; 1 1 2 3; 3 1 1 2].
    """
    for row in state:
        a, b, c, d = row
        row[0] = gflt2[a] ^ gflt3[b] ^ c ^ d
        row[1] = a ^ gflt2[b] ^ gflt3[c] ^ d
        row[2] = a ^ b ^ gflt2[c] ^ gflt3[d]
        row[3] = gflt3[a] ^ b ^ c ^ gflt2[d]
    return state
def invMixColumns(state):
    """Inverse MixColumns via the {09}/{0b}/{0d}/{0e} lookup tables, in place.

    Each 4-byte group is multiplied by the fixed circulant matrix
    [14 11 13 9; 9 14 11 13; 13 9 14 11; 11 13 9 14].
    """
    for row in state:
        a, b, c, d = row
        row[0] = gflt14[a] ^ gflt11[b] ^ gflt13[c] ^ gflt9[d]
        row[1] = gflt9[a] ^ gflt14[b] ^ gflt11[c] ^ gflt13[d]
        row[2] = gflt13[a] ^ gflt9[b] ^ gflt14[c] ^ gflt11[d]
        row[3] = gflt11[a] ^ gflt13[b] ^ gflt9[c] ^ gflt14[d]
    return state
# ==================================================
def subWord(word):
    """Apply the forward S-box to each byte of a 4-byte word, in place."""
    word[:] = [sBox[b] for b in word]
    return word
def rotWord(word):
    """Rotate a 4-byte word left by one position, in place."""
    word[:] = word[1:] + word[:1]
    return word
def keyExpansion(key):
    """Expand a 16-byte AES-128 key into the full (Nr + 1) * 16-byte schedule.

    BUG FIX: the original diverged from FIPS-197 -- it only appended bytes on
    every Nk-th word (yielding 56 bytes instead of 176), XOR-ed the round
    constant into all four bytes instead of just the first, never mixed in
    word[i - Nk], and indexed past a one-byte temp list (IndexError at i=8).
    This is the standard schedule: each 4-byte word is the previous word
    (rotated, substituted and rcon-mixed on Nk boundaries) XOR-ed with the
    word Nk positions back.

    :param key: flat list of at least 4 * Nk key bytes
    :return: flat list of 4 * Nb * (Nr + 1) = 176 schedule bytes
    """
    word = list(key[:4 * Nk])
    for i in range(Nk, Nb * (Nr + 1)):
        temp = word[4 * (i - 1):4 * i]  # slice copy: safe to mutate
        if i % Nk == 0:
            temp = subWord(rotWord(temp))
            temp[0] ^= rcon[i // Nk]  # rcon only touches the first byte
        prev = word[4 * (i - Nk):4 * (i - Nk) + 4]
        word.extend(p ^ t for p, t in zip(prev, temp))
    return word
# ==================================================
def prepareMsg(msg):
    """Convert a text message into a list of byte values (code points).

    The original built a character list and then overwrote every element
    with its ordinal; a single comprehension does the same in one pass.
    """
    return [ord(char) for char in msg]
def prepareCypher(c):
    """Convert a list of byte values into characters, in place."""
    for index, value in enumerate(c):
        c[index] = chr(value)
    return c
def prepareKey(keyIn):
    """Return the code points of the first 16 characters of the key string."""
    return [ord(keyIn[i]) for i in range(16)]
def invMsg(m):
    """Return the message with its 16-byte blocks in reversed order."""
    flipped = []
    for j in range(len(m) // 16, 0, -1):
        flipped.extend(m[(j - 1) * 16:j * 16])
    return flipped
# ==================================================
def addPadding(block):
    """Append PKCS#7-style padding so the length is a multiple of 16.

    Each pad byte holds the pad length; a full 16-byte pad is appended
    when the input is already aligned. Mutates and returns `block`.
    """
    pad = 16 - (len(block) % 16)
    block.extend([pad] * pad)
    return block
def removePadding(block):
    """Strip PKCS#7-style padding and return the unpadded message.

    BUG FIX: the original overwrote the pad bytes with spaces (0x20)
    instead of removing them, so decrypted messages carried trailing
    blanks; this truncates the padding as the function name promises.
    The last byte is trusted to hold the pad length (as addPadding wrote it).
    """
    padding = block[-1]
    return block[:len(block) - padding]
# ==================================================
def Cipher(in1, word1):
    """Encrypt one 16-byte block with AES-128 (FIPS-197 round structure).

    BUG FIXES vs the original: the main loop ran 8 rounds instead of
    Nr - 1 = 9, and the round-key slices started at i * Nb (4 * i) instead
    of i * Nb * 4 (16 * i), so later rounds used the wrong key bytes (the
    final round even sliced from offset 40 instead of 160).

    :param in1: flat list of 16 plaintext bytes
    :param word1: flat list of 16 raw key bytes (expanded internally)
    :return: flat list of 16 ciphertext bytes
    """
    state = inputToMatrix(in1)
    schedule = keyExpansion(word1)
    state = addRoundKey(state, schedule[0:Nb * 4])
    for rnd in range(1, Nr):
        state = subBytes(state)
        state = shiftRows(state)
        state = mixColumns(state)
        state = addRoundKey(state, schedule[rnd * Nb * 4:(rnd + 1) * Nb * 4])
    # Final round has no MixColumns.
    state = subBytes(state)
    state = shiftRows(state)
    state = addRoundKey(state, schedule[Nr * Nb * 4:(Nr + 1) * Nb * 4])
    return matrixToOutput(state)
def invCipher(in1, word1):
    """Decrypt one 16-byte block with AES-128 (inverse of Cipher).

    Mirrors the fixes in Cipher: Nr - 1 = 9 inner rounds (the original ran
    8) and round-key byte offsets of rnd * Nb * 4.

    :param in1: flat list of 16 ciphertext bytes
    :param word1: flat list of 16 raw key bytes (expanded internally)
    :return: flat list of 16 plaintext bytes
    """
    state = inputToMatrix(in1)
    schedule = keyExpansion(word1)
    state = addRoundKey(state, schedule[Nr * Nb * 4:(Nr + 1) * Nb * 4])
    for rnd in range(Nr - 1, 0, -1):
        state = invShiftRows(state)
        state = invSubBytes(state)
        state = addRoundKey(state, schedule[rnd * Nb * 4:(rnd + 1) * Nb * 4])
        state = invMixColumns(state)
    # Final (first-of-encryption) round has no InvMixColumns.
    state = invShiftRows(state)
    state = invSubBytes(state)
    state = addRoundKey(state, schedule[0:Nb * 4])
    return matrixToOutput(state)
# ==================================================
inMsg1 = "Some Super Secret Text"
inMsg2 = "qwertzuiopasdfgh"
inMsg3 = "ab"
key1 = "0123456789012345"
key2 = "qwertyuiopasdfgh"
iv1 = "esyrdxtfczgvuhbi"
inCyp1 = [74, 230, 229, 132, 137, 238, 202, 220, 45, 243, 152, 91, 37, 253, 163, 47, 45, 153, 223, 97, 107, 202, 79,
173, 94, 17, 119, 172, 129, 239, 132, 119]
inCyp2 = [181, 60, 57, 56, 246, 57, 135, 119, 103, 118, 38, 33, 156, 80, 71, 31, 165, 173, 42, 128, 193, 105, 232, 227,
244, 137, 203, 229, 79, 26, 69, 165]
inCyp3 = [154, 96, 187, 157, 2, 74, 68, 170, 65, 214, 234, 101, 147, 153, 165, 191]
# file1 = open("secret.txt", "r")
# msgFile1 = file1.read(-1)
# file1.close()
# ==================================================
# ord( . . . ), chr( . . . ), hex( . . . )
def main():
    """Demo driver: AES-128 in CBC mode over the hard-coded sample data.

    Toggle encDec to either encrypt inMsg1 or decrypt one of the inCyp*
    lists, always using key1 as the key and iv1 as the IV.
    """
    print("AES-128")
    encDec = 1  # 0 - encrypt, 1 - decrypt
    if encDec == 0:  # 0 - encrypt
        mes123 = inMsg1
        print(mes123)
        m = prepareMsg(mes123)
        k = prepareKey(key1)
        iv = prepareKey(iv1)
        # NOTE(review): len(m) / 16 is a float under Python 3; the int()
        # casts below make the loops work, but // would be cleaner.
        nBlocks = len(m) / 16
        nBlocks += 1
        m = addPadding(m)
        print("len = ", len(m))
        # CBC: the first block is XOR-ed with the IV before encryption.
        c = []
        for i in range(16):
            c.append(iv[i] ^ m[i])
        c = Cipher(c, k)
        # Every later block is XOR-ed with the previous ciphertext block.
        for j in range(1, int(nBlocks), 1):
            tmp = []
            for i in range(16):
                tmp.append(m[j*16 + i] ^ c[(j-1)*16 + i])
            c.extend(Cipher(tmp, k))
        print("Int cypher list:")
        print(str(c))
        c = prepareCypher(c)
        print("String cypher:")
        print(c)
        cOut = ''.join(str(i) for i in c)
        print(cOut)
        print(len(cOut))
    elif encDec == 1:  # 1 - decrypt
        print(prepareMsg(inMsg1))
        inCyp = inCyp3
        print(inCyp)
        k = prepareKey(key1)
        iv = prepareKey(iv1)
        nBlocks = len(inCyp) / 16
        # CBC decryption, last block first: plain_j = D(c_j) XOR c_{j-1},
        # so plaintext blocks are appended in reverse order here.
        m = []
        for j in range(int(nBlocks), 1, -1):
            tmp = []
            tmp.extend(invCipher(inCyp[(j-1)*16:j*16], k))
            for i in range(16):
                m.append(tmp[i] ^ inCyp[(j-2)*16+i])
        # The first block is XOR-ed with the IV instead.
        tmp = invCipher(inCyp[:16], k)
        for i in range(16):
            m.append(iv[i] ^ tmp[i])
        # Undo the reversed block order, then strip the padding.
        m = invMsg(m)
        m = removePadding(m)
        print(len(m))
        print("int msg:")
        print(m)
        mStr = prepareCypher(m)
        print(mStr)
if __name__ == '__main__':
    main()
| 44.272349 | 120 | 0.592956 |
28360eb6d137ac5700f8833f52700151d61c4daa | 101,026 | py | Python | tests/jobs/test_scheduler_job.py | YouZhengChuan/airflow | 064a66a7dcd5e0f064b3ac1138f07cbcf932abbc | [
"Apache-2.0"
] | 1 | 2021-01-06T21:22:13.000Z | 2021-01-06T21:22:13.000Z | tests/jobs/test_scheduler_job.py | YouZhengChuan/airflow | 064a66a7dcd5e0f064b3ac1138f07cbcf932abbc | [
"Apache-2.0"
] | null | null | null | tests/jobs/test_scheduler_job.py | YouZhengChuan/airflow | 064a66a7dcd5e0f064b3ac1138f07cbcf932abbc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import os
import shutil
import unittest
from tempfile import mkdtemp
import psutil
import six
from parameterized import parameterized
import airflow.example_dags
from airflow import AirflowException, models, settings
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs import BackfillJob, SchedulerJob
from airflow.models import DAG, DagBag, DagModel, DagRun, Pool, SlaMiss, TaskInstance as TI, errors
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag
from airflow.utils.dates import days_ago
from airflow.utils.db import create_session, provide_session
from airflow.utils.file import list_py_file_paths
from airflow.utils.state import State
from tests.compat import MagicMock, Mock, PropertyMock, mock, patch
from tests.test_core import TEST_DAG_FOLDER
from tests.test_utils.db import (
clear_db_dags, clear_db_errors, clear_db_pools, clear_db_runs, clear_db_sla_miss, set_default_pool_slots,
)
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
class TestSchedulerJob(unittest.TestCase):
    def setUp(self):
        """Reset all scheduler-related DB tables so each test starts clean."""
        clear_db_runs()
        clear_db_pools()
        clear_db_dags()
        clear_db_sla_miss()
        clear_db_errors()
        # Speed up some tests by not running the tasks, just look at what we
        # enqueue!
        self.null_exec = MockExecutor()
    @classmethod
    def setUpClass(cls):
        """Load the DagBag once per class and disable example DAGs.

        The previous 'load_examples' setting is remembered in ``cls.old_val``
        so tearDownClass can restore it.
        """
        cls.dagbag = DagBag()
        cls.old_val = None
        if conf.has_option('core', 'load_examples'):
            cls.old_val = conf.get('core', 'load_examples')
        conf.set('core', 'load_examples', 'false')
    @classmethod
    def tearDownClass(cls):
        """Restore the 'load_examples' config option saved in setUpClass."""
        if cls.old_val is not None:
            conf.set('core', 'load_examples', cls.old_val)
        else:
            # The option did not exist before the class ran; remove it again.
            conf.remove_option('core', 'load_examples')
def test_is_alive(self):
job = SchedulerJob(None, heartrate=10, state=State.RUNNING)
self.assertTrue(job.is_alive())
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=20)
self.assertTrue(job.is_alive())
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=31)
self.assertFalse(job.is_alive())
job.state = State.SUCCESS
job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=10)
self.assertFalse(job.is_alive(), "Completed jobs even with recent heartbeat should not be alive")
    def run_single_scheduler_loop_with_no_dags(self, dags_folder):
        """
        Utility function that runs a single scheduler loop without actually
        changing/scheduling any dags. This is useful to simulate the other side effects of
        running a scheduler loop, e.g. to see what parse errors there are in the
        dags_folder.

        :param dags_folder: the directory to traverse
        :type dags_folder: str
        """
        scheduler = SchedulerJob(
            executor=self.null_exec,
            dag_id='this_dag_doesnt_exist',  # We don't want to actually run anything
            num_runs=1,
            subdir=os.path.join(dags_folder))
        scheduler.heartrate = 0
        scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
    def test_no_orphan_process_will_be_left(self):
        """A full scheduler run must not leave child processes behind."""
        empty_dir = mkdtemp()
        current_process = psutil.Process()
        # Snapshot pre-existing children so only new ones are counted.
        old_children = current_process.children(recursive=True)
        scheduler = SchedulerJob(subdir=empty_dir,
                                 num_runs=1,
                                 executor=MockExecutor(do_update=False))
        scheduler.run()
        shutil.rmtree(empty_dir)
        # Remove potential noise created by previous tests.
        current_children = set(current_process.children(recursive=True)) - set(
            old_children)
        self.assertFalse(current_children)
    @mock.patch('airflow.stats.Stats.incr')
    def test_process_executor_events(self, mock_stats_incr):
        """Executor events only affect TIs whose dag_id is in the SimpleDagBag,
        and an externally-reported state change is counted as killed_externally."""
        dag_id = "test_process_executor_events"
        dag_id2 = "test_process_executor_events_2"
        task_id_1 = 'dummy_task'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        DummyOperator(dag=dag2, task_id=task_id_1)
        dagbag1 = self._make_simple_dag_bag([dag])
        dagbag2 = self._make_simple_dag_bag([dag2])
        scheduler = SchedulerJob()
        session = settings.Session()
        ti1 = TI(task1, DEFAULT_DATE)
        ti1.state = State.QUEUED
        session.merge(ti1)
        session.commit()
        executor = MockExecutor(do_update=False)
        executor.event_buffer[ti1.key] = State.FAILED
        scheduler.executor = executor
        # dag bag does not contain dag_id
        scheduler._process_executor_events(simple_dag_bag=dagbag2)
        ti1.refresh_from_db()
        self.assertEqual(ti1.state, State.QUEUED)
        # dag bag does contain dag_id
        scheduler._process_executor_events(simple_dag_bag=dagbag1)
        ti1.refresh_from_db()
        self.assertEqual(ti1.state, State.FAILED)
        # A SUCCESS event for an already-successful TI leaves it untouched.
        ti1.state = State.SUCCESS
        session.merge(ti1)
        session.commit()
        executor.event_buffer[ti1.key] = State.SUCCESS
        scheduler._process_executor_events(simple_dag_bag=dagbag1)
        ti1.refresh_from_db()
        self.assertEqual(ti1.state, State.SUCCESS)
        # Only the earlier QUEUED->FAILED transition should have bumped the metric.
        mock_stats_incr.assert_called_once_with('scheduler.tasks.killed_externally')
    def test_execute_task_instances_is_paused_wont_execute(self):
        """Scheduled TIs of a paused DAG must stay SCHEDULED (not be queued)."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
        task_id_1 = 'dummy_task'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, DEFAULT_DATE)
        ti1.state = State.SCHEDULED
        dr1.state = State.RUNNING
        # Register the DAG as paused in the metadata DB.
        dagmodel = models.DagModel()
        dagmodel.dag_id = dag_id
        dagmodel.is_paused = True
        session.merge(ti1)
        session.merge(dr1)
        session.add(dagmodel)
        session.commit()
        scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        ti1.refresh_from_db()
        self.assertEqual(State.SCHEDULED, ti1.state)
    def test_execute_task_instances_no_dagrun_task_will_execute(self):
        """
        Tests that tasks without dagrun still get executed.
        """
        dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
        task_id_1 = 'dummy_task'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        scheduler.create_dag_run(dag)
        ti1 = TI(task1, DEFAULT_DATE)
        ti1.state = State.SCHEDULED
        # Shift the TI's execution_date so no DagRun matches it.
        ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
        session.merge(ti1)
        session.commit()
        scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        ti1.refresh_from_db()
        self.assertEqual(State.QUEUED, ti1.state)
    def test_execute_task_instances_backfill_tasks_wont_execute(self):
        """
        Tests that backfill tasks won't get executed.
        """
        dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
        task_id_1 = 'dummy_task'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        # Mark the run as a backfill by giving it the backfill run_id prefix.
        dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
        ti1 = TI(task1, dr1.execution_date)
        ti1.refresh_from_db()
        ti1.state = State.SCHEDULED
        session.merge(ti1)
        session.merge(dr1)
        session.commit()
        self.assertTrue(dr1.is_backfill)
        scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        ti1.refresh_from_db()
        self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
    def test_find_executable_task_instances_in_default_pool(self):
        """TIs without an explicit pool are bounded by the default pool size."""
        # Shrink the default pool to a single slot.
        set_default_pool_slots(1)
        dag_id = 'SchedulerJobTest.test_find_executable_task_instances_in_default_pool'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        t1 = DummyOperator(dag=dag, task_id='dummy1')
        t2 = DummyOperator(dag=dag, task_id='dummy2')
        dagbag = self._make_simple_dag_bag([dag])
        executor = MockExecutor(do_update=True)
        scheduler = SchedulerJob(executor=executor)
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        ti1 = TI(task=t1, execution_date=dr1.execution_date)
        ti2 = TI(task=t2, execution_date=dr2.execution_date)
        ti1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        session = settings.Session()
        session.merge(ti1)
        session.merge(ti2)
        session.commit()
        # Two tasks w/o pool up for execution and our default pool size is 1
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=(State.SCHEDULED,),
            session=session)
        self.assertEqual(1, len(res))
        ti2.state = State.RUNNING
        session.merge(ti2)
        session.commit()
        # One task w/o pool up for execution and one task task running
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=(State.SCHEDULED,),
            session=session)
        self.assertEqual(0, len(res))
        session.close()
    def test_nonexistent_pool(self):
        """A TI assigned to a pool that does not exist is never executable."""
        dag_id = 'SchedulerJobTest.test_nonexistent_pool'
        task_id = 'dummy_wrong_pool'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
        task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr = scheduler.create_dag_run(dag)
        ti = TI(task, dr.execution_date)
        ti.state = State.SCHEDULED
        session.merge(ti)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        session.commit()
        self.assertEqual(0, len(res))
    def test_find_executable_task_instances_none(self):
        """With no TI in a searched state, nothing is found executable."""
        dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
        DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        scheduler.create_dag_run(dag)
        session.commit()
        self.assertEqual(0, len(scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
    def test_find_executable_task_instances_concurrency_queued(self):
        """QUEUED TIs count toward the dag concurrency limit: with one RUNNING
        and one QUEUED out of concurrency=3, only the SCHEDULED TI is returned."""
        dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
        task1 = DummyOperator(dag=dag, task_id='dummy1')
        task2 = DummyOperator(dag=dag, task_id='dummy2')
        task3 = DummyOperator(dag=dag, task_id='dummy3')
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dag_run = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dag_run.execution_date)
        ti2 = TI(task2, dag_run.execution_date)
        ti3 = TI(task3, dag_run.execution_date)
        ti1.state = State.RUNNING
        ti2.state = State.QUEUED
        ti3.state = State.SCHEDULED
        session.merge(ti1)
        session.merge(ti2)
        session.merge(ti3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(1, len(res))
        self.assertEqual(res[0].key, ti3.key)
    def test_find_executable_task_instances_task_concurrency(self):
        """Per-task ``task_concurrency`` (2 for task1) caps how many of that
        task's TIs may run at once, across several scheduler passes."""
        dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
        task_id_1 = 'dummy'
        task_id_2 = 'dummy2'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
        task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
        task2 = DummyOperator(dag=dag, task_id=task_id_2)
        dagbag = self._make_simple_dag_bag([dag])
        executor = MockExecutor(do_update=True)
        scheduler = SchedulerJob(executor=executor)
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        dr3 = scheduler.create_dag_run(dag)
        # Stage 1: both TIs scheduled, nothing running -> both executable.
        ti1_1 = TI(task1, dr1.execution_date)
        ti2 = TI(task2, dr1.execution_date)
        ti1_1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti2)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(2, len(res))
        # Stage 2: one task1 TI running -> the second task1 TI still fits.
        ti1_1.state = State.RUNNING
        ti2.state = State.RUNNING
        ti1_2 = TI(task1, dr2.execution_date)
        ti1_2.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti2)
        session.merge(ti1_2)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(1, len(res))
        # Stage 3: two task1 TIs running -> task_concurrency=2 is saturated.
        ti1_2.state = State.RUNNING
        ti1_3 = TI(task1, dr3.execution_date)
        ti1_3.state = State.SCHEDULED
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(0, len(res))
        # Stage 4: all three task1 TIs scheduled -> only 2 can be picked.
        ti1_1.state = State.SCHEDULED
        ti1_2.state = State.SCHEDULED
        ti1_3.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(2, len(res))
        # Stage 5: one running, two scheduled -> only 1 slot left.
        ti1_1.state = State.RUNNING
        ti1_2.state = State.SCHEDULED
        ti1_3.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(1, len(res))
        # Stage 6: a QUEUED TI already sitting in the executor's queue is not
        # picked again; only the SCHEDULED TI is returned.
        ti1_1.state = State.QUEUED
        ti1_2.state = State.SCHEDULED
        ti1_3.state = State.SUCCESS
        session.merge(ti1_1)
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        executor.queued_tasks[ti1_1.key] = ti1_1
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED, State.QUEUED],
            session=session)
        self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
    def test_change_state_for_executable_task_instances_no_tis_with_state(self):
        """No TI is changed when none of them is in an acceptable old state."""
        dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        dr3 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti2 = TI(task1, dr2.execution_date)
        ti3 = TI(task1, dr3.execution_date)
        ti1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        ti3.state = State.SCHEDULED
        session.merge(ti1)
        session.merge(ti2)
        session.merge(ti3)
        session.commit()
        # All TIs are SCHEDULED, but only RUNNING is accepted -> no matches.
        res = scheduler._change_state_for_executable_task_instances(
            [ti1, ti2, ti3],
            [State.RUNNING],
            session)
        self.assertEqual(0, len(res))
    def test_change_state_for_executable_task_instances_none_state(self):
        """TIs in the accepted old states (NONE, SCHEDULED) move to QUEUED;
        the already-QUEUED TI is left out of the result."""
        dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        dr3 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti2 = TI(task1, dr2.execution_date)
        ti3 = TI(task1, dr3.execution_date)
        ti1.state = State.SCHEDULED
        ti2.state = State.QUEUED
        ti3.state = State.NONE
        session.merge(ti1)
        session.merge(ti2)
        session.merge(ti3)
        session.commit()
        res = scheduler._change_state_for_executable_task_instances(
            [ti1, ti2, ti3],
            [State.NONE, State.SCHEDULED],
            session)
        self.assertEqual(2, len(res))
        ti1.refresh_from_db()
        ti3.refresh_from_db()
        self.assertEqual(State.QUEUED, ti1.state)
        self.assertEqual(State.QUEUED, ti3.state)
    def test_enqueue_task_instances_with_queued_state(self):
        """Enqueuing queued TIs must hand a command to the executor."""
        dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        session.merge(ti1)
        session.commit()
        # Patch the executor base so we can observe the queue_command call.
        with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
            scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
        assert mock_queue_command.called
    def test_execute_task_instances_nothing(self):
        """With an empty SimpleDagBag no task instance gets executed."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        # Deliberately empty: the scheduler should find nothing to run.
        dagbag = SimpleDagBag([])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti1.state = State.SCHEDULED
        session.merge(ti1)
        session.commit()
        self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
    def test_execute_task_instances(self):
        """Dag concurrency (3) is enforced across multiple dag runs: with two
        TIs RUNNING, only one of the two newly SCHEDULED TIs is queued."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances'
        task_id_1 = 'dummy_task'
        task_id_2 = 'dummy_task_nonexistent_queue'
        # important that len(tasks) is less than concurrency
        # because before scheduler._execute_task_instances would only
        # check the num tasks once so if concurrency was 3,
        # we could execute arbitrarily many tasks in the second run
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        task2 = DummyOperator(dag=dag, task_id=task_id_2)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        # create first dag run with 1 running and 1 queued
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti2 = TI(task2, dr1.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = State.RUNNING
        ti2.state = State.RUNNING
        session.merge(ti1)
        session.merge(ti2)
        session.commit()
        self.assertEqual(State.RUNNING, dr1.state)
        self.assertEqual(
            2,
            DAG.get_num_task_instances(
                dag_id, dag.task_ids, states=[State.RUNNING], session=session
            )
        )
        # create second dag run
        dr2 = scheduler.create_dag_run(dag)
        ti3 = TI(task1, dr2.execution_date)
        ti4 = TI(task2, dr2.execution_date)
        ti3.refresh_from_db()
        ti4.refresh_from_db()
        # manually set to scheduled so we can pick them up
        ti3.state = State.SCHEDULED
        ti4.state = State.SCHEDULED
        session.merge(ti3)
        session.merge(ti4)
        session.commit()
        self.assertEqual(State.RUNNING, dr2.state)
        res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        # check that concurrency is respected
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti3.refresh_from_db()
        ti4.refresh_from_db()
        self.assertEqual(
            3,
            DAG.get_num_task_instances(
                dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
            )
        )
        self.assertEqual(State.RUNNING, ti1.state)
        self.assertEqual(State.RUNNING, ti2.state)
        # Exactly one of ti3/ti4 was queued; the other stays scheduled.
        six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
        self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# important that len(tasks) is less than concurrency
# because before scheduler._execute_task_instances would only
# check the num tasks once so if concurrency was 3,
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for _ in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
    @unittest.skipUnless("INTEGRATION" in os.environ,
                         "The test is flaky with nondeterministic result")
    def test_change_state_for_tis_without_dagrun(self):
        """Only SCHEDULED/QUEUED TIs lacking a RUNNING dag run are reset to
        NONE; TIs with a live run, or in other states, are left untouched."""
        dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
        DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
        DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
        dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
        DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
        # dag3 intentionally never gets a dag run created.
        dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
        DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
        session = settings.Session()
        dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
                                 state=State.RUNNING,
                                 execution_date=DEFAULT_DATE,
                                 start_date=DEFAULT_DATE,
                                 session=session)
        dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
                                 state=State.RUNNING,
                                 execution_date=DEFAULT_DATE,
                                 start_date=DEFAULT_DATE,
                                 session=session)
        ti1a = dr1.get_task_instance(task_id='dummy', session=session)
        ti1a.state = State.SCHEDULED
        ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
        ti1b.state = State.SUCCESS
        session.commit()
        ti2 = dr2.get_task_instance(task_id='dummy', session=session)
        ti2.state = State.SCHEDULED
        session.commit()
        ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
        ti3.state = State.SCHEDULED
        session.merge(ti3)
        session.commit()
        dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
        scheduler = SchedulerJob(num_runs=0)
        scheduler._change_state_for_tis_without_dagrun(
            simple_dag_bag=dagbag,
            old_states=[State.SCHEDULED, State.QUEUED],
            new_state=State.NONE,
            session=session)
        ti1a = dr1.get_task_instance(task_id='dummy', session=session)
        ti1a.refresh_from_db(session=session)
        self.assertEqual(ti1a.state, State.SCHEDULED)
        ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
        ti1b.refresh_from_db(session=session)
        self.assertEqual(ti1b.state, State.SUCCESS)
        ti2 = dr2.get_task_instance(task_id='dummy', session=session)
        ti2.refresh_from_db(session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
        # ti3 has no dag run at all -> reset to NONE.
        ti3.refresh_from_db(session=session)
        self.assertEqual(ti3.state, State.NONE)
        # Fail dr1 and run the pass again: its SCHEDULED TI is still kept.
        dr1.refresh_from_db(session=session)
        dr1.state = State.FAILED
        # why o why
        session.merge(dr1)
        session.commit()
        scheduler._change_state_for_tis_without_dagrun(
            simple_dag_bag=dagbag,
            old_states=[State.SCHEDULED, State.QUEUED],
            new_state=State.NONE,
            session=session)
        ti1a.refresh_from_db(session=session)
        self.assertEqual(ti1a.state, State.SCHEDULED)
        # don't touch ti1b
        ti1b.refresh_from_db(session=session)
        self.assertEqual(ti1b.state, State.SUCCESS)
        # don't touch ti2
        ti2.refresh_from_db(session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = MockExecutor(do_update=False)
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute()
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TI(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
    def test_execute_helper_reset_orphaned_tasks(self):
        """_execute_helper resets orphaned SCHEDULED TIs of regular runs to
        NONE, but leaves backfill-run TIs alone."""
        session = settings.Session()
        dag = DAG(
            'test_execute_helper_reset_orphaned_tasks',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        # Second run is a backfill: its TI must not be reset.
        dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
                                state=State.RUNNING,
                                execution_date=DEFAULT_DATE + datetime.timedelta(1),
                                start_date=DEFAULT_DATE,
                                session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.SCHEDULED
        ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
        ti2.state = State.SCHEDULED
        session.commit()
        processor = mock.MagicMock()
        scheduler = SchedulerJob(num_runs=0)
        executor = MockExecutor(do_update=False)
        scheduler.executor = executor
        scheduler.processor_agent = processor
        scheduler._execute_helper()
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        self.assertEqual(ti.state, State.NONE)
        ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_execute_helper_should_change_state_for_tis_without_dagrun(
self, initial_task_state, expected_task_state):
session = settings.Session()
dag = DAG(
'test_execute_helper_should_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.FAILED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create scheduler and mock calls to processor. Run duration is set
# to a high value to ensure loop is entered. Poll interval is 0 to
# avoid sleep. Done flag is set to true to exist the loop immediately.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = MockExecutor(do_update=False)
executor.queued_tasks
scheduler.executor = executor
processor = mock.MagicMock()
processor.harvest_simple_dags.return_value = [dag]
processor.done = True
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
    @provide_session
    def evaluate_dagrun(
            self,
            dag_id,
            expected_task_states,  # dict of task_id: state
            dagrun_state,
            run_kwargs=None,
            advance_execution_date=False,
            session=None):  # pylint: disable=unused-argument
        """
        Helper for testing DagRun states with simple two-task DAGS.
        This is hackish: a dag run is created but its tasks are
        run by a backfill.

        :param dag_id: id of the DAG (from the class-level dagbag) to evaluate
        :param expected_task_states: dict mapping task_id to its expected state
        :param dagrun_state: the state the finished dag run should end in
        :param run_kwargs: extra kwargs forwarded to ``dag.run``
        :param advance_execution_date: create a second run so the evaluated
            run's execution_date lies after the start_date
        """
        if run_kwargs is None:
            run_kwargs = {}
        scheduler = SchedulerJob()
        dag = self.dagbag.get_dag(dag_id)
        dr = scheduler.create_dag_run(dag)
        if advance_execution_date:
            # run a second time to schedule a dagrun after the start_date
            dr = scheduler.create_dag_run(dag)
        ex_date = dr.execution_date
        # Pre-register executor failures for every task expected to FAIL.
        for tid, state in expected_task_states.items():
            if state != State.FAILED:
                continue
            self.null_exec.mock_task_fail(dag_id, tid, ex_date)
        try:
            dag.run(start_date=ex_date, end_date=ex_date, executor=self.null_exec, **run_kwargs)
        except AirflowException:
            pass
        # test tasks
        for task_id, expected_state in expected_task_states.items():
            task = dag.get_task(task_id)
            ti = TI(task, ex_date)
            ti.refresh_from_db()
            self.assertEqual(ti.state, expected_state)
        # load dagrun
        dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
        dr = dr[0]
        dr.dag = dag
        self.assertEqual(dr.state, dagrun_state)
    def test_dagrun_fail(self):
        """
        DagRuns with one failed and one incomplete root task -> FAILED
        """
        self.evaluate_dagrun(
            dag_id='test_dagrun_states_fail',
            expected_task_states={
                'test_dagrun_fail': State.FAILED,
                'test_dagrun_succeed': State.UPSTREAM_FAILED,
            },
            dagrun_state=State.FAILED)
    def test_dagrun_success(self):
        """
        DagRuns with one failed and one successful root task -> SUCCESS
        """
        self.evaluate_dagrun(
            dag_id='test_dagrun_states_success',
            expected_task_states={
                'test_dagrun_fail': State.FAILED,
                'test_dagrun_succeed': State.SUCCESS,
            },
            dagrun_state=State.SUCCESS)
    def test_dagrun_root_fail(self):
        """
        DagRuns with one successful and one failed root task -> FAILED
        """
        self.evaluate_dagrun(
            dag_id='test_dagrun_states_root_fail',
            expected_task_states={
                'test_dagrun_succeed': State.SUCCESS,
                'test_dagrun_fail': State.FAILED,
            },
            dagrun_state=State.FAILED)
    def test_dagrun_root_fail_unfinished(self):
        """
        DagRuns with one unfinished and one failed root task -> RUNNING
        """
        # TODO: this should live in test_dagrun.py
        # Run both the failed and successful tasks
        scheduler = SchedulerJob()
        dag_id = 'test_dagrun_states_root_fail_unfinished'
        dag = self.dagbag.get_dag(dag_id)
        dr = scheduler.create_dag_run(dag)
        # Tell the mock executor to report this specific task as failed.
        self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', DEFAULT_DATE)
        with self.assertRaises(AirflowException):
            dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)
        # Mark the successful task as never having run since we want to see if the
        # dagrun will be in a running state despite having an unfinished task.
        with create_session() as session:
            ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
            ti.state = State.NONE
            session.commit()
        # With one root FAILED and one root unfinished (NONE), the run must
        # stay RUNNING rather than being finalized.
        dr_state = dr.update_state()
        self.assertEqual(dr_state, State.RUNNING)
    def test_dagrun_root_after_dagrun_unfinished(self):
        """
        DagRuns with one successful and one future root task -> SUCCESS

        Noted: the DagRun state could be still in running state during CI.
        """
        dag_id = 'test_dagrun_states_root_future'
        dag = self.dagbag.get_dag(dag_id)
        scheduler = SchedulerJob(
            dag_id,
            num_runs=1,
            executor=self.null_exec,
            subdir=dag.fileloc)
        scheduler.run()
        first_run = DagRun.find(dag_id=dag_id, execution_date=DEFAULT_DATE)[0]
        # Only the 'current' task should have run; the future-dated root
        # should not have produced a task instance yet.
        ti_ids = [(ti.task_id, ti.state) for ti in first_run.get_task_instances()]
        self.assertEqual(ti_ids, [('current', State.SUCCESS)])
        # Timing-dependent: the run may not have been finalized as SUCCESS
        # yet (see docstring), so accept RUNNING as well.
        self.assertIn(first_run.state, [State.SUCCESS, State.RUNNING])
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
    def test_scheduler_start_date(self):
        """
        Test that the scheduler respects start_dates, even when DAGS have run
        """
        with create_session() as session:
            dag_id = 'test_start_date_scheduling'
            dag = self.dagbag.get_dag(dag_id)
            dag.clear()
            # NOTE(review): compares dag.start_date against a *naive*
            # utcnow(); presumably both sides are comparable here — confirm
            # against how this DAG's start_date is declared.
            self.assertTrue(dag.start_date > datetime.datetime.utcnow())
            scheduler = SchedulerJob(dag_id,
                                     executor=self.null_exec,
                                     subdir=dag.fileloc,
                                     num_runs=1)
            scheduler.run()
            # zero tasks ran
            self.assertEqual(
                len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
            session.commit()
            self.assertListEqual([], self.null_exec.sorted_tasks)
            # previously, running this backfill would kick off the Scheduler
            # because it would take the most recent run and start from there
            # That behavior still exists, but now it will only do so if after the
            # start date
            bf_exec = MockExecutor()
            backfill = BackfillJob(
                executor=bf_exec,
                dag=dag,
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE)
            backfill.run()
            # one task ran
            self.assertEqual(
                len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
            self.assertListEqual(
                [
                    ((dag.dag_id, 'dummy', DEFAULT_DATE, 1), State.SUCCESS),
                ],
                bf_exec.sorted_tasks
            )
            session.commit()
            # Re-run the scheduler: it must not pick up where the backfill
            # left off, because the DAG's start_date is still in the future.
            scheduler = SchedulerJob(dag_id,
                                     executor=self.null_exec,
                                     subdir=dag.fileloc,
                                     num_runs=1)
            scheduler.run()
            # still one task
            self.assertEqual(
                len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
            session.commit()
            self.assertListEqual([], self.null_exec.sorted_tasks)
    def test_scheduler_task_start_date(self):
        """
        Test that the scheduler respects task start dates that are different from DAG start dates
        """
        dag_id = 'test_task_start_date_scheduling'
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        scheduler = SchedulerJob(dag_id,
                                 executor=self.null_exec,
                                 subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
                                 num_runs=2)
        scheduler.run()
        session = settings.Session()
        tiq = session.query(TI).filter(TI.dag_id == dag_id)
        ti1s = tiq.filter(TI.task_id == 'dummy1').all()
        ti2s = tiq.filter(TI.task_id == 'dummy2').all()
        # dummy1's own start_date is in the future, so it must not run;
        # dummy2 follows the DAG start_date and runs in both scheduler loops.
        self.assertEqual(len(ti1s), 0)
        self.assertEqual(len(ti2s), 2)
        for t in ti2s:
            self.assertEqual(t.state, State.SUCCESS)
    def test_scheduler_multiprocessing(self):
        """
        Test that the scheduler can successfully queue multiple dags in parallel
        """
        dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
        for dag_id in dag_ids:
            dag = self.dagbag.get_dag(dag_id)
            dag.clear()
        scheduler = SchedulerJob(dag_ids=dag_ids,
                                 executor=self.null_exec,
                                 subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
                                 num_runs=1)
        scheduler.run()
        # zero tasks ran: this DAG's start_date is in the future, so even
        # when scheduled alongside another DAG nothing should execute.
        dag_id = 'test_start_date_scheduling'
        session = settings.Session()
        self.assertEqual(
            len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
    @parameterized.expand([
        # (initial TI state, start_date, end_date) — every combination is a
        # state that _process_task_instances should reschedule.
        [State.NONE, None, None],
        [State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
         timezone.utcnow() - datetime.timedelta(minutes=15)],
        [State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
         timezone.utcnow() - datetime.timedelta(minutes=15)],
    ])
    def test_scheduler_process_task_instances(self, state, start_date, end_date):
        """
        Test if _process_task_instances puts the right task instances into the
        mock_list.
        """
        dag = DAG(
            dag_id='test_scheduler_process_execute_task',
            start_date=DEFAULT_DATE)
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        with create_session() as session:
            orm_dag = DagModel(dag_id=dag.dag_id)
            session.merge(orm_dag)
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Force every TI of the fresh run into the parameterized state.
        with create_session() as session:
            tis = dr.get_task_instances(session=session)
            for ti in tis:
                ti.state = state
                ti.start_date = start_date
                ti.end_date = end_date
        mock_list = Mock()
        scheduler._process_task_instances(dag, task_instances_list=mock_list)
        # Exactly one TI key should have been queued for scheduling.
        mock_list.append.assert_called_once_with(
            (dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
        )
    def test_scheduler_do_not_schedule_removed_task(self):
        """
        A task that existed when the DagRun was created but is no longer in
        the DAG must not be queued by _process_task_instances.
        """
        dag = DAG(
            dag_id='test_scheduler_do_not_schedule_removed_task',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Rebuild the DAG with no tasks at all, simulating task removal.
        dag = DAG(
            dag_id='test_scheduler_do_not_schedule_removed_task',
            start_date=DEFAULT_DATE)
        mock_list = Mock()
        scheduler._process_task_instances(dag, task_instances_list=mock_list)
        # Nothing should have been queued for the removed task.
        mock_list.put.assert_not_called()
    def test_scheduler_do_not_schedule_too_early(self):
        """
        No DagRun may be created for a DAG whose start_date is in the
        (far) future.
        """
        dag = DAG(
            dag_id='test_scheduler_do_not_schedule_too_early',
            start_date=timezone.datetime(2200, 1, 1))
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        # start_date is year 2200, so no run should be created ...
        dr = scheduler.create_dag_run(dag)
        self.assertIsNone(dr)
        # ... and consequently no task instances should be queued.
        mock_list = Mock()
        scheduler._process_task_instances(dag, task_instances_list=mock_list)
        mock_list.put.assert_not_called()
    def test_scheduler_do_not_schedule_without_tasks(self):
        """
        A DAG with no tasks (and no start_date) must not get a DagRun.
        """
        dag = DAG(
            dag_id='test_scheduler_do_not_schedule_without_tasks',
            start_date=DEFAULT_DATE)
        with create_session() as session:
            orm_dag = DagModel(dag_id=dag.dag_id)
            session.merge(orm_dag)
            scheduler = SchedulerJob()
            dag.clear(session=session)
            # Drop the start_date as well; with no tasks there is nothing to
            # derive a schedule from, so create_dag_run must decline.
            dag.start_date = None
            dr = scheduler.create_dag_run(dag, session=session)
            self.assertIsNone(dr)
    def test_scheduler_do_not_run_finished(self):
        """
        Task instances already in a finished state must not be re-queued by
        _process_task_instances.
        """
        dag = DAG(
            dag_id='test_scheduler_do_not_run_finished',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Mark every TI of the run as already finished.
        tis = dr.get_task_instances(session=session)
        for ti in tis:
            ti.state = State.SUCCESS
        session.commit()
        session.close()
        mock_list = Mock()
        scheduler._process_task_instances(dag, task_instances_list=mock_list)
        mock_list.put.assert_not_called()
    def test_scheduler_add_new_task(self):
        """
        Test if a task instance will be added if the dag is updated
        """
        dag = DAG(
            dag_id='test_scheduler_add_new_task',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # The run starts out with exactly the one original task.
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 1)
        # Add a second task to the (already running) DAG definition.
        DummyOperator(
            task_id='dummy2',
            dag=dag,
            owner='airflow')
        task_instances_list = Mock()
        scheduler._process_task_instances(dag, task_instances_list=task_instances_list)
        # The scheduler should have created a TI for the new task.
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
    def test_scheduler_verify_max_active_runs(self):
        """
        Test if a dagrun will not be scheduled if max_dag_runs has been reached
        """
        dag = DAG(
            dag_id='test_scheduler_verify_max_active_runs',
            start_date=DEFAULT_DATE)
        # Only a single concurrent run is allowed.
        dag.max_active_runs = 1
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Second run must be refused while the first is still active.
        dr = scheduler.create_dag_run(dag)
        self.assertIsNone(dr)
    def test_scheduler_fail_dagrun_timeout(self):
        """
        Test if a dagrun will be set failed if timeout
        """
        dag = DAG(
            dag_id='test_scheduler_fail_dagrun_timeout',
            start_date=DEFAULT_DATE)
        dag.dagrun_timeout = datetime.timedelta(seconds=60)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Backdate the run well past its 60-second dagrun_timeout.
        dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
        session.merge(dr)
        session.commit()
        # Creating the next run should both succeed and fail the timed-out one.
        dr2 = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr2)
        dr.refresh_from_db(session=session)
        self.assertEqual(dr.state, State.FAILED)
    def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
        """
        Test if a dagrun will not be scheduled if max_dag_runs
        has been reached and dagrun_timeout is not reached

        Test if a dagrun will be scheduled if max_dag_runs has
        been reached but dagrun_timeout is also reached
        """
        dag = DAG(
            dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
            start_date=DEFAULT_DATE)
        dag.max_active_runs = 1
        dag.dagrun_timeout = datetime.timedelta(seconds=60)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Should not be scheduled as DagRun has not timedout and max_active_runs is reached
        new_dr = scheduler.create_dag_run(dag)
        self.assertIsNone(new_dr)
        # Should be scheduled as dagrun_timeout has passed
        dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
        session.merge(dr)
        session.commit()
        new_dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(new_dr)
    def test_scheduler_max_active_runs_respected_after_clear(self):
        """
        Test if _process_task_instances only schedules ti's up to max_active_runs
        (related to issue AIRFLOW-137)
        """
        dag = DAG(
            dag_id='test_scheduler_max_active_runs_respected_after_clear',
            start_date=DEFAULT_DATE)
        dag.max_active_runs = 3
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        # First create up to 3 dagruns in RUNNING state.
        scheduler.create_dag_run(dag)
        # Reduce max_active_runs to 1
        dag.max_active_runs = 1
        task_instances_list = Mock()
        # and schedule them in, so we can check how many
        # tasks are put on the task_instances_list (should be one, not 3)
        scheduler._process_task_instances(dag, task_instances_list=task_instances_list)
        task_instances_list.append.assert_called_once_with(
            (dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
        )
    @patch.object(TI, 'pool_full')
    def test_scheduler_verify_pool_full(self, mock_pool_full):
        """
        Test task instances not queued when pool is full
        """
        # Pretend the pool is never full at the TI level so the scheduler's
        # own pool accounting is what gets exercised below.
        mock_pool_full.return_value = False
        dag = DAG(
            dag_id='test_scheduler_verify_pool_full',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow',
            pool='test_scheduler_verify_pool_full')
        session = settings.Session()
        # A pool with a single slot: only one of the two TIs can be queued.
        pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
        session.add(pool)
        orm_dag = DagModel(dag_id=dag.dag_id)
        orm_dag.is_paused = False
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob(executor=self.null_exec)
        # Create 2 dagruns, which will create 2 task instances.
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        self.assertEqual(dr.execution_date, DEFAULT_DATE)
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        task_instances_list = []
        scheduler._process_task_instances(dag, task_instances_list=task_instances_list)
        self.assertEqual(len(task_instances_list), 2)
        dagbag = self._make_simple_dag_bag([dag])
        # Recreated part of the scheduler here, to kick off tasks -> executor
        for ti_key in task_instances_list:
            task = dag.get_task(ti_key[1])
            ti = TI(task, ti_key[2])
            # Task starts out in the scheduled state. All tasks in the
            # scheduled state will be sent to the executor
            ti.state = State.SCHEDULED
            # Also save this task instance to the DB.
            session.merge(ti)
            session.commit()
        self.assertEqual(len(scheduler.executor.queued_tasks), 0, "Check test pre-condition")
        scheduler._execute_task_instances(dagbag,
                                          (State.SCHEDULED, State.UP_FOR_RETRY),
                                          session=session)
        # Only one TI fits in the 1-slot pool.
        self.assertEqual(len(scheduler.executor.queued_tasks), 1)
    def test_scheduler_auto_align(self):
        """
        Test if the schedule_interval will be auto aligned with the start_date
        such that if the start_date coincides with the schedule the first
        execution_date will be start_date, otherwise it will be start_date +
        interval.
        """
        # Case 1: start_date (10:10) does NOT coincide with the cron
        # schedule (05:04) -> first run aligns to the next interval.
        dag = DAG(
            dag_id='test_scheduler_auto_align_1',
            start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
            schedule_interval="4 5 * * *"
        )
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
        # Case 2: start_date (10:10) coincides exactly with the cron
        # schedule (10:10) -> first run IS the start_date.
        dag = DAG(
            dag_id='test_scheduler_auto_align_2',
            start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
            schedule_interval="10 10 * * *"
        )
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
    def test_scheduler_reschedule(self):
        """
        Checks if tasks that are not taken up by the executor
        get rescheduled
        """
        # do_update=False: the executor accepts tasks but never reports
        # results, so the task stays "stuck" in queued_tasks.
        executor = MockExecutor(do_update=False)
        dagbag = DagBag(executor=executor)
        dagbag.dags.clear()
        dagbag.executor = executor
        dag = DAG(
            dag_id='test_scheduler_reschedule',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        dag.clear()
        dag.is_subdag = False
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        orm_dag.is_paused = False
        session.merge(orm_dag)
        session.commit()
        dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
        @mock.patch('airflow.models.DagBag', return_value=dagbag)
        @mock.patch('airflow.models.DagBag.collect_dags')
        def do_schedule(mock_dagbag, mock_collect_dags):
            # Use a empty file since the above mock will return the
            # expected DAGs. Also specify only a single file so that it doesn't
            # try to schedule the above DAG repeatedly.
            scheduler = SchedulerJob(num_runs=1,
                                     executor=executor,
                                     subdir=os.path.join(settings.DAGS_FOLDER,
                                                         "no_dags.py"))
            scheduler.heartrate = 0
            scheduler.run()
        do_schedule()
        self.assertEqual(1, len(executor.queued_tasks))
        # The executor never ran the task; after clearing its queue the
        # scheduler should queue it again on the next pass.
        executor.queued_tasks.clear()
        do_schedule()
        self.assertEqual(2, len(executor.queued_tasks))
    def test_scheduler_sla_miss_callback(self):
        """
        Test that the scheduler calls the sla miss callback
        """
        session = settings.Session()
        sla_callback = MagicMock()
        # Create dag with a start of 1 day ago, but an sla of 0
        # so we'll already have an sla_miss on the books.
        test_start_date = days_ago(1)
        dag = DAG(dag_id='test_sla_miss',
                  sla_miss_callback=sla_callback,
                  default_args={'start_date': test_start_date,
                                'sla': datetime.timedelta()})
        task = DummyOperator(task_id='dummy',
                             dag=dag,
                             owner='airflow')
        session.merge(models.TaskInstance(task=task,
                                          execution_date=test_start_date,
                                          state='success'))
        # Pre-insert the SlaMiss row the scheduler should react to.
        session.merge(SlaMiss(task_id='dummy',
                              dag_id='test_sla_miss',
                              execution_date=test_start_date))
        scheduler = SchedulerJob(dag_id='test_sla_miss',
                                 num_runs=1)
        scheduler.manage_slas(dag=dag, session=session)
        assert sla_callback.called
    def test_scheduler_sla_miss_callback_invalid_sla(self):
        """
        Test that the scheduler does not call the sla miss callback when
        given an invalid sla
        """
        session = settings.Session()
        sla_callback = MagicMock()
        # Create dag with a start of 1 day ago, but an sla of 0
        # so we'll already have an sla_miss on the books.
        # Pass anything besides a timedelta object to the sla argument.
        test_start_date = days_ago(1)
        dag = DAG(dag_id='test_sla_miss',
                  sla_miss_callback=sla_callback,
                  default_args={'start_date': test_start_date,
                                'sla': None})
        task = DummyOperator(task_id='dummy',
                             dag=dag,
                             owner='airflow')
        session.merge(models.TaskInstance(task=task,
                                          execution_date=test_start_date,
                                          state='success'))
        session.merge(SlaMiss(task_id='dummy',
                              dag_id='test_sla_miss',
                              execution_date=test_start_date))
        scheduler = SchedulerJob(dag_id='test_sla_miss',
                                 num_runs=1)
        # sla=None means the task has no valid SLA -> no callback expected.
        scheduler.manage_slas(dag=dag, session=session)
        sla_callback.assert_not_called()
    def test_scheduler_sla_miss_callback_sent_notification(self):
        """
        Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
        """
        session = settings.Session()
        # Mock the callback function so we can verify that it was not called
        sla_callback = MagicMock()
        # Create dag with a start of 2 days ago, but an sla of 1 day
        # ago so we'll already have an sla_miss on the books
        test_start_date = days_ago(2)
        dag = DAG(dag_id='test_sla_miss',
                  sla_miss_callback=sla_callback,
                  default_args={'start_date': test_start_date,
                                'sla': datetime.timedelta(days=1)})
        task = DummyOperator(task_id='dummy',
                             dag=dag,
                             owner='airflow')
        # Create a TaskInstance for two days ago
        session.merge(models.TaskInstance(task=task,
                                          execution_date=test_start_date,
                                          state='success'))
        # Create an SlaMiss where notification was sent, but email was not
        session.merge(SlaMiss(task_id='dummy',
                              dag_id='test_sla_miss',
                              execution_date=test_start_date,
                              email_sent=False,
                              notification_sent=True))
        # Now call manage_slas and see if the sla_miss callback gets called
        scheduler = SchedulerJob(dag_id='test_sla_miss',
                                 num_runs=1)
        scheduler.manage_slas(dag=dag, session=session)
        # notification_sent=True means this miss was already handled.
        sla_callback.assert_not_called()
    def test_scheduler_sla_miss_callback_exception(self):
        """
        Test that the scheduler gracefully logs an exception if there is a problem
        calling the sla_miss_callback
        """
        session = settings.Session()
        sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
        test_start_date = days_ago(2)
        dag = DAG(dag_id='test_sla_miss',
                  sla_miss_callback=sla_callback,
                  default_args={'start_date': test_start_date})
        task = DummyOperator(task_id='dummy',
                             dag=dag,
                             owner='airflow',
                             sla=datetime.timedelta(hours=1))
        # NOTE(review): state='Success' (capitalized) differs from the
        # 'success' used by the other SLA tests — presumably intentional to
        # keep the TI out of the finished-state bucket; confirm.
        session.merge(models.TaskInstance(task=task,
                                          execution_date=test_start_date,
                                          state='Success'))
        # Create an SlaMiss where notification was sent, but email was not
        session.merge(SlaMiss(task_id='dummy',
                              dag_id='test_sla_miss',
                              execution_date=test_start_date))
        # Now call manage_slas and see if the sla_miss callback gets called
        scheduler = SchedulerJob(dag_id='test_sla_miss')
        with mock.patch('airflow.jobs.SchedulerJob.log',
                        new_callable=PropertyMock) as mock_log:
            scheduler.manage_slas(dag=dag, session=session)
            assert sla_callback.called
            # The scheduler must survive the callback failure and log it.
            mock_log().exception.assert_called_once_with(
                'Could not call sla_miss_callback for DAG %s',
                'test_sla_miss')
@mock.patch('airflow.jobs.scheduler_job.send_email')
def test_scheduler_only_collect_emails_from_sla_missed_tasks(self, mock_send_email):
session = settings.Session()
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
email1 = 'test1@test.com'
task = DummyOperator(task_id='sla_missed',
dag=dag,
owner='airflow',
email=email1,
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
email2 = 'test2@test.com'
DummyOperator(task_id='sla_not_missed',
dag=dag,
owner='airflow',
email=email2)
session.merge(SlaMiss(task_id='sla_missed',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
self.assertTrue(1, len(mock_send_email.call_args_list))
send_email_to = mock_send_email.call_args_list[0][0][0]
self.assertIn(email1, send_email_to)
self.assertNotIn(email2, send_email_to)
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='test@test.com',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_once_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
    def test_retry_still_in_executor(self):
        """
        Checks if the scheduler does not put a task in limbo, when a task is retried
        but is still present in the executor.
        """
        # do_update=False: the executor accepts but never completes tasks.
        executor = MockExecutor(do_update=False)
        dagbag = DagBag(executor=executor, dag_folder=os.path.join(settings.DAGS_FOLDER,
                                                                   "no_dags.py"))
        dagbag.dags.clear()
        dagbag.executor = executor
        dag = DAG(
            dag_id='test_retry_still_in_executor',
            start_date=DEFAULT_DATE,
            schedule_interval="@once")
        # 'exit 1' guarantees the task fails; retries=1 allows one retry.
        dag_task1 = BashOperator(
            task_id='test_retry_handling_op',
            bash_command='exit 1',
            retries=1,
            dag=dag,
            owner='airflow')
        dag.clear()
        dag.is_subdag = False
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        orm_dag.is_paused = False
        session.merge(orm_dag)
        session.commit()
        dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
        @mock.patch('airflow.models.DagBag', return_value=dagbag)
        @mock.patch('airflow.models.DagBag.collect_dags')
        def do_schedule(mock_dagbag, mock_collect_dags):
            # Use a empty file since the above mock will return the
            # expected DAGs. Also specify only a single file so that it doesn't
            # try to schedule the above DAG repeatedly.
            scheduler = SchedulerJob(num_runs=1,
                                     executor=executor,
                                     subdir=os.path.join(settings.DAGS_FOLDER,
                                                         "no_dags.py"))
            scheduler.heartrate = 0
            scheduler.run()
        do_schedule()
        self.assertEqual(1, len(executor.queued_tasks))
        def run_with_error(task, ignore_ti_state=False):
            # Run the TI directly, swallowing the expected failure so the
            # test can inspect the resulting state.
            try:
                task.run(ignore_ti_state=ignore_ti_state)
            except AirflowException:
                pass
        # Recover the queued TI from the executor and attach its task.
        ti_tuple = next(iter(executor.queued_tasks.values()))
        (_, _, _, simple_ti) = ti_tuple
        ti = simple_ti.construct_task_instance()
        ti.task = dag_task1
        self.assertEqual(ti.try_number, 1)
        # At this point, scheduler has tried to schedule the task once and
        # heartbeated the executor once, which moved the state of the task from
        # SCHEDULED to QUEUED and then to SCHEDULED, to fail the task execution
        # we need to ignore the TI state as SCHEDULED is not a valid state to start
        # executing task.
        run_with_error(ti, ignore_ti_state=True)
        self.assertEqual(ti.state, State.UP_FOR_RETRY)
        self.assertEqual(ti.try_number, 2)
        ti.refresh_from_db(lock_for_update=True, session=session)
        ti.state = State.SCHEDULED
        session.merge(ti)
        session.commit()
        # do not schedule
        do_schedule()
        self.assertTrue(executor.has_task(ti))
        ti.refresh_from_db()
        # removing self.assertEqual(ti.state, State.SCHEDULED)
        # as scheduler will move state from SCHEDULED to QUEUED
        # now the executor has cleared and it should be allowed the re-queue,
        # but tasks stay in the executor.queued_tasks after executor.heartbeat()
        # will be set back to SCHEDULED state
        executor.queued_tasks.clear()
        do_schedule()
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SCHEDULED)
        # To verify that task does get re-queued.
        executor.queued_tasks.clear()
        executor.do_update = True
        do_schedule()
        ti.refresh_from_db()
        self.assertIn(ti.state, [State.RUNNING, State.SUCCESS])
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
TI.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
    def test_dag_with_system_exit(self):
        """
        Test to check that a DAG with a sys.exit() doesn't break the scheduler.
        """
        dag_id = 'exit_test_dag'
        dag_ids = [dag_id]
        # This folder contains DAG files that call sys.exit() at parse time.
        dag_directory = os.path.join(settings.DAGS_FOLDER, "..", "dags_with_system_exit")
        dag_file = os.path.join(dag_directory,
                                'b_test_scheduler_dags.py')
        dagbag = DagBag(dag_folder=dag_file)
        for dag_id in dag_ids:
            dag = dagbag.get_dag(dag_id)
            dag.clear()
        scheduler = SchedulerJob(dag_ids=dag_ids,
                                 executor=self.null_exec,
                                 subdir=dag_directory,
                                 num_runs=1)
        scheduler.run()
        # Despite the sibling file exiting, the good DAG's task must run.
        with create_session() as session:
            self.assertEqual(
                len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
    def test_dag_get_active_runs(self):
        """
        Test to check that a DAG returns its active runs
        """
        now = timezone.utcnow()
        six_hours_ago_to_the_hour = \
            (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
        START_DATE = six_hours_ago_to_the_hour
        DAG_NAME1 = 'get_active_runs_test'
        default_args = {
            'owner': 'airflow',
            'depends_on_past': False,
            'start_date': START_DATE
        }
        dag1 = DAG(DAG_NAME1,
                   schedule_interval='* * * * *',
                   max_active_runs=1,
                   default_args=default_args
                   )
        run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
        run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
        run_this_2.set_upstream(run_this_1)
        run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
        run_this_3.set_upstream(run_this_2)
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag1.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag1.clear()
        dr = scheduler.create_dag_run(dag1)
        # We had better get a dag run
        self.assertIsNotNone(dr)
        execution_date = dr.execution_date
        running_dates = dag1.get_active_runs()
        # Use a sentinel instead of letting an empty list raise IndexError,
        # so the assertion below produces a readable failure message.
        try:
            running_date = running_dates[0]
        except Exception:
            running_date = 'Except'
        self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
    def test_dag_catchup_option(self):
        """
        Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
        """
        def setup_dag(dag_id, schedule_interval, start_date, catchup):
            # Build a 3-task chain DAG and register its DagModel row.
            default_args = {
                'owner': 'airflow',
                'depends_on_past': False,
                'start_date': start_date
            }
            dag = DAG(dag_id,
                      schedule_interval=schedule_interval,
                      max_active_runs=1,
                      catchup=catchup,
                      default_args=default_args)
            t1 = DummyOperator(task_id='t1', dag=dag)
            t2 = DummyOperator(task_id='t2', dag=dag)
            t2.set_upstream(t1)
            t3 = DummyOperator(task_id='t3', dag=dag)
            t3.set_upstream(t2)
            session = settings.Session()
            orm_dag = DagModel(dag_id=dag.dag_id)
            session.merge(orm_dag)
            session.commit()
            session.close()
            return dag
        now = timezone.utcnow()
        six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
            minute=0, second=0, microsecond=0)
        half_an_hour_ago = now - datetime.timedelta(minutes=30)
        two_hours_ago = now - datetime.timedelta(hours=2)
        scheduler = SchedulerJob()
        dag1 = setup_dag(dag_id='dag_with_catchup',
                         schedule_interval='* * * * *',
                         start_date=six_hours_ago_to_the_hour,
                         catchup=True)
        # Sanity-check the config default and the explicit catchup=True.
        default_catchup = conf.getboolean('scheduler', 'catchup_by_default')
        self.assertEqual(default_catchup, True)
        self.assertEqual(dag1.catchup, True)
        dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
                         schedule_interval='*/10 * * * *',
                         start_date=six_hours_ago_to_the_hour,
                         catchup=False)
        dr = scheduler.create_dag_run(dag2)
        # We had better get a dag run
        self.assertIsNotNone(dr)
        # The DR should be scheduled in the last half an hour, not 6 hours ago
        self.assertGreater(dr.execution_date, half_an_hour_ago)
        # The DR should be scheduled BEFORE now
        self.assertLess(dr.execution_date, timezone.utcnow())
        dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
                         schedule_interval='@hourly',
                         start_date=six_hours_ago_to_the_hour,
                         catchup=False)
        dr = scheduler.create_dag_run(dag3)
        # We had better get a dag run
        self.assertIsNotNone(dr)
        # The DR should be scheduled in the last 2 hours, not 6 hours ago
        self.assertGreater(dr.execution_date, two_hours_ago)
        # The DR should be scheduled BEFORE now
        self.assertLess(dr.execution_date, timezone.utcnow())
        # @once ignores catchup entirely; a single run is always created.
        dag4 = setup_dag(dag_id='dag_without_catchup_once',
                         schedule_interval='@once',
                         start_date=six_hours_ago_to_the_hour,
                         catchup=False)
        dr = scheduler.create_dag_run(dag4)
        self.assertIsNotNone(dr)
    def test_add_unparseable_file_before_sched_start_creates_import_error(self):
        """
        An unparseable DAG file present before the scheduler starts must
        produce exactly one ImportError row with the parse failure.
        """
        dags_folder = mkdtemp()
        try:
            unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
            with open(unparseable_filename, 'w') as unparseable_file:
                unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
            self.run_single_scheduler_loop_with_no_dags(dags_folder)
        finally:
            shutil.rmtree(dags_folder)
        with create_session() as session:
            import_errors = session.query(errors.ImportError).all()
            self.assertEqual(len(import_errors), 1)
            import_error = import_errors[0]
            self.assertEqual(import_error.filename,
                             unparseable_filename)
            self.assertEqual(import_error.stacktrace,
                             "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
    """An unparseable DAG file that appears only after the scheduler has
    already run once must still be recorded as one ImportError row.
    """
    dags_folder = mkdtemp()
    try:
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # First loop sees an empty folder...
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        # ...then the broken file shows up and a second loop picks it up.
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    with create_session() as session:
        import_errors = session.query(errors.ImportError).all()
        self.assertEqual(len(import_errors), 1)
        import_error = import_errors[0]
        self.assertEqual(import_error.filename,
                         unparseable_filename)
        self.assertEqual(import_error.stacktrace,
                         "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
    """A syntactically valid DAG file must not produce any ImportError rows."""
    # Create the temp folder *before* entering the try block: if mkdtemp()
    # itself raised inside the try, the finally clause would fail with a
    # NameError on the still-undefined ``dags_folder`` instead of the real
    # error.
    dags_folder = mkdtemp()
    try:
        parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(parseable_filename, 'w') as parseable_file:
            parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    with create_session() as session:
        import_errors = session.query(errors.ImportError).all()
        self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
    """When a file's import error changes, the old DB record is replaced
    (not accumulated): one row remains and it reflects the new error."""
    # mkdtemp() moved out of the try block so a failure there does not
    # cause a NameError in the finally clause.
    dags_folder = mkdtemp()
    try:
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # Generate original import error
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        # Generate replacement import error (the error will be on the second line now)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(
                PARSEABLE_DAG_FILE_CONTENTS +
                os.linesep +
                UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    # Use the managed session context (consistent with the other
    # import-error tests in this class) so the session is always closed.
    with create_session() as session:
        import_errors = session.query(errors.ImportError).all()
        self.assertEqual(len(import_errors), 1)
        import_error = import_errors[0]
        self.assertEqual(import_error.filename,
                         unparseable_filename)
        self.assertEqual(import_error.stacktrace,
                         "invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
    """Fixing the syntax error in a DAG file clears its ImportError row on
    the next scheduler loop."""
    # mkdtemp() moved out of the try block so a failure there does not
    # cause a NameError in the finally clause.
    dags_folder = mkdtemp()
    try:
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        # Remove the import error from the file
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(
                PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    # Use the managed session context (consistent with the other
    # import-error tests in this class) so the session is always closed.
    with create_session() as session:
        import_errors = session.query(errors.ImportError).all()
        self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
    """Deleting a broken DAG file clears its ImportError row on the next
    scheduler loop."""
    # mkdtemp() moved out of the try block so a failure there does not
    # cause a NameError in the finally clause.
    dags_folder = mkdtemp()
    try:
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    # Rerun the scheduler once the dag file has been removed
    self.run_single_scheduler_loop_with_no_dags(dags_folder)
    with create_session() as session:
        import_errors = session.query(errors.ImportError).all()
        self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
    """
    [JIRA-1357] Test the 'list_py_file_paths' function used by the
    scheduler to list and load DAGs.
    """
    # No_dags is empty, _invalid_ is ignored by .airflowignore
    ignored_files = {
        'no_dags.py',
        'test_invalid_cron.py',
        'test_zip_invalid_cron.zip',
    }
    # Every .py/.zip file in the test DAG folder, minus the ignored ones,
    # should be reported when examples are excluded.
    expected_files = {
        '{}/{}'.format(TEST_DAG_FOLDER, file_name)
        for file_name in os.listdir(TEST_DAG_FOLDER)
        if file_name.endswith(('.py', '.zip')) and file_name not in ignored_files
    }
    detected_files = set(list_py_file_paths(TEST_DAG_FOLDER, include_examples=False))
    self.assertEqual(detected_files, expected_files)
    # With include_examples=True, everything under the example-DAG package
    # (except package __init__ files) must be reported as well.
    example_dag_folder = airflow.example_dags.__path__[0]
    for root, _, files in os.walk(example_dag_folder):
        expected_files.update(
            os.path.join(root, file_name)
            for file_name in files
            if file_name.endswith(('.py', '.zip')) and file_name != '__init__.py'
        )
    detected_files = set(list_py_file_paths(TEST_DAG_FOLDER, include_examples=True))
    self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
    """With no dagruns or task instances at all, nothing gets reset."""
    scheduler = SchedulerJob()
    session = settings.Session()
    reset = scheduler.reset_state_for_orphaned_tasks(session=session)
    self.assertEqual(0, len(reset))
def test_reset_orphaned_tasks_external_triggered_dag(self):
    """A SCHEDULED task in a RUNNING, externally triggered dagrun counts
    as an orphan and is reset."""
    dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    # Put the run and its task into the orphan configuration: the run is
    # RUNNING (and externally triggered) while the task is stuck SCHEDULED.
    dr1.state = State.RUNNING
    ti.state = State.SCHEDULED
    dr1.external_trigger = True
    session.merge(ti)
    session.merge(dr1)
    session.commit()
    reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
    # Exactly the one orphaned task instance must be reset.
    self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
    """Orphan reset must skip backfill dagruns entirely."""
    dag_id = 'test_reset_orphaned_tasks_backfill_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    ti.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # A run id starting with the backfill prefix marks this as a backfill
    # run; such runs are owned by the backfill job, not the scheduler.
    dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
    session.merge(ti)
    session.merge(dr1)
    session.commit()
    self.assertTrue(dr1.is_backfill)
    # Even though the task is SCHEDULED in a RUNNING run, nothing is reset.
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
    """Try to reset when we specify a dagrun and ensure nothing else is."""
    dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    # make two dagruns, only reset for one
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.SUCCESS
    dr2.state = State.RUNNING
    ti1 = dr1.get_task_instances(session=session)[0]
    ti2 = dr2.get_task_instances(session=session)[0]
    # Both tasks look orphaned (SCHEDULED), but the filter below restricts
    # the reset to dr2.
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(dr1)
    session.merge(dr2)
    session.commit()
    reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
    self.assertEqual(1, len(reset_tis))
    ti1.refresh_from_db(session=session)
    ti2.refresh_from_db(session=session)
    # dr1's task keeps its state; dr2's task was reset to NONE.
    self.assertEqual(State.SCHEDULED, ti1.state)
    self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
    """Make sure a task in an orphaned state is not reset if it has no dagrun. """
    dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    # Insert a task instance directly, without ever creating a dagrun.
    ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
    session.add(ti)
    session.commit()
    ti.refresh_from_db()
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()
    # No dagrun -> the SCHEDULED task is not considered an orphan.
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
    """A RUNNING task in a RUNNING dagrun is not an orphan and is left
    untouched."""
    dag_id = 'test_reset_orphaned_tasks_no_orphans'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    tis = dr1.get_task_instances(session=session)
    tis[0].state = State.RUNNING
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    tis[0].refresh_from_db()
    # The task keeps its RUNNING state.
    self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
    """Ensure orphaned tasks with non-running dagruns are not reset."""
    dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    # The run already finished (SUCCESS); its tasks are no longer the
    # scheduler's responsibility even if they look orphaned.
    dr1.state = State.SUCCESS
    tis = dr1.get_task_instances(session=session)
    self.assertEqual(1, len(tis))
    tis[0].state = State.SCHEDULED
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
    """Create dagruns and ensure only ones with correct states are reset.

    (Fixed the docstring typo "esnure" and dropped an unused enumerate()
    index in the task-instance loop; logic is otherwise unchanged.)
    """
    prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
    states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
    states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
    dag = DAG(dag_id=prefix,
              start_date=DEFAULT_DATE,
              schedule_interval="@daily")
    # One task per state under test.
    tasks = []
    for i in range(len(states)):
        task_id = "{}_task_{}".format(prefix, i)
        task = DummyOperator(task_id=task_id, dag=dag)
        tasks.append(task)
    scheduler = SchedulerJob()
    session = settings.Session()
    # create dagruns: dr1 is RUNNING (eligible for reset), dr2 is not.
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    dr2.state = State.SUCCESS
    session.merge(dr1)
    session.merge(dr2)
    session.commit()
    # create taskinstances and set states
    dr1_tis = []
    dr2_tis = []
    for task, state in zip(tasks, states):
        ti1 = TI(task, dr1.execution_date)
        ti2 = TI(task, dr2.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = state
        ti2.state = state
        dr1_tis.append(ti1)
        dr2_tis.append(ti2)
        session.merge(ti1)
        session.merge(ti2)
    session.commit()
    # Per the original assertion, two task instances are reported as reset
    # for the RUNNING run (presumably the NONE one needs no change).
    self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    for ti in dr1_tis + dr2_tis:
        ti.refresh_from_db()
    # running dagrun should be reset
    for state, ti in zip(states, dr1_tis):
        if state in states_to_reset:
            self.assertIsNone(ti.state)
        else:
            self.assertEqual(state, ti.state)
    # otherwise not
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)
    # Restore dr1's task states, then exercise the filter_by_dag_run
    # variant and verify dr2 stays untouched.
    for state, ti in zip(states, dr1_tis):
        ti.state = state
    session.commit()
    scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
    # check same for dag_run version
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)
    session.close()
def test_process_dags_not_create_dagrun_for_subdags(self):
    """_process_dags must create dagruns for the parent DAG only, never
    for its subdags."""
    dag = self.dagbag.get_dag('test_subdag_operator')
    scheduler = SchedulerJob()
    # Stub out per-DAG work that is irrelevant here; only dagrun creation
    # is under test.
    scheduler._process_task_instances = mock.MagicMock()
    scheduler.manage_slas = mock.MagicMock()
    scheduler._process_dags(self.dagbag, [dag] + dag.subdags, None)
    with create_session() as session:
        sub_dagruns = (
            session
            .query(DagRun)
            .filter(DagRun.dag_id == dag.subdags[0].dag_id)
            .count()
        )
        # No runs for the subdag...
        self.assertEqual(0, sub_dagruns)
        parent_dagruns = (
            session
            .query(DagRun)
            .filter(DagRun.dag_id == dag.dag_id)
            .count()
        )
        # ...but at least one for the parent.
        self.assertGreater(parent_dagruns, 0)
def test_find_dags_to_run_includes_subdags(self):
    """_find_dags_to_process must return subdags alongside their parent DAG."""
    dag = self.dagbag.get_dag('test_subdag_operator')
    # (Removed two leftover debug print() calls of dagbag internals.)
    self.assertGreater(len(dag.subdags), 0)
    scheduler = SchedulerJob()
    dags = scheduler._find_dags_to_process(self.dagbag.dags.values(), paused_dag_ids=())
    self.assertIn(dag, dags)
    for subdag in dag.subdags:
        self.assertIn(subdag, dags)
def test_find_dags_to_run_skip_paused_dags(self):
    """A DAG listed in paused_dag_ids must be excluded from processing."""
    dagbag = DagBag(include_examples=False)
    paused_dag = dagbag.get_dag('test_subdag_operator')
    scheduler = SchedulerJob()
    to_process = scheduler._find_dags_to_process(
        dagbag.dags.values(), paused_dag_ids=[paused_dag.dag_id])
    self.assertNotIn(paused_dag, to_process)
| 36.32722 | 110 | 0.617514 |
712e3b5464b089391e0b26556fc1a1870a9020f1 | 1,094 | py | Python | kubernetes/test/test_v1beta1_pod_disruption_budget_status.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 1 | 2018-10-20T19:37:57.000Z | 2018-10-20T19:37:57.000Z | kubernetes/test/test_v1beta1_pod_disruption_budget_status.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1beta1_pod_disruption_budget_status.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 2 | 2018-07-27T19:39:34.000Z | 2020-12-25T02:48:27.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_pod_disruption_budget_status import V1beta1PodDisruptionBudgetStatus
class TestV1beta1PodDisruptionBudgetStatus(unittest.TestCase):
    """Unit-test stubs for the V1beta1PodDisruptionBudgetStatus model
    (generated by swagger-codegen)."""

    def setUp(self):
        # No shared fixtures are required for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1beta1PodDisruptionBudgetStatus(self):
        """Smoke-test stub for V1beta1PodDisruptionBudgetStatus."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1beta1_pod_disruption_budget_status.V1beta1PodDisruptionBudgetStatus()
        pass
# Allow running this generated test module directly.
if __name__ == '__main__':
    unittest.main()
| 24.311111 | 113 | 0.744059 |
71a2256c4d0b640e9ab968c0661d77c55ce4ccd6 | 135 | py | Python | tensorbuilder/patches/__init__.py | cgarciae/tensorbuilder | f8e0b19c09deaaea67611d9df51218e4a9cd705a | [
"MIT"
] | 109 | 2016-06-05T21:51:53.000Z | 2021-09-06T07:00:26.000Z | tensorbuilder/patches/__init__.py | cgarciae/tensorbuilder | f8e0b19c09deaaea67611d9df51218e4a9cd705a | [
"MIT"
] | 6 | 2016-06-06T01:05:40.000Z | 2016-09-19T19:30:49.000Z | tensorbuilder/patches/__init__.py | cgarciae/tensorbuilder | f8e0b19c09deaaea67611d9df51218e4a9cd705a | [
"MIT"
] | 13 | 2016-06-06T14:15:31.000Z | 2019-11-04T23:33:56.000Z | #import layers_patch
import tensorflow_patch
import summaries_patch
import layers_patch
import rnn_utilities_patch
import custom_patch
| 19.285714 | 26 | 0.903704 |
38caa85849805251c022ca7672501e95fe5f98d6 | 11,640 | py | Python | MetamorphicTests/all_mutants/sales_forecasting_file/172.py | anuragbms/Sales-forecasting-with-RNNs | 22b4639ecbb48381af53326ace94a3538201b586 | [
"Apache-2.0"
] | null | null | null | MetamorphicTests/all_mutants/sales_forecasting_file/172.py | anuragbms/Sales-forecasting-with-RNNs | 22b4639ecbb48381af53326ace94a3538201b586 | [
"Apache-2.0"
] | null | null | null | MetamorphicTests/all_mutants/sales_forecasting_file/172.py | anuragbms/Sales-forecasting-with-RNNs | 22b4639ecbb48381af53326ace94a3538201b586 | [
"Apache-2.0"
] | 1 | 2022-02-06T14:59:43.000Z | 2022-02-06T14:59:43.000Z | def gen_mutants():
import tensorflow as tf
import pandas
import numpy as np
DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
TRAINED_MODEL_PATH = 'savedModel'
TIME_STEPS = 10
NUMBER_OF_DAYS_TO_FORECAST = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
LSTM_UNITS = 250
TENSORBOARD_LOGDIR = 'tensorboard_log'
data_train = pandas.read_csv(DATAFILE_TRAIN)
data_validate = pandas.read_csv(DATAFILE_VALIDATE)
data_train.head()
numTrainingData = len(data_train)
numValidationData = len(data_validate)
trainingData_date = data_train['date'][0:numTrainingData]
trainingData_sales = data_train['sales'][0:numTrainingData]
trainindData_price = data_train['price'][0:numTrainingData]
validationData_date = data_validate['date'][0:numValidationData]
validationData_sales = data_validate['sales'][0:numValidationData]
validationData_price = data_validate['price'][0:numValidationData]
trainingData_sales.head()
print(len(trainingData_sales))
print(len(validationData_sales))
trainingData_sales_min = min(trainingData_sales)
trainingData_sales_max = max(trainingData_sales)
trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
print('Min:', trainingData_sales_min)
print('Range:', trainingData_sales_max - trainingData_sales_min)
trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start = start + 1
[trainingDataSequence_sales[i,:,0] for i in range(3)]
[targetDataSequence_sales[i] for i in range(3)]
a = np.arange(len(targetDataSequence_sales))
np.random.shuffle(a)
trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
loc = 0
for i in a:
trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
loc += 1
trainingDataSequence_sales = trainingDataSequence_sales_shuffle
targetDataSequence_sales = targetDataSequence_sales_shuffle
validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start += 1
tf.reset_default_graph()
inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
lastCellOutput = output[:,-1,:]
print('output:', output)
print('state:', state)
print('lastCellOutput:', lastCellOutput)
weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
print(forecast)
print(forecast_originalScale)
loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
tf.summary.scalar(tensor=loss, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
minimize_step = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
all_summary_ops = tf.summary.merge_all()
numSteps = 0
for e in range(NUM_EPOCHS):
print('starting training for epoch:', e + 1)
startLocation = 0
iteration = 0
for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
(_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
tensorboard_writer.add_summary(summary_values, numSteps)
numSteps += 1
if (iteration + 1) % 1 == 0:
print('mutpy', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
startLocation += BATCH_SIZE
if len(targetDataSequence_sales) > startLocation:
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
(_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
totalValidationLoss = 0
startLocation = 0
print('starting validation')
for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
(validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
startLocation += BATCH_SIZE
totalValidationLoss += validationLsBatch
print('first five predictions:', validationForecastBatch[0:5])
print('first five actuals :', validationBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
if startLocation < len(validationDataSequence_sales):
validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
(validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
totalValidationLoss += validationLsBatch
print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
print('----------- Saving Model')
tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
{'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
{'loss': loss, 'forecast_originalScale': forecast_originalScale})
print('saved model to:', TRAINED_MODEL_PATH)
print('----------- Finis') | 31.206434 | 232 | 0.629725 |
37360ec7bdfb6626f80397e83b1d863bc1bb75b2 | 1,699 | py | Python | train_bilstm_crf.py | Qibie/project001 | df9d03d438b5cdfa652f1fa51846aa50b203c1b6 | [
"Apache-2.0"
] | null | null | null | train_bilstm_crf.py | Qibie/project001 | df9d03d438b5cdfa652f1fa51846aa50b203c1b6 | [
"Apache-2.0"
] | null | null | null | train_bilstm_crf.py | Qibie/project001 | df9d03d438b5cdfa652f1fa51846aa50b203c1b6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import numpy as np
from bilstm_crf import BiLSTM_CRF
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau,\
TensorBoard
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
char_embedding_mat = np.load('data/char_embedding_matrix.npy')
X_train = np.load('data/X_train.npy')
X_dev = np.load('data/X_dev.npy')
y_train = np.load('data/y_train.npy')
y_dev = np.load('data/y_dev.npy')
# ner_model = BiLSTM_CRF(n_input=200, n_vocab=char_embedding_mat.shape[0],
# n_embed=100, embedding_mat=char_embedding_mat,
# keep_prob=0.5, n_lstm=100, keep_prob_lstm=0.8,
# n_entity=7, optimizer='adam', batch_size=64, epochs=500)
ner_model = BiLSTM_CRF(n_input=200, n_vocab=char_embedding_mat.shape[0],
n_embed=100, embedding_mat=char_embedding_mat,
keep_prob=0.5, n_lstm=256, keep_prob_lstm=0.6,
n_entity=7, optimizer='adam', batch_size=16, epochs=500)
cp_folder, cp_file = 'checkpoints', 'bilstm_crf_weights_best.hdf5'
log_filepath = 'logs/bilstm_crf_summaries'
cb = [ModelCheckpoint(os.path.join(cp_folder, cp_file), monitor='val_loss',
verbose=1, save_best_only=True, save_weights_only=True, mode='min'),
EarlyStopping(min_delta=1e-8, patience=10, mode='min'),
ReduceLROnPlateau(factor=0.2, patience=6, verbose=0, mode='min',
epsilon=1e-6, cooldown=4, min_lr=1e-8),
TensorBoard(log_dir=log_filepath, write_graph=True, write_images=True,
histogram_freq=1)]
ner_model.train_attention(X_train, y_train, X_dev, y_dev, cb)
| 44.710526 | 90 | 0.666274 |
b9ec7cce20fefabc534e6df6ca989c3c42cca8f1 | 3,576 | py | Python | pmp/rules/weakly_separable.py | Koozco/pmp | 3ad96b01afb2ce6644eb1917ae88ee6d72adc8cf | [
"MIT"
] | 1 | 2020-03-30T18:56:02.000Z | 2020-03-30T18:56:02.000Z | pmp/rules/weakly_separable.py | Koozco/pmp | 3ad96b01afb2ce6644eb1917ae88ee6d72adc8cf | [
"MIT"
] | 9 | 2018-11-19T00:04:52.000Z | 2022-03-11T23:51:18.000Z | pmp/rules/weakly_separable.py | Koozco/pmp | 3ad96b01afb2ce6644eb1917ae88ee6d72adc8cf | [
"MIT"
] | 2 | 2018-12-02T10:43:34.000Z | 2020-03-30T10:15:50.000Z | from itertools import combinations
from .tie_breaking import any_winner
from .rule import Rule
class WeaklySeparable(Rule):
"""
Weakly Separable scoring rule
This is base class for all weakly separable scoring rules
"""
def __init__(self, weights=None, tie_break=any_winner):
"""
:param weights: List of weights that single voter assigns to the corresponding candidates
:type weights: List
:param tie_break: Callable
"""
Rule.__init__(self, tie_break)
self.weights = weights
def compute_candidate_scores(self, k, profile):
for pref in profile.preferences:
for n in range(len(pref.order)):
candidate = pref.order[n]
if n >= len(self.weights):
break
weight = self.weights[n]
if candidate in profile.scores:
profile.scores[candidate] += weight
else:
profile.scores[candidate] = weight
def compute_score(self, candidate, k, profile):
score = 0
for pref in profile.preferences:
i = pref.order.index(candidate)
weight = self.weights[i] if i < len(self.weights) else 0
score += weight
return score
def get_committees(self, k, candidates_with_score):
"""
:param k: Size of committee
:type k: Number
:param candidates_with_score: Dictionary with lists of candidates who achieved given score
:type candidates_with_score: Dict[Number, List[Number]]
:return: List[List]
Find all winning committees
"""
all_scores = candidates_with_score.keys()
decreasing_scores = sorted(all_scores, reverse=True)
committees = []
score_index = 0
committee = []
committee_size = 0
while committee_size < k:
score = decreasing_scores[score_index]
if committee_size + len(candidates_with_score[score]) < k:
committee += candidates_with_score[score]
committee_size += len(candidates_with_score[score])
else:
complement_size = k - committee_size
if self.tie_break == any_winner:
complement = candidates_with_score[score][:complement_size]
committee += complement
committee_size += complement_size
else:
complements = list(combinations(candidates_with_score[score], complement_size))
for complement in complements:
committees.append(committee + list(complement))
committee_size += complement_size
score_index += 1
if len(committees) == 0:
committees.append(committee)
return committees
def find_committee(self, k, profile):
if self.weights is None:
raise Exception("Weights not set.")
profile.clean_scores()
self.compute_candidate_scores(k, profile)
profile.candidates_with_score = {}
for cand_id in range(len(profile.candidates)):
score = profile.scores[cand_id]
if profile.candidates_with_score.get(score) is None:
profile.candidates_with_score[score] = []
profile.candidates_with_score[score].append(cand_id)
committees = self.get_committees(k, profile.candidates_with_score)
committee = self.tie_break(committees)
return committee
| 35.76 | 99 | 0.600391 |
9b61b2128871f128c69332162d8238da2f9d7812 | 7,274 | py | Python | venus/misc/observable.py | nagylzs/python-venus-lib | 336d20532c32e874ab0a43cf866092b9e55dded5 | [
"Apache-2.0"
] | null | null | null | venus/misc/observable.py | nagylzs/python-venus-lib | 336d20532c32e874ab0a43cf866092b9e55dded5 | [
"Apache-2.0"
] | 1 | 2019-02-15T13:40:49.000Z | 2019-02-15T13:40:49.000Z | venus/misc/observable.py | nagylzs/python-venus-lib | 336d20532c32e874ab0a43cf866092b9e55dded5 | [
"Apache-2.0"
] | null | null | null | """The observable/observer pattern.
Sightly modified version of Michael Kent's observer.
See: http://radio-weblogs.com/0124960/2004/06/15.html
Changes:
# Remove the "Observer" class
Formally, you must use addObserver and removeObserver.
Reasons:
a.) "There should be only one obvious way to do it."
http://www.python.org/dev/peps/pep-0020/
The original code had two ways to do it.
b.) The observer-observable model is often used to hide implementation details, remove dependencies between
modules, separate application levels. Any object should be able to observe any observable. The observable should
be fully functional without knowing anything about its observers. Formally, if we require observers to have a
given subclass, we assume some property of the observer (its base class), which we should not do.
Example:
l = wx.ListCtrl(parent,-1)
observable.addObserver(l,"ClearAll","emptied")
observable.notifyObservers("emptied")
It is much easier to use existing class hierarchy. In this example, you do not really want to subclass each
wx.* class, create your own "ObserverListCtrl" and "ObserverCheckBox" etc.
# Add positional and keywords arguments.
The original signature
notify_observers(self, event=None, msg=None)
Is replaced with:
notify_observers(self, event=None, *args, **kwargs)
It is recommended to always use keyword arguments with default values (where applicable). This way event handler
methods with different signatures will be able to receive the same (or different) events. This also removes
assumed knowledge on the observable's part. (The observable does not really need to know the signatures of its
observers, in fact ideally it should know nothing about its observers...)
# Added support for using different methods for different events, with the same observer. The original code could
only handle one type of event per observer. The new code is able to call different handlers depending on the type of
the event.
"""
import weakref
import collections
import venus.i18n
_ = venus.i18n.get_my_translator(__file__)
# Event ids handed out by new_event_id() start strictly above this value.
MIN_EVT_ID = 1000
# Module-level counter backing new_event_id().
_evt_seq = MIN_EVT_ID
def new_event_id():
    """Creates a new event identifier.

    Use this function to create event identifiers. Do not use constant
    values, if it can be avoided.

    :return: a module-unique, monotonically increasing integer > MIN_EVT_ID.
    """
    # NOTE(review): plain counter, not protected by a lock — presumably only
    # called from a single thread; confirm before using concurrently.
    global _evt_seq
    _evt_seq += 1
    return _evt_seq
class Observable:
    """Implements the observer-observable pattern.

    Observers are held only through weak references, so registering never
    keeps an observer alive; garbage-collected observers silently drop out
    of the notification lists.
    """
    def __init__(self, *args, **kwargs):
        # observer -> {event: callback-method-name}
        self._observers = weakref.WeakKeyDictionary()
        # observer -> callback-method-name, invoked for *every* event
        self._wildcard_observers = weakref.WeakKeyDictionary()
        # event -> WeakSet of observers listening for it
        self._events = {}
        super().__init__(*args, **kwargs)
    def add_observer(self, observer, cbname, *events):
        """Add an observer for the given event(s).

        :param observer: The observer object.
        :param cbname: Name of the method of the observer objects to be called when the event fires.
        :param events: A list of events. By not giving any event, you may create a wildcard observer that
            listens for everything.

        Please note that one observer can register at most one of its methods for observing. Subsequent
        add_observer() calls will overwrite the previously given handlers.
        The only one exception is when you listen to the 'None' event. This is a wildcard and will match any
        and all events. It will always be called.
        """
        if events:
            if observer in self._observers:
                handlers = self._observers[observer]
            else:
                handlers = self._observers[observer] = {}
            for event in events:
                handlers[event] = cbname
                if event not in self._events:
                    self._events[event] = weakref.WeakSet()
                self._events[event].add(observer)
        else:
            self._wildcard_observers[observer] = cbname
    def remove_wildcard_observer(self, observer):
        """Remove wildcard observer.

        A wildcard observer listens for any event.

        :return: True if the observer was registered and removed, False otherwise.
        """
        if observer in self._wildcard_observers:
            del self._wildcard_observers[observer]
            return True
        return False
    def remove_observer(self, observer, *events):
        """Remove observer for the given event(s).

        :param observer: Observer to be removed.
        :param events: A list of events. If you do not give any event, then all events will be removed.

        Please note that you cannot remove a wildcard observer with this method. To remove a wildcard
        observer, call remove_wildcard_observer().
        """
        if observer in self._observers:
            if events:
                handlers = self._observers[observer]
                for event in events:
                    if event in handlers:
                        del handlers[event]
                        if event in self._events:
                            # discard() instead of remove(): the WeakSet may
                            # already have dropped a collected observer and
                            # remove() would raise KeyError in that case.
                            self._events[event].discard(observer)
                # If there are no event handlers left, remove the observer.
                if not handlers:
                    del self._observers[observer]
            else:
                del self._observers[observer]
    def notify_observers(self, event=None, *args, **kwargs):
        """Notify all observers about an event.

        :param event: The event. Should be a hashable value. By using an event value of None, you can notify
            the wildcard observers, but not the others.

        You can pass additional positional and keywords arguments. These arguments will be passed to the
        event handler(s) of the observer(s) that are listening for the given event.

        :raises NotImplementedError: if a registered observer lacks the method it registered with.
        """
        if event in self._events:
            # We iterate over a copy of observers, because the observer list may change while we send
            # notifications. E.g. the called event handler is able to add/remove observers.
            for observer in list(self._events[event]):
                cbname = self._observers[observer][event]
                cb = getattr(observer, cbname, None)
                if cb is None:
                    raise NotImplementedError(_("Observer has no %s method.") % cbname)
                cb(self, event, *args, **kwargs)
        # Wildcard observers. Also iterate over a copy here: mutating a
        # WeakKeyDictionary while iterating it raises RuntimeError.
        for observer in list(self._wildcard_observers):
            cbname = self._wildcard_observers[observer]
            cb = getattr(observer, cbname, None)
            if cb is None:
                raise NotImplementedError(_("Wildcard observer has no %s method.") % cbname)
            cb(self, event, *args, **kwargs)
    def observed_events(self):
        """Generator over observed events.

        Please note that None will not be listed, even if there is a wildcard observer."""
        yield from self._events
    def is_observed(self):
        """Tells if the observable has any (normal or wildcard) observers."""
        # These are weak references, so we have to find a non-empty one.
        # (Loop variable renamed from "_": it shadowed the module-level
        # i18n translator.)
        for _observer in self._observers:
            return True
        for _observer in self._wildcard_observers:
            return True
        return False
| 38.486772 | 120 | 0.651636 |
666c5ee89c15350c8094d733cb689e4935bf86eb | 1,902 | py | Python | app/ui/ui_set_gas_price.py | snitron/EVM-Simulator | 0569f907eb0a597e45378d0a155d9bb0a656359a | [
"MIT"
] | 28 | 2019-10-19T20:01:27.000Z | 2022-03-25T09:05:50.000Z | app/ui/ui_set_gas_price.py | snitron/EVM-Simulator | 0569f907eb0a597e45378d0a155d9bb0a656359a | [
"MIT"
] | 9 | 2020-07-16T19:00:01.000Z | 2020-07-17T18:14:01.000Z | app/ui/ui_set_gas_price.py | snitron/EVM-Simulator | 0569f907eb0a597e45378d0a155d9bb0a656359a | [
"MIT"
] | 7 | 2021-01-05T12:42:36.000Z | 2022-03-27T14:26:34.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qt_templates/ui_set_gas_price.ui'
#
# Created by: PyQt5 UI code generator 5.13.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SetGasPriceDialog(object):
    """UI definition for the 'Set Gas Price' dialog.

    Auto-generated by pyuic5 from ui_set_gas_price.ui — do not edit by hand;
    regenerate from the .ui file instead (manual edits will be lost).
    """
    def setupUi(self, SetGasPriceDialog):
        # Build the widget tree on the given QDialog and wire the OK/Cancel
        # buttons to accept()/reject().
        SetGasPriceDialog.setObjectName("SetGasPriceDialog")
        SetGasPriceDialog.resize(300, 140)
        self.buttonBox = QtWidgets.QDialogButtonBox(SetGasPriceDialog)
        self.buttonBox.setGeometry(QtCore.QRect(70, 90, 161, 41))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.label = QtWidgets.QLabel(SetGasPriceDialog)
        self.label.setGeometry(QtCore.QRect(70, 30, 121, 16))
        self.label.setObjectName("label")
        self.lineEdit = QtWidgets.QLineEdit(SetGasPriceDialog)
        self.lineEdit.setGeometry(QtCore.QRect(70, 50, 161, 21))
        self.lineEdit.setObjectName("lineEdit")
        self.retranslateUi(SetGasPriceDialog)
        self.buttonBox.accepted.connect(SetGasPriceDialog.accept)
        self.buttonBox.rejected.connect(SetGasPriceDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(SetGasPriceDialog)
    def retranslateUi(self, SetGasPriceDialog):
        # Apply (translatable) user-visible strings.
        _translate = QtCore.QCoreApplication.translate
        SetGasPriceDialog.setWindowTitle(_translate("SetGasPriceDialog", "Set Gas Price"))
        self.label.setText(_translate("SetGasPriceDialog", "Gas Price:"))
if __name__ == "__main__":
    # Manual preview: run this module directly to show the dialog standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    SetGasPriceDialog = QtWidgets.QDialog()
    ui = Ui_SetGasPriceDialog()
    ui.setupUi(SetGasPriceDialog)
    SetGasPriceDialog.show()
    sys.exit(app.exec_())
| 39.625 | 106 | 0.731335 |
e3039f2771ce56e05fe940df09bd926aa0eeef98 | 5,445 | py | Python | Projects/Ensembling/CIFAR100/models/sdenet.py | AkibMashrur/Research | a981e3410917216e03e09431c837607543905d83 | [
"Apache-2.0"
] | null | null | null | Projects/Ensembling/CIFAR100/models/sdenet.py | AkibMashrur/Research | a981e3410917216e03e09431c837607543905d83 | [
"Apache-2.0"
] | null | null | null | Projects/Ensembling/CIFAR100/models/sdenet.py | AkibMashrur/Research | a981e3410917216e03e09431c837607543905d83 | [
"Apache-2.0"
] | null | null | null | # Copied from https://github.com/Lingkai-Kong/SDE-Net/blob/master/SVHN/models/sdenet.py
"""
Created on Mon Mar 11 16:42:11 2019
@author: lingkaikong
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import math
__all__ = ['SDENet']
def init_params(net):
    """Initialize the parameters of every layer in *net* in place.

    Conv2d: Kaiming-normal weights (fan_out) and zero bias;
    BatchNorm2d: unit weight, zero bias;
    Linear: small normal weights (std 1e-3) and zero bias.
    """
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            init.kaiming_normal_(module.weight, mode='fan_out')
            if module.bias is not None:
                init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            init.constant_(module.weight, 1)
            init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            init.normal_(module.weight, std=1e-3)
            if module.bias is not None:
                init.constant_(module.bias, 0)
#torch.manual_seed(0)
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    layer = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                      stride=stride, padding=1, bias=False)
    return layer
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 (pointwise) convolution."""
    layer = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                      stride=stride, bias=False)
    return layer
def norm(dim):
    """GroupNorm over *dim* channels, capped at 32 groups."""
    groups = dim if dim < 32 else 32
    return nn.GroupNorm(groups, dim)
class ConcatConv2d(nn.Module):
    """Convolution that concatenates a constant time channel to its input.

    The scalar time ``t`` is broadcast into one extra feature map, so the
    wrapped conv sees ``dim_in + 1`` input channels.
    """

    def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
                 dilation=1, groups=1, bias=True, transpose=False):
        super(ConcatConv2d, self).__init__()
        if transpose:
            conv_cls = nn.ConvTranspose2d
        else:
            conv_cls = nn.Conv2d
        self._layer = conv_cls(dim_in + 1, dim_out, kernel_size=ksize,
                               stride=stride, padding=padding,
                               dilation=dilation, groups=groups, bias=bias)

    def forward(self, t, x):
        # One constant feature map filled with t, prepended to x.
        t_channel = t * torch.ones_like(x[:, :1, :, :])
        return self._layer(torch.cat([t_channel, x], dim=1))
class Drift(nn.Module):
    """Drift network f(t, x) of the neural SDE (channel count preserved)."""

    def __init__(self, dim):
        super(Drift, self).__init__()
        # Attribute names kept as-is for state-dict compatibility.
        self.norm1 = norm(dim)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)
        self.norm2 = norm(dim)
        self.conv2 = ConcatConv2d(dim, dim, 3, 1, 1)
        self.norm3 = norm(dim)

    def forward(self, t, x):
        # norm -> relu -> time-conditioned conv, twice, then a final norm.
        out = self.relu(self.norm1(x))
        out = self.conv1(t, out)
        out = self.relu(self.norm2(out))
        out = self.conv2(t, out)
        return self.norm3(out)
class Diffusion(nn.Module):
    """Diffusion network g(t, x): maps features to a per-sample scalar in (0, 1)."""

    def __init__(self, dim_in, dim_out):
        super(Diffusion, self).__init__()
        # Attribute names kept as-is for state-dict compatibility.
        self.norm1 = norm(dim_in)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = ConcatConv2d(dim_in, dim_out, 3, 1, 1)
        self.norm2 = norm(dim_in)
        self.conv2 = ConcatConv2d(dim_in, dim_out, 3, 1, 1)
        self.norm3 = norm(dim_in)
        self.conv3 = ConcatConv2d(dim_in, dim_out, 3, 1, 1)
        # Global-average-pool head ending in a sigmoid: one scalar per sample.
        self.fc = nn.Sequential(norm(dim_out), nn.ReLU(inplace=True),
                                nn.AdaptiveAvgPool2d((1, 1)), Flatten(),
                                nn.Linear(dim_out, 1), nn.Sigmoid())

    def forward(self, t, x):
        # Three (norm -> relu -> time-conditioned conv) stages, then the head.
        # NOTE(review): norm2/norm3 are built with dim_in but applied to
        # conv outputs of dim_out channels — only valid when dim_in == dim_out,
        # which is how SDENet uses this class.
        out = self.relu(self.norm1(x))
        out = self.conv1(t, out)
        out = self.relu(self.norm2(out))
        out = self.conv2(t, out)
        out = self.relu(self.norm3(out))
        out = self.conv3(t, out)
        return self.fc(out)
class Flatten(nn.Module):
    """Flattens every non-batch dimension into one."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        num_features = 1
        for size in x.shape[1:]:
            num_features *= size
        return x.view(-1, num_features)
class SDENet(nn.Module):
    """Neural-SDE classifier: downsample -> Euler–Maruyama flow -> FC head.

    forward() has two modes:
      * default: propagate features through `layer_depth` Euler–Maruyama
        steps of dx = f(t, x) dt + g(x) dW, then classify;
      * training_diffusion=True: return only the diffusion net's output on
        the detached downsampled features (trains g separately).
    """
    def __init__(self, layer_depth, num_classes=10, dim = 64):
        # layer_depth: number of Euler–Maruyama steps over t in [0, 6).
        # NOTE(review): num_classes is unused here and the head outputs 20
        # units — presumably paired outputs for 10 classes; confirm with caller.
        super(SDENet, self).__init__()
        self.layer_depth = layer_depth
        # 3-channel input (RGB) downsampled twice by stride-2 convs.
        self.downsampling_layers = nn.Sequential(
            nn.Conv2d(3, dim, 3, 1),
            norm(dim),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim, dim, 4, 2, 1),
            norm(dim),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim, dim, 4, 2, 1),
        )
        self.drift = Drift(dim)
        self.diffusion = Diffusion(dim, dim)
        self.fc_layers = nn.Sequential(norm(dim), nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d((1, 1)), Flatten(), nn.Linear(dim, 20))
        # Step size: total time horizon 6 divided into layer_depth steps.
        self.deltat = 6./self.layer_depth
        self.apply(init_params)
        # Scale factor applied to the diffusion net's sigmoid output.
        self.sigma = 50
    def forward(self, x, training_diffusion=False):
        out = self.downsampling_layers(x)
        if not training_diffusion:
            t = 0
            # Diffusion magnitude g is evaluated once at t=0 and reused for
            # every step; unsqueeze twice to broadcast the per-sample scalar
            # over the H and W dimensions.
            diffusion_term = self.sigma*self.diffusion(t, out)
            diffusion_term = torch.unsqueeze(diffusion_term, 2)
            diffusion_term = torch.unsqueeze(diffusion_term, 3)
            for i in range(self.layer_depth):
                t = 6*(float(i))/self.layer_depth
                # Euler–Maruyama update: x += f(t,x)*dt + g*sqrt(dt)*N(0,1).
                # Output is stochastic due to randn_like.
                out = out + self.drift(t, out)*self.deltat + diffusion_term*math.sqrt(self.deltat)*torch.randn_like(out).to(x)
            final_out = self.fc_layers(out)
        else:
            t = 0
            # Train only the diffusion net; detach so no gradient reaches
            # the downsampling layers.
            final_out = self.diffusion(t, out.detach())
        return final_out
def test():
    """Build a default SDENet (depth 6, width 64) for a quick smoke check."""
    return SDENet(layer_depth=6, num_classes=10, dim=64)
def count_parameters(model):
    """Return the number of trainable (requires_grad) scalar parameters."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
if __name__ == '__main__':
    # Smoke test: build the default model and print its trainable parameter count.
    model = test()
    num_params = count_parameters(model)
    print(num_params)
934d55cab0f4fe65121120a091896d64ce49a6d5 | 3,719 | py | Python | tests/test_block_until_url.py | aniruddha2000/init | fe2a32d2736c359a6911cc22bc42007ac97c5b10 | [
"BSD-3-Clause"
] | 3 | 2017-10-13T18:40:37.000Z | 2020-02-05T07:36:04.000Z | tests/test_block_until_url.py | aniruddha2000/init | fe2a32d2736c359a6911cc22bc42007ac97c5b10 | [
"BSD-3-Clause"
] | null | null | null | tests/test_block_until_url.py | aniruddha2000/init | fe2a32d2736c359a6911cc22bc42007ac97c5b10 | [
"BSD-3-Clause"
] | 5 | 2017-03-07T03:53:55.000Z | 2020-08-12T13:11:17.000Z | #!/usr/bin/python2.7
# Copyright (c) 2013 The CoreOS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import os
import select
import signal
import subprocess
import threading
import time
import unittest
script_path = os.path.abspath('%s/../../bin/block-until-url' % __file__)
class UsageTestCase(unittest.TestCase):
    """Argument-validation tests: bad invocations must exit 1 with a usage error.

    Python 2 test module (see BaseHTTPServer import and assertEquals usage).
    """
    def test_no_url(self):
        # No URL argument at all -> exit code 1, error on stderr only.
        proc = subprocess.Popen([script_path],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEquals(proc.returncode, 1)
        self.assertEquals(out, '')
        self.assertIn('invalid url', err)
    def test_invalid_url(self):
        # A non-URL string -> same failure mode as no URL.
        proc = subprocess.Popen([script_path, 'fooshizzle'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEquals(proc.returncode, 1)
        self.assertEquals(out, '')
        self.assertIn('invalid url', err)
class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler serving canned responses to exercise block-until-url.

    Routes: /ok -> 200 with a small body; /404 -> error page; anything
    else -> no response at all, so the client-side request fails.
    """
    def send_test_data(self):
        if self.path == '/ok':
            # NOTE(review): end_headers() is never called after the header
            # lines — confirm the client tolerates the missing blank line.
            ok_data = 'OK!\n'
            self.send_response(200)
            self.send_header('Content-type', 'text/plain')
            self.send_header('Content-Length', str(len(ok_data)))
            if self.command != 'HEAD':
                self.wfile.write(ok_data)
        elif self.path == '/404':
            self.send_error(404)
        else:
            # send nothing so curl fails
            pass
    def do_GET(self):
        self.send_test_data()
    def do_HEAD(self):
        self.send_test_data()
    def log_message(self, format, *args):
        # Silence per-request logging to keep test output clean.
        pass
class HttpTestCase(unittest.TestCase):
    """End-to-end tests of block-until-url against a throwaway local HTTP server."""
    def setUp(self):
        # Port 0 lets the OS pick a free port; serve from a daemon thread so
        # a hung test cannot block interpreter exit.
        self.server = BaseHTTPServer.HTTPServer(
            ('localhost', 0), TestRequestHandler)
        self.server_url = 'http://%s:%s' % self.server.server_address
        server_thread = threading.Thread(target=self.server.serve_forever)
        server_thread.daemon = True
        server_thread.start()
    def tearDown(self):
        self.server.shutdown()
    def test_quick_ok(self):
        # A URL answering 200 -> script exits 0 immediately, silently.
        proc = subprocess.Popen([script_path, '%s/ok' % self.server_url],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEquals(proc.returncode, 0)
        self.assertEquals(out, '')
        self.assertEquals(err, '')
    def test_quick_404(self):
        # A 404 still proves the server is reachable -> exit 0 as well.
        proc = subprocess.Popen([script_path, '%s/404' % self.server_url],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEquals(proc.returncode, 0)
        self.assertEquals(out, '')
        self.assertEquals(err, '')
    def test_timeout(self):
        # A path the handler never answers -> the script must keep polling
        # (still alive after 2 s) and die cleanly on SIGTERM.
        proc = subprocess.Popen([script_path, '%s/bogus' % self.server_url],
                                bufsize=4096,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        timeout = time.time() + 2 # kill after 2 seconds
        while time.time() < timeout:
            time.sleep(0.1)
            self.assertIs(proc.poll(), None, 'script terminated early!')
        proc.terminate()
        out, err = proc.communicate()
        self.assertEquals(proc.returncode, -signal.SIGTERM)
        self.assertEquals(out, '')
        self.assertEquals(err, '')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 32.622807 | 76 | 0.579726 |
6842b4a6dde0698cf31ee68ad9c56f6d94197251 | 2,903 | py | Python | tests/test_resource_constraints.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | 2 | 2022-01-29T03:46:31.000Z | 2022-02-14T14:06:35.000Z | tests/test_resource_constraints.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | tests/test_resource_constraints.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | import unittest
import psutil
from durations import Duration
from tests.fixtures.algorithms import DeviatingFromMean
from timeeval import TimeEval, DatasetManager, ResourceConstraints, Algorithm
from timeeval.resource_constraints import GB, DEFAULT_TIMEOUT
class TestResourceConstraints(unittest.TestCase):
    """Unit tests for ResourceConstraints memory/CPU limit and timeout logic."""
    def setUp(self) -> None:
        # must reserve 1 GB for OS and other software
        self.usable_memory = psutil.virtual_memory().total - GB
        self.usable_cpus = float(psutil.cpu_count())
    def test_default(self):
        # Default constraints hand the single task all usable memory and CPUs.
        limits = ResourceConstraints()
        mem, cpu = limits.get_compute_resource_limits()
        self.assertEqual(mem, self.usable_memory)
        self.assertEqual(cpu, self.usable_cpus)
    def test_default_from_tasks_per_host(self):
        # With N tasks per host, resources are split evenly N ways.
        tasks = 2
        limits = ResourceConstraints(tasks_per_host=tasks)
        mem, cpu = limits.get_compute_resource_limits()
        self.assertEqual(mem, self.usable_memory / tasks)
        self.assertEqual(cpu, self.usable_cpus / tasks)
    def test_explicit_limits(self):
        # An explicit limit for one resource leaves the other at its default.
        mem_limit = 1325
        cpu_limit = 1.256
        mem, cpu = ResourceConstraints(
            task_memory_limit=mem_limit
        ).get_compute_resource_limits()
        self.assertEqual(mem, mem_limit)
        self.assertEqual(cpu, self.usable_cpus)
        mem, cpu = ResourceConstraints(
            task_cpu_limit=cpu_limit
        ).get_compute_resource_limits()
        self.assertEqual(mem, self.usable_memory)
        self.assertEqual(cpu, cpu_limit)
    def test_overwrites(self):
        # Per-call overwrites take precedence over constructor settings.
        tasks = 2
        mem_overwrite = 1325
        cpu_overwrite = 1.256
        limits = ResourceConstraints(tasks_per_host=tasks, task_memory_limit=12)
        mem, cpu = limits.get_compute_resource_limits(
            memory_overwrite=mem_overwrite,
            cpu_overwrite=cpu_overwrite,
        )
        self.assertEqual(mem, mem_overwrite)
        self.assertEqual(cpu, cpu_overwrite)
    def test_tasks_per_node_overwrite_when_non_distributed(self):
        # Non-distributed runs force tasks_per_host back to 1.
        limits = ResourceConstraints(tasks_per_host=4)
        algorithm = Algorithm(name="dummy", main=DeviatingFromMean)
        timeeval = TimeEval(DatasetManager("./tests/example_data"), [("test", "dataset-int")], [algorithm],
                            distributed=False,
                            resource_constraints=limits)
        self.assertEqual(1, timeeval.exps.resource_constraints.tasks_per_host)
    def test_timeout(self):
        # Both train and execute timeouts default to DEFAULT_TIMEOUT.
        self.assertEqual(ResourceConstraints.default_constraints().get_train_timeout(), DEFAULT_TIMEOUT)
        self.assertEqual(ResourceConstraints.default_constraints().get_execute_timeout(), DEFAULT_TIMEOUT)
    def test_timeout_overwrite(self):
        # A per-call Duration overwrite replaces the default timeout.
        timeout_overwrite = Duration("1 minute")
        self.assertEqual(ResourceConstraints.default_constraints().get_train_timeout(timeout_overwrite), timeout_overwrite)
| 38.197368 | 123 | 0.703066 |
335b2a381a93bcbae337b96905c9315643cb5f84 | 4,891 | py | Python | run_second.py | b04901014/FT-w2v2-ser | e70175c2620b786269105ab0301cc0caab2911d0 | [
"MIT"
] | 32 | 2021-10-30T00:52:24.000Z | 2022-03-21T00:10:46.000Z | run_second.py | b04901014/FT-w2v2-ser | e70175c2620b786269105ab0301cc0caab2911d0 | [
"MIT"
] | 7 | 2021-11-05T11:12:46.000Z | 2022-02-07T15:21:42.000Z | run_second.py | b04901014/FT-w2v2-ser | e70175c2620b786269105ab0301cc0caab2911d0 | [
"MIT"
] | 6 | 2021-11-02T07:21:10.000Z | 2022-03-17T11:07:53.000Z | from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pretrain.trainer import SecondPassEmoClassifier
import argparse
# Command-line options for second-pass emotion-classifier fine-tuning.
parser = argparse.ArgumentParser()
parser.add_argument('--saving_path', type=str, default='pretrain/checkpoints_second')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--dynamic_batch', action='store_true')
parser.add_argument('--training_step', type=int, default=120000)
parser.add_argument('--warmup_step', type=int, default=4000)
parser.add_argument('--maxseqlen', type=float, default=10.0)
parser.add_argument('--resume_checkpoint', type=str, default=None)
parser.add_argument('--precision', type=int, choices=[16, 32], default=32)
parser.add_argument('--num_clusters', type=str, default='8,64,512,4096')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--accelerator', type=str, default='ddp')
parser.add_argument('--use_bucket_sampler', action='store_true')
parser.add_argument('--train_bucket_size', type=int, default=50)
parser.add_argument('--val_bucket_size', type=int, default=20)
parser.add_argument('--unsupdatadir', type=str, default=None)
parser.add_argument('--check_val_every_n_epoch', type=int, default=1)
parser.add_argument('--save_top_k', type=int, default=2)
parser.add_argument('--valid_split', type=float, default=1.0)
parser.add_argument('--w2v2_pretrain_path', type=str, default=None)
parser.add_argument('--datadir', type=str, required=True)
parser.add_argument('--labelpath', type=str, required=True)
args = parser.parse_args()

# Cluster sizes for the pseudo-label heads, e.g. "8,64,512,4096".
nclusters = [int(x) for x in args.num_clusters.split(',')]

checkpoint_callback = ModelCheckpoint(
    dirpath=args.saving_path,
    filename='w2v2-{epoch:02d}-{valid_loss:.2f}-{valid_acc:.2f}',
    # save_top_k=args.save_top_k,
    verbose=True,
    # monitor='valid_acc',
    # mode='max',
    save_last=True
)

wrapper = Trainer(
    precision=args.precision,
    amp_backend='native',
    callbacks=[checkpoint_callback],
    resume_from_checkpoint=args.resume_checkpoint,
    check_val_every_n_epoch=args.check_val_every_n_epoch,
    max_steps=args.training_step,
    gpus=(-1 if args.distributed else 1),
    accelerator=(args.accelerator if args.distributed else None),
    replace_sampler_ddp=False,
    logger=False
)

# Shared keyword arguments for both construction paths below, gathered once
# so fresh training and checkpoint warm-start cannot silently drift apart
# (the two argument lists were previously duplicated line for line).
model_kwargs = dict(
    maxstep=args.training_step,
    batch_size=args.batch_size,
    lr=args.lr,
    warmup_step=args.warmup_step,
    nclusters=nclusters,
    maxseqlen=int(16000 * args.maxseqlen),
    datadir=args.datadir,
    unsupdatadir=args.unsupdatadir,
    labeldir=args.labelpath,
    distributed=args.distributed,
    use_bucket_sampler=args.use_bucket_sampler,
    train_bucket_size=args.train_bucket_size,
    val_bucket_size=args.val_bucket_size,
    dynamic_batch=args.dynamic_batch,
    valid_split=args.valid_split,
)

if args.w2v2_pretrain_path is None:
    model = SecondPassEmoClassifier(**model_kwargs)
else:
    # Warm-start from a wav2vec2 pretraining checkpoint (strict=False: the
    # classifier heads are new), then re-initialize the linear heads.
    model = SecondPassEmoClassifier.load_from_checkpoint(
        args.w2v2_pretrain_path, strict=False, **model_kwargs)
    for linear in model.linearheads:
        linear.reset_parameters()

wrapper.fit(model)
| 53.163043 | 100 | 0.570844 |
5c1782d0ebc598599a860d268fe061e06cc639fa | 795 | py | Python | monitor/core/settings.py | laozhudetui/wam | 3101dae034344ec255c9f3dd165d2aae6b3bea95 | [
"MIT"
] | 227 | 2018-08-25T12:50:30.000Z | 2022-03-31T11:18:33.000Z | monitor/core/settings.py | laozhudetui/wam | 3101dae034344ec255c9f3dd165d2aae6b3bea95 | [
"MIT"
] | 1 | 2018-09-02T07:51:32.000Z | 2018-09-03T00:29:45.000Z | monitor/core/settings.py | laozhudetui/wam | 3101dae034344ec255c9f3dd165d2aae6b3bea95 | [
"MIT"
] | 48 | 2018-08-25T13:57:52.000Z | 2022-01-10T15:59:27.000Z | #!/usr/bin/env python
# coding: utf-8
# __buildin__ modules
import os
# Monitor loop interval (seconds).
MONITOR_CHECK_INTERVAL = 3600
# Download size limit (bytes).
DOWNLOAD_MAX_BYTES = 104857600 # 1024*1024*100 = 100MB
# Storage directory for downloaded application packages.
# NOTE(review): "SOTRE" is a typo for "STORE", kept because other modules
# reference this name.
PACKAGE_SOTRE_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), '../wam/packages/')
# Storage directory for extracted files.
FILE_STORE_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), '../wam/files/')
# File extensions included in diff comparison.
DIFF_FILTER_EXT = 'php,jsp,js,asp,aspx,py,css,html,htm,txt,cs,xml,inc,info,rb,md,ini,java'
# Replacement text used for a line when force-encoding the raw diff output fails.
CONTENT_REPLACE = '***** line contains special binary bytes cant show here *****\n'
# Directory containing the analyzer plugins.
ANALYSIS_PLUGIN_DIR = os.path.join(os.path.dirname(__file__), '../plugins')
# Log file path.
LOG_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'logs/wam.log')
| 27.413793 | 96 | 0.730818 |
2b3f69c9481cd2cc235adf711a25d6668754190a | 22,428 | py | Python | airflow/providers/google/marketing_platform/operators/display_video.py | odavid/airflow | 7269d15adfb74188359757b1705485f5d368486a | [
"Apache-2.0"
] | 1 | 2021-04-16T12:40:40.000Z | 2021-04-16T12:40:40.000Z | airflow/providers/google/marketing_platform/operators/display_video.py | odavid/airflow | 7269d15adfb74188359757b1705485f5d368486a | [
"Apache-2.0"
] | null | null | null | airflow/providers/google/marketing_platform/operators/display_video.py | odavid/airflow | 7269d15adfb74188359757b1705485f5d368486a | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google DisplayVideo operators.
"""
import csv
import json
import os
import shutil
import tempfile
import urllib.request
from typing import Any, Dict, List, Optional
from urllib.parse import urlparse

from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook
from airflow.utils.decorators import apply_defaults
class GoogleDisplayVideo360CreateReportOperator(BaseOperator):
    """
    Creates a query.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleDisplayVideo360CreateReportOperator`
    .. seealso::
        Check also the official API docs:
        `https://developers.google.com/bid-manager/v1/queries/createquery`
    :param body: Report object passed to the request's body as described here:
        https://developers.google.com/bid-manager/v1/queries#resource
    :type body: Dict[str, Any]
    :param api_version: The version of the api that will be requested for example 'v3'.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
        request must have domain-wide delegation enabled.
    :type delegate_to: str
    """
    template_fields = ("body",)
    template_ext = (".json",)
    @apply_defaults
    def __init__(
        self,
        body: Dict[str, Any],
        api_version: str = "v1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.body = body
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
    def prepare_template(self) -> None:
        # If .json is passed then we have to read the file
        if isinstance(self.body, str) and self.body.endswith('.json'):
            with open(self.body, 'r') as file:
                self.body = json.load(file)
    def execute(self, context: Dict):
        hook = GoogleDisplayVideo360Hook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
        )
        self.log.info("Creating Display & Video 360 report.")
        response = hook.create_query(query=self.body)
        report_id = response["queryId"]
        # Expose the new report id to downstream tasks via XCom.
        self.xcom_push(context, key="report_id", value=report_id)
        self.log.info("Created report with ID: %s", report_id)
        return response
class GoogleDisplayVideo360DeleteReportOperator(BaseOperator):
    """
    Deletes a stored query as well as the associated stored reports.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleDisplayVideo360DeleteReportOperator`
    .. seealso::
        Check also the official API docs:
        `https://developers.google.com/bid-manager/v1/queries/deletequery`
    :param report_id: Report ID to delete. Mutually exclusive with ``report_name``.
    :type report_id: str
    :param report_name: Name of the report to delete. Mutually exclusive with ``report_id``.
    :type report_name: str
    :param api_version: The version of the api that will be requested for example 'v3'.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
        request must have domain-wide delegation enabled.
    :type delegate_to: str
    """
    template_fields = ("report_id",)
    @apply_defaults
    def __init__(
        self,
        report_id: Optional[str] = None,
        report_name: Optional[str] = None,
        api_version: str = "v1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.report_id = report_id
        self.report_name = report_name
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        # Exactly one of report_id / report_name must be given.
        if report_name and report_id:
            raise AirflowException("Use only one value - `report_name` or `report_id`.")
        if not (report_name or report_id):
            raise AirflowException(
                "Provide one of the values: `report_name` or `report_id`."
            )
    def execute(self, context: Dict):
        hook = GoogleDisplayVideo360Hook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
        )
        if self.report_id:
            reports_ids_to_delete = [self.report_id]
        else:
            # Resolve name -> ids: several stored queries may share a title,
            # so all matches are deleted.
            reports = hook.list_queries()
            reports_ids_to_delete = [
                report["queryId"]
                for report in reports
                if report["metadata"]["title"] == self.report_name
            ]
        for report_id in reports_ids_to_delete:
            self.log.info("Deleting report with id: %s", report_id)
            hook.delete_query(query_id=report_id)
            self.log.info("Report deleted.")
class GoogleDisplayVideo360DownloadReportOperator(BaseOperator):
"""
Retrieves a stored query.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDisplayVideo360DownloadReportOperator`
.. seealso::
Check also the official API docs:
`https://developers.google.com/bid-manager/v1/queries/getquery`
:param report_id: Report ID to retrieve.
:type report_id: str
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param report_name: The report name to set when uploading the local file.
:type report_name: str
:param chunk_size: File will be downloaded in chunks of this many bytes.
:type chunk_size: int
:param gzip: Option to compress local file or file data for upload
:type gzip: bool
:param api_version: The version of the api that will be requested for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to work, the service accountmaking the
request must have domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = ("report_id", "bucket_name", "report_name")
@apply_defaults
def __init__(
self,
report_id: str,
bucket_name: str,
report_name: Optional[str] = None,
gzip: bool = True,
chunk_size: int = 10 * 1024 * 1024,
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.report_id = report_id
self.chunk_size = chunk_size
self.gzip = gzip
self.bucket_name = self._set_bucket_name(bucket_name)
self.report_name = report_name
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
def _resolve_file_name(self, name: str) -> str:
new_name = name if name.endswith(".csv") else f"{name}.csv"
new_name = f"{new_name}.gz" if self.gzip else new_name
return new_name
@staticmethod
def _set_bucket_name(name: str) -> str:
bucket = name if not name.startswith("gs://") else name[5:]
return bucket.strip("/")
    def execute(self, context: Dict):
        """Download the latest report for ``report_id`` and upload it to GCS.

        Raises AirflowException if the DV360 query is still running, otherwise
        streams the report file from its GCS URL into a local temp file and
        re-uploads it to ``bucket_name``; pushes the final object name to XCom
        under the key ``report_name``.
        """
        hook = GoogleDisplayVideo360Hook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
        )
        gcs_hook = GCSHook(
            google_cloud_storage_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to
        )
        resource = hook.get_query(query_id=self.report_id)
        # Check if report is ready
        if resource["metadata"]["running"]:
            raise AirflowException(f"Report {self.report_id} is still running")
        # If no custom report_name provided, use DV360 name
        file_url = resource["metadata"]["googleCloudStoragePathForLatestReport"]
        report_name = self.report_name or urlparse(file_url).path.split("/")[-1]
        report_name = self._resolve_file_name(report_name)
        # Download the report
        self.log.info("Starting downloading report %s", self.report_id)
        # NOTE(review): delete=False means the temp file persists after the
        # task finishes and is never removed here — confirm this is intended.
        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
            with urllib.request.urlopen(file_url) as response:
                shutil.copyfileobj(response, temp_file, length=self.chunk_size)
            temp_file.flush()
            # Upload the local file to bucket
            gcs_hook.upload(
                bucket_name=self.bucket_name,
                object_name=report_name,
                gzip=self.gzip,
                filename=temp_file.name,
                mime_type="text/csv",
            )
        self.log.info(
            "Report %s was saved in bucket %s as %s.",
            self.report_id,
            self.bucket_name,
            report_name,
        )
        self.xcom_push(context, key="report_name", value=report_name)
class GoogleDisplayVideo360RunReportOperator(BaseOperator):
    """
    Runs a stored query to generate a report.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleDisplayVideo360RunReportOperator`
    .. seealso::
        Check also the official API docs:
        `https://developers.google.com/bid-manager/v1/queries/runquery`
    :param report_id: ID of the stored query to run.
    :type report_id: str
    :param params: Run parameters, as documented at
        https://developers.google.com/bid-manager/v1/queries/runquery
    :type params: Dict[str, Any]
    :param api_version: The version of the api that will be requested for example 'v3'.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
        request must have domain-wide delegation enabled.
    :type delegate_to: str
    """

    template_fields = ("report_id", "params")

    @apply_defaults
    def __init__(
        self,
        report_id: str,
        params: Dict[str, Any],
        api_version: str = "v1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.api_version = api_version
        self.report_id = report_id
        self.params = params

    def execute(self, context: Dict):
        """Ask Bid Manager to run the stored query identified by ``report_id``."""
        hook = GoogleDisplayVideo360Hook(
            api_version=self.api_version,
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
        )
        self.log.info(
            "Running report %s with the following params:\n %s",
            self.report_id,
            self.params,
        )
        hook.run_query(query_id=self.report_id, params=self.params)
class GoogleDisplayVideo360DownloadLineItemsOperator(BaseOperator):
    """
    Retrieves line items in CSV format.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleDisplayVideo360DownloadLineItemsOperator`
    .. seealso::
        Check also the official API docs:
        `https://developers.google.com/bid-manager/v1.1/lineitems/downloadlineitems`
    :param request_body: dictionary with parameters that should be passed into.
        More information about it can be found here:
        https://developers.google.com/bid-manager/v1.1/lineitems/downloadlineitems
    :type request_body: Dict[str, Any],
    :param bucket_name: The Google Cloud Storage bucket the CSV is uploaded to.
    :type bucket_name: str
    :param object_name: The GCS object name under which the CSV is stored.
    :type object_name: str
    :param gzip: Whether to gzip-compress the uploaded file.
    :type gzip: bool
    :param api_version: The version of the api that will be requested.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
        request must have domain-wide delegation enabled.
    :type delegate_to: str
    """
    template_fields = ("request_body", "bucket_name", "object_name")
    @apply_defaults
    def __init__(
        self,
        request_body: Dict[str, Any],
        bucket_name: str,
        object_name: str,
        gzip: bool = False,
        api_version: str = "v1.1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.request_body = request_body
        self.object_name = object_name
        self.bucket_name = bucket_name
        self.gzip = gzip
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
    def execute(self, context: Dict) -> str:
        """Fetch line items from DV360, write them to CSV, upload to GCS.

        :return: ``"<bucket_name>/<object_name>"`` of the uploaded file.
        """
        gcs_hook = GCSHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
        hook = GoogleDisplayVideo360Hook(
            gcp_conn_id=self.gcp_conn_id,
            api_version=self.api_version,
            delegate_to=self.delegate_to,
        )
        self.log.info("Retrieving report...")
        content: List[str] = hook.download_line_items(request_body=self.request_body)
        # Buffer the CSV locally before the upload; the temp file is removed
        # automatically when the context manager exits.
        with tempfile.NamedTemporaryFile("w+") as temp_file:
            writer = csv.writer(temp_file)
            writer.writerows(content)
            temp_file.flush()
            gcs_hook.upload(
                bucket_name=self.bucket_name,
                object_name=self.object_name,
                filename=temp_file.name,
                mime_type="text/csv",
                gzip=self.gzip,
            )
        return f"{self.bucket_name}/{self.object_name}"
class GoogleDisplayVideo360UploadLineItemsOperator(BaseOperator):
    """
    Uploads line items in CSV format.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleDisplayVideo360UploadLineItemsOperator`
    .. seealso::
        Check also the official API docs:
        `https://developers.google.com/bid-manager/v1.1/lineitems/uploadlineitems`
    :param bucket_name: The GCS bucket the line-items CSV is downloaded from.
    :type bucket_name: str
    :param object_name: The GCS object holding the line-items CSV to upload.
    :type object_name: str
    :param api_version: The version of the api that will be requested.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
        request must have domain-wide delegation enabled.
    :type delegate_to: str
    """
    template_fields = (
        "bucket_name",
        "object_name",
    )

    @apply_defaults
    def __init__(
        self,
        bucket_name: str,
        object_name: str,
        api_version: str = "v1.1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket_name = bucket_name
        self.object_name = object_name
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to

    def execute(self, context: Dict):
        """Download the CSV object from GCS and push its line items to DV360."""
        gcs_hook = GCSHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
        hook = GoogleDisplayVideo360Hook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
        )
        # Bug fix: the original call passed no argument for the %s placeholder,
        # so the literal "%s" was logged instead of the object name.
        self.log.info("Uploading file %s...", self.object_name)
        # Saving file in the temporary directory,
        # downloaded file from the GCS could be a 1GB size or even more
        with tempfile.NamedTemporaryFile("w+") as f:
            line_items = gcs_hook.download(
                bucket_name=self.bucket_name,
                object_name=self.object_name,
                filename=f.name,
            )
            f.flush()
            hook.upload_line_items(line_items=line_items)
class GoogleDisplayVideo360CreateSDFDownloadTaskOperator(BaseOperator):
    """
    Creates SDF operation task.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleDisplayVideo360CreateSDFDownloadTaskOperator`
    .. seealso::
        Check also the official API docs:
        `https://developers.google.com/display-video/api/reference/rest`
    :param body_request: Request body for the SDF download task. Per the API it
        may carry the SDF version, partner/advertiser IDs and entity filters —
        see the reference above for the accepted fields.
    :type body_request: Dict[str, Any]
    :param api_version: The version of the api that will be requested.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
        request must have domain-wide delegation enabled.
    :type delegate_to: str
    """
    template_fields = ("body_request", )
    @apply_defaults
    def __init__(
        self,
        body_request: Dict[str, Any],
        api_version: str = "v1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.body_request = body_request
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
    def execute(self, context: Dict):
        """Create the SDF download operation and return it (also pushed to XCom by Airflow)."""
        hook = GoogleDisplayVideo360Hook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
        )
        self.log.info("Creating operation for SDF download task...")
        operation = hook.create_sdf_download_operation(
            body_request=self.body_request
        )
        return operation
class GoogleDisplayVideo360SDFtoGCSOperator(BaseOperator):
    """
    Download SDF media and save it in the Google Cloud Storage.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleDisplayVideo360SDFtoGCSOperator`
    .. seealso::
        Check also the official API docs:
        `https://developers.google.com/display-video/api/reference/rest`
    :param operation_name: Name of the SDF download operation to retrieve.
    :type operation_name: str
    :param bucket_name: The GCS bucket the SDF media is uploaded to.
    :type bucket_name: str
    :param object_name: The GCS object name under which the media is stored.
    :type object_name: str
    :param gzip: Whether to gzip-compress the uploaded file.
    :type gzip: bool
    :param api_version: The version of the api that will be requested.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the service account making the
        request must have domain-wide delegation enabled.
    :type delegate_to: str
    """
    template_fields = ("operation_name", "bucket_name", "object_name")
    @apply_defaults
    def __init__(
        self,
        operation_name: str,
        bucket_name: str,
        object_name: str,
        gzip: bool = False,
        api_version: str = "v1",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.operation_name = operation_name
        self.bucket_name = bucket_name
        self.object_name = object_name
        self.gzip = gzip
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
    def execute(self, context: Dict):
        """Fetch the SDF operation's media and upload it to GCS.

        :return: ``"<bucket_name>/<object_name>"`` of the uploaded file.
        """
        hook = GoogleDisplayVideo360Hook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
        )
        gcs_hook = GCSHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
        self.log.info("Retrieving operation...")
        operation = hook.get_sdf_download_operation(operation_name=self.operation_name)
        self.log.info("Creating file for upload...")
        media = hook.download_media(resource_name=operation)
        self.log.info("Sending file to the Google Cloud Storage...")
        # Stream the media into a temp file in 1 MiB chunks, then upload it;
        # the temp file is removed when the context manager exits.
        with tempfile.NamedTemporaryFile() as temp_file:
            hook.download_content_from_request(
                temp_file, media, chunk_size=1024 * 1024
            )
            temp_file.flush()
            gcs_hook.upload(
                bucket_name=self.bucket_name,
                object_name=self.object_name,
                filename=temp_file.name,
                gzip=self.gzip,
            )
        return f"{self.bucket_name}/{self.object_name}"
| 36.232633 | 108 | 0.655029 |
0358b1489bcd6817381968f23b41ac7d406c12f7 | 1,198 | py | Python | deafwave/types/generator_types.py | SparXalt/deafwave-blockchain | 579eac55d55285f750c622bf66a1aa30ed6d949d | [
"Apache-2.0"
] | null | null | null | deafwave/types/generator_types.py | SparXalt/deafwave-blockchain | 579eac55d55285f750c622bf66a1aa30ed6d949d | [
"Apache-2.0"
] | null | null | null | deafwave/types/generator_types.py | SparXalt/deafwave-blockchain | 579eac55d55285f750c622bf66a1aa30ed6d949d | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from typing import List
from deafwave.types.blockchain_format.program import SerializedProgram
from deafwave.util.ints import uint32
from deafwave.util.streamable import Streamable, streamable
class GeneratorBlockCacheInterface:
    """Interface for objects that can look up a block's generator program by height."""

    def get_generator_for_block_height(self, height: uint32) -> SerializedProgram:
        """Return the generator for the block at *height* (stub — implemented by subclasses)."""
        # Requested block must be a transaction block
        pass
@dataclass(frozen=True)
@streamable
class GeneratorArg(Streamable):
    """`GeneratorArg` contains data from already-buried blocks in the blockchain"""
    # Height of the buried block the generator was taken from.
    block_height: uint32
    # The buried block's serialized generator program.
    generator: SerializedProgram
@dataclass(frozen=True)
class CompressorArg:
    """`CompressorArg` is used as input to the Block Compressor"""
    # Height of the block whose generator is referenced.
    block_height: uint32
    # The referenced serialized generator program.
    generator: SerializedProgram
    # start/end delimit a span within the generator — presumably byte offsets
    # used by the compressor; TODO confirm against the compressor implementation.
    start: int
    end: int
@dataclass(frozen=True)
@streamable
class BlockGenerator(Streamable):
    """A generator program plus references to generators buried in earlier blocks."""

    program: SerializedProgram
    generator_args: List[GeneratorArg]

    def block_height_list(self) -> List[uint32]:
        """Heights of every referenced (already-buried) generator."""
        return [arg.block_height for arg in self.generator_args]

    def generator_refs(self) -> List[SerializedProgram]:
        """Serialized programs of every referenced generator."""
        return [arg.generator for arg in self.generator_args]
| 27.227273 | 83 | 0.761269 |
4843212bcd000cdc7faab789686c5e6c53eb1636 | 3,824 | bzl | Python | kotlin/internal/jvm/android.bzl | eiiches/rules_kotlin | 89a082c69ce3bc7b0a996aea12cb2d118ab0bbc7 | [
"Apache-2.0"
] | null | null | null | kotlin/internal/jvm/android.bzl | eiiches/rules_kotlin | 89a082c69ce3bc7b0a996aea12cb2d118ab0bbc7 | [
"Apache-2.0"
] | null | null | null | kotlin/internal/jvm/android.bzl | eiiches/rules_kotlin | 89a082c69ce3bc7b0a996aea12cb2d118ab0bbc7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//kotlin/internal/jvm:jvm.bzl",
_kt_jvm_library = "kt_jvm_library",
)
_ANDROID_SDK_JAR = "%s" % Label("//third_party:android_sdk")
def _kt_android_artifact(
        name,
        srcs = [],
        deps = [],
        plugins = [],
        friends = None,
        associates = [],
        kotlinc_opts = None,
        javac_opts = None,
        enable_data_binding = False,
        **kwargs):
    """Delegates Android related build attributes to the native rules but uses the Kotlin builder to compile Java and
    Kotlin srcs. Returns a sequence of labels that a wrapping macro should export.

    Args:
        name: base name; "<name>_base" (native android_library) and "<name>_kt"
            (kt_jvm_library) targets are created from it.
        srcs: Java/Kotlin sources, routed to the Kotlin builder.
        deps/plugins/friends/associates/kotlinc_opts/javac_opts: forwarded to
            kt_jvm_library.
        enable_data_binding: forwarded to the native android_library.
        **kwargs: remaining Android attributes, forwarded to android_library.
    """
    base_name = name + "_base"
    kt_name = name + "_kt"
    # TODO(bazelbuild/rules_kotlin/issues/273): This should be retrieved from a provider.
    base_deps = deps + [_ANDROID_SDK_JAR]
    # The native rule handles resources/manifest; it exports base_deps so the
    # Kotlin target (and consumers) see them transitively.
    native.android_library(
        name = base_name,
        visibility = ["//visibility:private"],
        exports = base_deps,
        deps = deps if enable_data_binding else [],
        enable_data_binding = enable_data_binding,
        **kwargs
    )
    # The Kotlin builder compiles the sources against the base target.
    _kt_jvm_library(
        name = kt_name,
        srcs = srcs,
        deps = base_deps + [base_name],
        plugins = plugins,
        friends = friends,
        associates = associates,
        testonly = kwargs.get("testonly", default = False),
        visibility = ["//visibility:private"],
        kotlinc_opts = kotlinc_opts,
        javac_opts = javac_opts,
    )
    return [base_name, kt_name]
def kt_android_library(name, exports = [], visibility = None, **kwargs):
    """Creates an Android sandwich library.
    `srcs`, `deps`, `plugins` are routed to `kt_jvm_library` the other android
    related attributes are handled by the native `android_library` rule.
    """
    # The public target just re-exports the two halves built by
    # _kt_android_artifact (native base + Kotlin compilation).
    native.android_library(
        name = name,
        exports = exports + _kt_android_artifact(name, **kwargs),
        visibility = visibility,
        tags = kwargs.get("tags", default = None),
        testonly = kwargs.get("testonly", default = 0),
    )
def kt_android_local_test(
        name,
        jvm_flags = None,
        manifest = None,
        manifest_values = None,
        test_class = None,
        size = None,
        timeout = None,
        flaky = False,
        shard_count = None,
        visibility = None,
        **kwargs):
    """Creates a testable Android sandwich library.
    `srcs`, `deps`, `plugins`, `friends` are routed to `kt_jvm_library` the other android
    related attributes are handled by the native `android_library` rule while the test attributes
    are picked out and handled by the `android_local_test` rule.
    """
    # Explicit test attributes are kept out of **kwargs so they reach
    # android_local_test directly; everything else builds the sandwich.
    native.android_local_test(
        name = name,
        deps = kwargs.get("deps", []) + _kt_android_artifact(name = name, **kwargs),
        jvm_flags = jvm_flags,
        test_class = test_class,
        visibility = visibility,
        size = size,
        timeout = timeout,
        flaky = flaky,
        shard_count = shard_count,
        custom_package = kwargs.get("custom_package", default = None),
        manifest = manifest,
        manifest_values = manifest_values,
        tags = kwargs.get("tags", default = None),
        testonly = kwargs.get("testonly", default = True),
    )
| 34.142857 | 117 | 0.637814 |
c9a3a1c17a895153a80889bbb5b2b9d5c90cb823 | 7,730 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/cloud/rackspace/rax_cdb.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/ansible/modules/cloud/rackspace/rax_cdb.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/ansible/modules/cloud/rackspace/rax_cdb.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_cdb
short_description: create/delete or resize a Rackspace Cloud Databases instance
description:
- creates / deletes or resize a Rackspace Cloud Databases instance
and optionally waits for it to be 'running'. The name option needs to be
unique since it's used to identify the instance.
version_added: "1.8"
options:
name:
description:
- Name of the databases server instance
flavor:
description:
- flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB)
default: 1
volume:
description:
- Volume size of the database 1-150GB
default: 2
cdb_type:
description:
- type of instance (i.e. MySQL, MariaDB, Percona)
default: MySQL
version_added: "2.0"
aliases: ['type']
cdb_version:
description:
- version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
choices: ['5.1', '5.6', '10']
version_added: "2.0"
aliases: ['version']
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
wait:
description:
- wait for the instance to be in state 'running' before returning
type: bool
default: 'no'
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: "Simon JAILLET (@jails)"
extends_documentation_fragment:
- rackspace
- rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Cloud Databases
gather_facts: False
tasks:
- name: Server build request
local_action:
module: rax_cdb
credentials: ~/.raxpub
region: IAD
name: db-server1
flavor: 1
volume: 2
cdb_type: MySQL
cdb_version: 5.6
wait: yes
state: present
register: rax_db_server
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
def find_instance(name):
    """Return the Cloud Databases instance named *name*, or False when absent."""
    instances = pyrax.cloud_databases.list()
    for instance in instances or []:
        if instance.name == name:
            return instance
    return False
def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
                  wait_timeout):
    """Create or resize a Cloud Databases instance and report the result.

    Exits through ``module.exit_json``/``module.fail_json``; never returns
    normally under Ansible.
    """
    # All of these parameters are mandatory for a create/resize request.
    for arg, value in dict(name=name, flavor=flavor,
                           volume=volume, type=cdb_type, version=cdb_version
                           ).items():
        if not value:
            module.fail_json(msg='%s is required for the "rax_cdb"'
                                 ' module' % arg)

    if not 1 <= volume <= 150:
        module.fail_json(msg='volume is required to be between 1 and 150')

    cdb = pyrax.cloud_databases

    flavors = [item.id for item in cdb.list_flavors()]
    if flavor not in flavors:
        module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor))

    changed = False

    instance = find_instance(name)

    if not instance:
        action = 'create'
        try:
            instance = cdb.create(name=name, flavor=flavor, volume=volume,
                                  type=cdb_type, version=cdb_version)
        except Exception as e:
            # Bug fix: e.message is Python 2 only and raises AttributeError
            # on Python 3; str(e) works on both.
            module.fail_json(msg=str(e))
        else:
            changed = True
    else:
        action = None

        if instance.volume.size != volume:
            action = 'resize'
            if instance.volume.size > volume:
                module.fail_json(changed=False, action=action,
                                 msg='The new volume size must be larger than '
                                     'the current volume size',
                                 cdb=rax_to_dict(instance))
            instance.resize_volume(volume)
            changed = True

        if int(instance.flavor.id) != flavor:
            action = 'resize'
            # A flavor resize requires the instance to be ACTIVE first.
            pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
                                   attempts=wait_timeout)
            instance.resize(flavor)
            changed = True

    if wait:
        pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
                               attempts=wait_timeout)

    if wait and instance.status != 'ACTIVE':
        module.fail_json(changed=changed, action=action,
                         cdb=rax_to_dict(instance),
                         msg='Timeout waiting for "%s" databases instance to '
                             'be created' % name)

    module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
def delete_instance(module, name, wait, wait_timeout):
    """Delete the named Cloud Databases instance and report the result.

    Exits through ``module.exit_json``/``module.fail_json``; never returns
    normally under Ansible.
    """
    if not name:
        module.fail_json(msg='name is required for the "rax_cdb" module')

    changed = False

    instance = find_instance(name)
    if not instance:
        # Nothing to delete: already in the desired state.
        module.exit_json(changed=False, action='delete')

    try:
        instance.delete()
    except Exception as e:
        # Bug fix: e.message is Python 2 only and raises AttributeError on
        # Python 3; str(e) works on both.
        module.fail_json(msg=str(e))
    else:
        changed = True

    if wait:
        pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
                               attempts=wait_timeout)

    if wait and instance.status != 'SHUTDOWN':
        module.fail_json(changed=changed, action='delete',
                         cdb=rax_to_dict(instance),
                         msg='Timeout waiting for "%s" databases instance to '
                             'be deleted' % name)

    module.exit_json(changed=changed, action='delete',
                     cdb=rax_to_dict(instance))
def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
            wait_timeout):
    """Dispatch to the create/resize or delete routine based on *state*."""
    # state is constrained to 'present'/'absent' by the argument spec.
    if state == 'absent':
        delete_instance(module, name, wait, wait_timeout)
    elif state == 'present':
        save_instance(module, name, flavor, volume, cdb_type, cdb_version,
                      wait, wait_timeout)
def main():
    """Ansible module entry point: parse arguments and apply the desired state."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            flavor=dict(type='int', default=1),
            volume=dict(type='int', default=2),
            cdb_type=dict(type='str', default='MySQL', aliases=['type']),
            cdb_version=dict(type='str', default='5.6', aliases=['version']),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=300),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )
    # pyrax is imported lazily at module load; fail cleanly if it is absent.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    name = module.params.get('name')
    flavor = module.params.get('flavor')
    volume = module.params.get('volume')
    cdb_type = module.params.get('cdb_type')
    cdb_version = module.params.get('cdb_version')
    state = module.params.get('state')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    # Configures pyrax credentials/region from the module parameters.
    setup_rax_module(module, pyrax)
    rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
if __name__ == '__main__':
    main()
| 30.077821 | 108 | 0.602846 |
b4cbba8afbc0264448a6736fa1ce99912a641f4f | 998 | py | Python | tests/test_http_input.py | hknust/cwltool | 2978c8bff88be2ad357554c9291cc992d3e74a47 | [
"Apache-2.0"
] | null | null | null | tests/test_http_input.py | hknust/cwltool | 2978c8bff88be2ad357554c9291cc992d3e74a47 | [
"Apache-2.0"
] | null | null | null | tests/test_http_input.py | hknust/cwltool | 2978c8bff88be2ad357554c9291cc992d3e74a47 | [
"Apache-2.0"
] | null | null | null | import os
import tempfile
from cwltool.pathmapper import PathMapper
def test_http_path_mapping():
    """Check that PathMapper stages an HTTP URL input as a readable local file.

    NOTE(review): this test performs a real network download from GitHub and
    never cleans up the mkdtemp() directory — consider tmp_path/cleanup.
    """
    class SubPathMapper(PathMapper):
        # Trivial subclass; presumably exists to exercise subclassing of
        # PathMapper — it adds no behaviour of its own. TODO confirm intent.
        def __init__(self, referenced_files, basedir, stagedir):
            super(SubPathMapper, self).__init__(referenced_files, basedir, stagedir)
    input_file_path = "https://raw.githubusercontent.com/common-workflow-language/cwltool/master/tests/2.fasta"
    tempdir = tempfile.mkdtemp()
    base_file = [
        {
            "class": "File",
            "location": "https://raw.githubusercontent.com/common-workflow-language/cwltool/master/tests/2.fasta",
            "basename": "chr20.fa",
        }
    ]
    # _pathmap maps source locations to entries with a `.resolved` local path
    # — TODO confirm against cwltool.pathmapper internals.
    pathmap = SubPathMapper(base_file, os.getcwd(), tempdir)._pathmap
    assert input_file_path in pathmap
    assert os.path.exists(pathmap[input_file_path].resolved)
    with open(pathmap[input_file_path].resolved) as file:
        contents = file.read()
    assert ">Sequence 561 BP; 135 A; 106 C; 98 G; 222 T; 0 other;" in contents
| 33.266667 | 114 | 0.686373 |
95fbd9eab6cc3f2c56825ab31cac0886369b8225 | 710 | py | Python | scrapy_proj/openrecipes/spiders/steamykitchen_feedspider.py | fictivekin/openrecipes | 82b5c080168439b328f76a115aa2011fa4601384 | [
"Apache-2.0"
] | 300 | 2015-01-05T05:37:34.000Z | 2022-03-05T16:24:37.000Z | scrapy_proj/openrecipes/spiders/steamykitchen_feedspider.py | fictivekin/openrecipes | 82b5c080168439b328f76a115aa2011fa4601384 | [
"Apache-2.0"
] | 11 | 2015-05-14T04:15:22.000Z | 2018-01-27T17:22:32.000Z | scrapy_proj/openrecipes/spiders/steamykitchen_feedspider.py | fictivekin/openrecipes | 82b5c080168439b328f76a115aa2011fa4601384 | [
"Apache-2.0"
] | 100 | 2015-01-11T23:14:29.000Z | 2022-03-25T06:03:48.000Z | from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import XmlXPathSelector
from openrecipes.spiders.steamykitchen_spider import SteamykitchenMixin
class SteamykitchenfeedSpider(BaseSpider, SteamykitchenMixin):
    """Feed spider: reads the Steamy Kitchen RSS feed and schedules each
    entry's original-article link for recipe parsing (parse_item comes from
    SteamykitchenMixin)."""

    name = "steamykitchen.feed"
    allowed_domains = [
        "steamykitchen.com",
        "feeds.feedburner.com",
        "feedproxy.google.com"
    ]
    start_urls = [
        "http://feeds.feedburner.com/SteamyKitchen",
    ]

    def parse(self, response):
        """Extract feedburner:origLink URLs from feed items and request them."""
        selector = XmlXPathSelector(response)
        item_links = selector.select(
            "//item/*[local-name()='origLink']/text()").extract()
        return [Request(link, callback=self.parse_item) for link in item_links]
| 27.307692 | 80 | 0.695775 |
407f470903fcac46df435e71aab4b0943e8e1b7b | 2,105 | py | Python | pylimit/redis_helper.py | biplap-sarkar/pylim | 9f649334e325d032b27555665521dff44aa6cdfc | [
"Apache-2.0"
] | 17 | 2016-10-28T06:58:41.000Z | 2021-07-29T06:40:55.000Z | pylimit/redis_helper.py | biplap-sarkar/pylim | 9f649334e325d032b27555665521dff44aa6cdfc | [
"Apache-2.0"
] | 5 | 2016-11-15T02:42:27.000Z | 2021-04-20T09:00:14.000Z | pylimit/redis_helper.py | biplap-sarkar/pylim | 9f649334e325d032b27555665521dff44aa6cdfc | [
"Apache-2.0"
] | 10 | 2016-08-09T11:33:41.000Z | 2021-04-08T01:51:12.000Z | import redis
from redis.sentinel import Sentinel
from redis.client import Pipeline
import redis.client
class RedisHelper(object):
    """Small wrapper that hands out (and caches) a Redis connection, supporting
    both plain Redis and Redis Sentinel deployments."""

    def __init__(self, host: str, port: int, is_sentinel=False,
                 sentinel_service=None, password=None):
        self.host = host
        self.port = port
        self.is_sentinel = is_sentinel
        self.sentinel_service = sentinel_service
        self.password = password
        self.connection = None
        # Connect eagerly so configuration errors surface at construction time.
        self.get_connection()  # Ensure connection is established
    def get_connection(self, is_read_only=False) -> redis.StrictRedis:
        """
        Gets a StrictRedis connection for normal redis or for redis sentinel
        based upon redis mode in configuration.
        :type is_read_only: bool
        :param is_read_only: In case of redis sentinel, it returns connection
        to slave
        :return: Returns a StrictRedis connection
        """
        # NOTE(review): the connection created in __init__ (is_read_only=False)
        # is cached, so later calls with is_read_only=True still receive the
        # cached master connection — confirm this is intended.
        if self.connection is not None:
            return self.connection
        if self.is_sentinel:
            kwargs = dict()
            if self.password:
                kwargs["password"] = self.password
            sentinel = Sentinel([(self.host, self.port)], **kwargs)
            if is_read_only:
                connection = sentinel.slave_for(self.sentinel_service,
                                                decode_responses=True)
            else:
                connection = sentinel.master_for(self.sentinel_service,
                                                 decode_responses=True)
        else:
            connection = redis.StrictRedis(host=self.host, port=self.port,
                                           decode_responses=True,
                                           password=self.password)
        self.connection = connection
        return connection
    def get_atomic_connection(self) -> Pipeline:
        """
        Gets a Pipeline for normal redis or for redis sentinel based upon
        redis mode in configuration
        :return: Returns a Pipeline object
        """
        return self.get_connection().pipeline(True)
| 35.677966 | 77 | 0.587648 |
bc4bfbf3acd45a5504167921f95b0fc458410473 | 6,445 | py | Python | setup.py | cheshire/numpy | e31727075c0fc2a115c83e41e14a4600b7a4bf28 | [
"BSD-3-Clause"
] | null | null | null | setup.py | cheshire/numpy | e31727075c0fc2a115c83e41e14a4600b7a4bf28 | [
"BSD-3-Clause"
] | null | null | null | setup.py | cheshire/numpy | e31727075c0fc2a115c83e41e14a4600b7a4bf28 | [
"BSD-3-Clause"
] | 8 | 2015-12-27T21:28:21.000Z | 2019-06-17T20:39:35.000Z | #!/usr/bin/env python
"""NumPy: array processing for numbers, strings, records, and objects.
NumPy is a general-purpose array-processing package designed to
efficiently manipulate large multi-dimensional arrays of arbitrary
records without sacrificing too much speed for small multi-dimensional
arrays. NumPy is built on the Numeric code base and adds features
introduced by numarray as well as an extended C-API and the ability to
create arrays of arbitrary type which also makes NumPy suitable for
interfacing with general-purpose data-base applications.
There are also basic facilities for discrete fourier transform,
basic linear algebra and random number generation.
"""
DOCLINES = __doc__.split("\n")
import os
import shutil
import sys
import re
import subprocess
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
NAME = 'numpy'
MAINTAINER = "NumPy Developers"
MAINTAINER_EMAIL = "numpy-discussion@scipy.org"
DESCRIPTION = DOCLINES[0]
LONG_DESCRIPTION = "\n".join(DOCLINES[2:])
URL = "http://numpy.scipy.org"
DOWNLOAD_URL = "http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=175103"
LICENSE = 'BSD'
CLASSIFIERS = filter(None, CLASSIFIERS.split('\n'))
AUTHOR = "Travis E. Oliphant, et.al."
AUTHOR_EMAIL = "oliphant@enthought.com"
PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"]
MAJOR = 2
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the svn version as a string, raise a ValueError otherwise
def svn_version():
    """Return the working copy's SVN revision as a string, or "" on failure."""

    def _run_svn(cmd):
        # Build a minimal, C-locale environment so the output of `svn` is
        # not translated and can be parsed reliably.
        env = {}
        for key in ['SYSTEMROOT', 'PATH']:
            value = os.environ.get(key)
            if value is not None:
                env[key] = value
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        raw = _run_svn(['svn', 'info'])
    except OSError:
        print(" --- Could not run svn info --- ")
        return ""

    revision_re = re.compile('Revision: ([0-9]+)')
    svnver = ""
    for line in raw.decode().split('\n'):
        match = revision_re.match(line.strip())
        if match:
            svnver = match.group(1)

    if not svnver:
        print("Error while parsing svn version")
    return svnver
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# numpy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__NUMPY_SETUP__ = True
# Development (unreleased) builds get a '.dev' suffix, optionally followed by
# the SVN revision when building from an SVN checkout.
FULLVERSION = VERSION
if not ISRELEASED:
    FULLVERSION += '.dev'
    # If in git or something, bypass the svn rev
    if os.path.exists('.svn'):
        FULLVERSION += svn_version()
def write_version_py(filename='numpy/version.py'):
    """Generate the numpy version module.

    Writes *filename* with ``short_version``/``version``/``release``
    filled in from the module-level VERSION and ISRELEASED values.
    """
    # NOTE: this template is written verbatim into numpy/version.py; do
    # not reformat it.
    cnt = """
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
short_version='%(version)s'
version='%(version)s'
release=%(isrelease)s

if not release:
    version += '.dev'
    import os
    svn_version_file = os.path.join(os.path.dirname(__file__),
                                   'core','__svn_version__.py')
    if os.path.isfile(svn_version_file):
        import imp
        svn = imp.load_module('numpy.core.__svn_version__',
                              open(svn_version_file),
                              svn_version_file,
                              ('.py','U',1))
        version += svn.version
"""
    # Context manager replaces the original try/finally so the handle is
    # closed even if the string formatting raises.
    with open(filename, 'w') as a:
        a.write(cnt % {'version': VERSION, 'isrelease': str(ISRELEASED)})
def configuration(parent_package='',top_path=None):
    """numpy.distutils configuration hook: delegate to the numpy subpackage."""
    from numpy.distutils.misc_util import Configuration

    config = Configuration(None, parent_package, top_path)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)

    config.add_subpackage('numpy')
    # numpy/version.py is (re)generated by write_version_py() before this runs.
    config.get_version('numpy/version.py') # sets config.version

    return config
def setup_package():
    """Build and install numpy via numpy.distutils' setup().

    Regenerates numpy/version.py, runs the 2to3 conversion when executing
    under Python 3, then chdirs into the (possibly converted) source tree
    and invokes setup() with the module-level metadata constants.
    """

    # Rewrite the version file everytime
    if os.path.exists('numpy/version.py'): os.remove('numpy/version.py')
    write_version_py()

    # Perform 2to3 if needed
    local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    src_path = local_path

    if sys.version_info[0] == 3:
        # The converted tree lives under build/py3k; build from there.
        src_path = os.path.join(local_path, 'build', 'py3k')
        sys.path.insert(0, os.path.join(local_path, 'tools'))
        import py3tool
        print("Converting to Python3 via 2to3...")
        py3tool.sync_2to3('numpy', os.path.join(src_path, 'numpy'))

        site_cfg = os.path.join(local_path, 'site.cfg')
        if os.path.isfile(site_cfg):
            shutil.copy(site_cfg, src_path)

    # Run build
    old_path = os.getcwd()
    os.chdir(src_path)
    sys.path.insert(0, src_path)

    from numpy.distutils.core import setup

    try:
        setup(
            name=NAME,
            maintainer=MAINTAINER,
            maintainer_email=MAINTAINER_EMAIL,
            description=DESCRIPTION,
            long_description=LONG_DESCRIPTION,
            url=URL,
            download_url=DOWNLOAD_URL,
            license=LICENSE,
            classifiers=CLASSIFIERS,
            author=AUTHOR,
            author_email=AUTHOR_EMAIL,
            platforms=PLATFORMS,
            configuration=configuration )
    finally:
        # Restore the import path and working directory even if setup() fails.
        del sys.path[0]
        os.chdir(old_path)

    return

if __name__ == '__main__':
    setup_package()
| 31.135266 | 100 | 0.635531 |
18ae5da59450d3e15499d10da9a30dbcda0350fd | 890 | py | Python | flTile/utils.py | rpwagner/tiled-display | 52d135bc163360fe55ce5521784b0ef48a8c82c9 | [
"Apache-2.0"
] | 1 | 2020-12-11T17:11:45.000Z | 2020-12-11T17:11:45.000Z | flTile/utils.py | rpwagner/tiled-display | 52d135bc163360fe55ce5521784b0ef48a8c82c9 | [
"Apache-2.0"
] | null | null | null | flTile/utils.py | rpwagner/tiled-display | 52d135bc163360fe55ce5521784b0ef48a8c82c9 | [
"Apache-2.0"
] | null | null | null | class CursorAutoMover:
def __init__(self, target, xrange=(0,5000), yrange=(0,3000), xvel=100, yvel=100):
self.target = target
self.xvel = xvel
self.yvel = yvel
self.xrange = xrange
self.yrange = yrange
self.visible = False
def draw(self):
pass
def update(self, secs, app):
posTuple = self.target.getPos()
pos = Vec2(posTuple[0], posTuple[1])
pos.x = pos.x + self.xvel * secs
pos.y = pos.y + self.yvel * secs
if pos.x > self.xrange[1]:
self.xvel = -abs(self.xvel)
if pos.y > self.yrange[1]:
self.yvel = -abs(self.yvel)
if pos.x < self.xrange[0]:
self.xvel = abs(self.xvel)
if pos.y < self.xrange[0]:
self.yvel = abs(self.yvel)
self.target.setPos(pos.x, pos.y)
# print "pos:", pos.x, pos.y
| 29.666667 | 85 | 0.531461 |
fbf525fa9ad00fa59dd1dcfa78a5a4a22a295312 | 10,691 | py | Python | simplerestler/element.py | svilborg/simplerestler | b6e36f318bd3c6b34264e382ce814afa53c8ca12 | [
"MIT"
] | 1 | 2015-01-26T21:30:03.000Z | 2015-01-26T21:30:03.000Z | simplerestler/element.py | svilborg/simplerestler | b6e36f318bd3c6b34264e382ce814afa53c8ca12 | [
"MIT"
] | null | null | null | simplerestler/element.py | svilborg/simplerestler | b6e36f318bd3c6b34264e382ce814afa53c8ca12 | [
"MIT"
] | null | null | null | """ Elements """
import errors
from utils import Utils
NL = "\n"
class Element:
"""Element."""
def __init__( self, name, parent=None):
self.name = name
self.params = []
self.parent = parent
self.content = []
def __call__( self, *args, **kwargs ):
self.params = args
return self
def add (self, line):
self.content.append(line)
if self.parent is not None:
self.parent.add(line)
return line
def __str__( self ):
return ''.join(self.content)
class UlElement(Element):
"""Ul Element."""
def __init__( self, parent=None):
Element.__init__(self, "ul", parent)
def __call__( self, *args, **kwargs ):
self.params = args
self.add(NL)
for arg in args:
line = "* " + Utils.html_rest(arg)
self.add(line)
self.add(NL)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class OlElement(Element):
"""Ol Element."""
def __init__( self, parent=None):
Element.__init__(self, "ol", parent)
def __call__( self, *args, **kwargs ):
self.params = args
self.add(NL)
i = 0
for arg in args:
i +=1
line = str(i) + ". " + Utils.html_rest(arg)
self.add(line)
self.add(NL)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class FlistElement(Element):
"""Field List Element.
:Date: 2001-08-16
:Version: 1
:Authors: - Me
- Myself
- I
:Indentation: Since the field marker may be quite long, the second
and subsequent lines of the field body do not have to line up
"""
def __init__( self, parent=None):
Element.__init__(self, "flist", parent)
def __call__( self, *args, **kwargs ):
self.params = args
if len(kwargs) == 0:
raise errors.DocumentError("No list fields.")
self.add(NL)
if len(kwargs) > 1:
for field in sorted(kwargs):
value = Utils.html_rest(kwargs[field])
self.add(':'+ field +': ' + value)
self.add(NL)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class HrElement(Element):
"""Hr or Transaition Element . A transition marker is a horizontal line
of 4 or more repeated punctuation ----- """
def __init__( self, parent=None):
Element.__init__(self, "hr", parent)
def __call__( self, *args, **kwargs ):
self.params = "----"
self.add(NL)
self.add('-----------')
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class PElement(Element):
"""Paragraph """
def __init__( self, parent=None):
Element.__init__(self, "p", parent)
def __call__( self, *args, **kwargs ):
text = ""
if len(kwargs) != 0:
text = kwargs.get('text', "")
elif len(args) != 0:
text = args[0]
text = Utils.html_rest(text)
text = Utils.br_rest(text)
self.add(NL)
self.add(text)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class PreElement(Element):
"""Pre - Literal Block """
def __init__( self, parent=None):
Element.__init__(self, "pre", parent)
def __call__( self, *args, **kwargs ):
text = ""
if len(kwargs) != 0:
text = kwargs.get('text', "")
elif len(args) != 0:
text = args[0]
self.add(NL)
self.add('::')
self.add(NL)
self.add(NL)
self.add(' ')
self.add(text)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class LineblockElement(Element):
"""Line Block
| These lines are
| broken exactly like in
| the source file.
"""
def __init__( self, parent=None):
Element.__init__(self, "pre", parent)
def __call__( self, *args, **kwargs ):
block = ""
if len(args) != 0:
self.add(NL)
for arg in args:
block += "| " + arg + NL
self.add(block)
return self
def __str__( self ):
return Element.__str__(self)
class CommentElement(Element):
"""Comment
.. This text will not be shown
Second line
"""
def __init__( self, parent=None):
Element.__init__(self, "comment", parent)
def __call__( self, *args, **kwargs ):
if len(kwargs) != 0:
text = kwargs.get('text', "")
elif len(args) > 0:
text = args[0]
if text is None:
raise errors.InvalidElementError("text")
self.add(NL)
self.add(NL)
self.add('.. ' + text)
self.add(NL)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class ImageElement(Element):
"""Image Element.
.. image::images/ball1.tiff
:height: 100px
:width: 200 px
:scale: 50 %
:alt: alternate text
:align: right
"""
def __init__( self, parent=None):
Element.__init__(self, "image", parent)
def __call__( self, *args, **kwargs ):
src = None
options = {}
if len(kwargs) != 0:
src = kwargs.get('src', None)
elif len(args) > 0:
src = args[0]
if src is None:
raise errors.InvalidElementError("src")
self.add(NL)
self.add('.. image:: ' + src)
if len(kwargs) > 1:
for option in sorted(kwargs):
if option != "src":
self.add(NL)
self.add(' :'+option+': ' + kwargs[option])
pass
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class TitleElement(Element):
"""
Titles
# with overline, for parts
* with overline, for chapters
=, for sections
-, for subsections
^, for subsubsections
", for paragraphs
"""
def __init__( self, parent = None):
Element.__init__(self, "title", parent)
def __call__( self, *args, **kwargs ):
text = ""
char = "*"
underline = ""
if len(kwargs) != 0:
text = kwargs.get('text', "")
char = kwargs.get('type', "*")
elif len(args) > 0:
text = args[0]
if len(args) > 1:
char = args[1]
underline = str(char) * len(text)
self.params = [text, underline]
self.add(NL)
self.add(text)
self.add(NL)
self.add(underline)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class LinkElement(Element):
"""
Link Element
`Python <http://www.python.org/>`_
"""
def __init__( self, parent=None):
Element.__init__(self, "link", parent)
def __call__( self, *args, **kwargs ):
text = ""
href = ""
if len(kwargs) != 0:
href = kwargs.get('href', "")
text = kwargs.get('text', "")
elif len(args) != 0:
href = args[0]
text = args[1]
self.params = [text, href]
self.add(NL)
self.add("`%s <%s>`_" % ( text, href ))
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class DirectiveElement(Element):
"""
Directive Element
.. note:: Note
This is content
"""
def __init__( self, parent=None):
Element.__init__(self, "directive", parent)
def __call__( self, *args, **kwargs ):
type = ""
title = ""
text = ""
if len(kwargs) != 0:
type = kwargs.get('type', "")
title = kwargs.get('title', "")
text = kwargs.get('text', "")
elif len(args) > 0:
type = args[0]
if len(args) > 1:
title = args[1]
if len(args) > 2:
text = args[2]
self.params = [type, title, text]
self.add(NL)
self.add(".. %s:: %s" % ( type, title))
self.add(NL)
if len(kwargs) > 1:
for option in sorted(kwargs):
if option != "type" and option != "text" and option != "title":
self.add(' :'+option+': ' + kwargs[option])
self.add(NL)
pass
self.add(NL)
self.add(' ' + text)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class TableElement(Element):
"""
Table Element
"""
def __init__( self, parent=None):
Element.__init__(self, "table", parent)
def __call__( self, *args, **kwargs ):
data = None
if len(args) > 0:
data = args[0]
if len(data) > 0:
self.params = args
table = self.make_table(data)
self.add(NL)
self.add(table)
self.add(NL)
return self
def make_table(self, grid):
max_cols = [max(out) for out in map(list, zip(*[[len(item) for item in row] for row in grid]))]
rst = self.table_div(max_cols, 1)
for i, row in enumerate(grid):
header_flag = False
if i == 0 or i == len(grid)-1: header_flag = True
rst += self.normalize_row(row,max_cols)
rst += self.table_div(max_cols, header_flag )
return rst
def table_div(self, max_cols, header_flag=1):
out = ""
if header_flag == 1:
style = "="
else:
style = "-"
for max_col in max_cols:
out += max_col * style + " "
out += "\n"
return out
def normalize_row(self, row, max_cols):
r = ""
for i, max_col in enumerate(max_cols):
r += row[i] + (max_col - len(row[i]) + 1) * " "
return r + "\n"
def __str__( self ):
return Element.__str__(self)
| 21.003929 | 103 | 0.486484 |
498a60220b89c9a44073f44b772e75d2037f4b30 | 25,994 | py | Python | test/utils_spec_runner.py | blink1073/mongo-python-driver | 98d393336411b7cd5ad4e184ca45192f76fb48e8 | [
"Apache-2.0"
] | null | null | null | test/utils_spec_runner.py | blink1073/mongo-python-driver | 98d393336411b7cd5ad4e184ca45192f76fb48e8 | [
"Apache-2.0"
] | null | null | null | test/utils_spec_runner.py | blink1073/mongo-python-driver | 98d393336411b7cd5ad4e184ca45192f76fb48e8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing driver specs."""
import functools
import threading
from collections import abc
from test import IntegrationTest, client_context, client_knobs
from test.utils import (
CMAPListener,
CompareType,
EventListener,
OvertCommandListener,
ServerAndTopologyEventListener,
camel_to_snake,
camel_to_snake_args,
parse_spec_options,
prepare_spec_arguments,
rs_client,
)
from typing import List
from bson import decode, encode
from bson.binary import Binary
from bson.int64 import Int64
from bson.son import SON
from gridfs import GridFSBucket
from pymongo import client_session
from pymongo.command_cursor import CommandCursor
from pymongo.cursor import Cursor
from pymongo.errors import BulkWriteError, OperationFailure, PyMongoError
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference
from pymongo.results import BulkWriteResult, _WriteResult
from pymongo.write_concern import WriteConcern
class SpecRunnerThread(threading.Thread):
    """Worker thread that runs queued callables one at a time.

    Operations are enqueued with schedule(); the thread exits once stop()
    has been called and the queue has drained, or immediately after the
    first operation that raises.  Any such exception is saved in
    ``self.exc`` for the test to inspect.
    """

    def __init__(self, name):
        super(SpecRunnerThread, self).__init__()
        self.name = name
        self.exc = None
        # setDaemon() has been deprecated since Python 3.10; set the
        # attribute directly instead.
        self.daemon = True
        self.cond = threading.Condition()
        self.ops = []
        self.stopped = False

    def schedule(self, work):
        """Queue *work* (a zero-argument callable) and wake the thread."""
        self.ops.append(work)
        with self.cond:
            self.cond.notify()

    def stop(self):
        """Ask the thread to exit once any queued operations have run."""
        self.stopped = True
        with self.cond:
            self.cond.notify()

    def run(self):
        while not self.stopped or self.ops:
            if not self.ops:
                with self.cond:
                    # Bounded wait so a missed notify cannot hang forever.
                    self.cond.wait(10)
            if self.ops:
                try:
                    work = self.ops.pop(0)
                    work()
                except Exception as exc:
                    self.exc = exc
                    self.stop()
class SpecRunner(IntegrationTest):
    """Base class for data-driven (JSON spec file) integration tests.

    Subclasses load scenario definitions and call run_scenario(); the
    various hooks (get_scenario_db_name, parse_options, run_test_ops,
    allowable_errors, ...) exist so individual spec suites (transactions,
    retryable writes, encryption, ...) can override pieces of the flow.
    """

    mongos_clients: List
    knobs: client_knobs
    listener: EventListener

    @classmethod
    def setUpClass(cls):
        super(SpecRunner, cls).setUpClass()
        cls.mongos_clients = []

        # Speed up the tests by decreasing the heartbeat frequency.
        cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1)
        cls.knobs.enable()

    @classmethod
    def tearDownClass(cls):
        cls.knobs.disable()
        super(SpecRunner, cls).tearDownClass()

    def setUp(self):
        super(SpecRunner, self).setUp()
        self.targets = {}
        self.listener = None  # type: ignore
        self.pool_listener = None
        self.server_listener = None
        self.maxDiff = None

    def _set_fail_point(self, client, command_args):
        # Build the configureFailPoint command; SON preserves key order.
        cmd = SON([("configureFailPoint", "failCommand")])
        cmd.update(command_args)
        client.admin.command(cmd)

    def set_fail_point(self, command_args):
        # Apply the fail point on every known mongos, or the one client.
        clients = self.mongos_clients if self.mongos_clients else [self.client]
        for client in clients:
            self._set_fail_point(client, command_args)

    def targeted_fail_point(self, session, fail_point):
        """Run the targetedFailPoint test operation.

        Enable the fail point on the session's pinned mongos.
        """
        clients = {c.address: c for c in self.mongos_clients}
        client = clients[session._pinned_address]
        self._set_fail_point(client, fail_point)

        self.addCleanup(self.set_fail_point, {"mode": "off"})

    def assert_session_pinned(self, session):
        """Run the assertSessionPinned test operation.

        Assert that the given session is pinned.
        """
        self.assertIsNotNone(session._transaction.pinned_address)

    def assert_session_unpinned(self, session):
        """Run the assertSessionUnpinned test operation.

        Assert that the given session is not pinned.
        """
        self.assertIsNone(session._pinned_address)
        self.assertIsNone(session._transaction.pinned_address)

    def assert_collection_exists(self, database, collection):
        """Run the assertCollectionExists test operation."""
        db = self.client[database]
        self.assertIn(collection, db.list_collection_names())

    def assert_collection_not_exists(self, database, collection):
        """Run the assertCollectionNotExists test operation."""
        db = self.client[database]
        self.assertNotIn(collection, db.list_collection_names())

    def assert_index_exists(self, database, collection, index):
        """Run the assertIndexExists test operation."""
        coll = self.client[database][collection]
        self.assertIn(index, [doc["name"] for doc in coll.list_indexes()])

    def assert_index_not_exists(self, database, collection, index):
        """Run the assertIndexNotExists test operation."""
        coll = self.client[database][collection]
        self.assertNotIn(index, [doc["name"] for doc in coll.list_indexes()])

    def assertErrorLabelsContain(self, exc, expected_labels):
        # Every expected label must be present on the exception.
        labels = [l for l in expected_labels if exc.has_error_label(l)]
        self.assertEqual(labels, expected_labels)

    def assertErrorLabelsOmit(self, exc, omit_labels):
        for label in omit_labels:
            self.assertFalse(
                exc.has_error_label(label), msg="error labels should not contain %s" % (label,)
            )

    def kill_all_sessions(self):
        clients = self.mongos_clients if self.mongos_clients else [self.client]
        for client in clients:
            try:
                client.admin.command("killAllSessions", [])
            except OperationFailure:
                # "operation was interrupted" by killing the command's
                # own session.
                pass

    def check_command_result(self, expected_result, result):
        # Only compare the keys in the expected result.
        filtered_result = {}
        for key in expected_result:
            try:
                filtered_result[key] = result[key]
            except KeyError:
                pass
        self.assertEqual(filtered_result, expected_result)

    # TODO: factor the following function with test_crud.py.
    def check_result(self, expected_result, result):
        if isinstance(result, _WriteResult):
            for res in expected_result:
                prop = camel_to_snake(res)
                # SPEC-869: Only BulkWriteResult has upserted_count.
                if prop == "upserted_count" and not isinstance(result, BulkWriteResult):
                    if result.upserted_id is not None:
                        upserted_count = 1
                    else:
                        upserted_count = 0
                    self.assertEqual(upserted_count, expected_result[res], prop)
                elif prop == "inserted_ids":
                    # BulkWriteResult does not have inserted_ids.
                    if isinstance(result, BulkWriteResult):
                        self.assertEqual(len(expected_result[res]), result.inserted_count)
                    else:
                        # InsertManyResult may be compared to [id1] from the
                        # crud spec or {"0": id1} from the retryable write spec.
                        ids = expected_result[res]
                        if isinstance(ids, dict):
                            ids = [ids[str(i)] for i in range(len(ids))]
                        self.assertEqual(ids, result.inserted_ids, prop)
                elif prop == "upserted_ids":
                    # Convert indexes from strings to integers.
                    ids = expected_result[res]
                    expected_ids = {}
                    for str_index in ids:
                        expected_ids[int(str_index)] = ids[str_index]
                    self.assertEqual(expected_ids, result.upserted_ids, prop)
                else:
                    self.assertEqual(getattr(result, prop), expected_result[res], prop)

            return True
        else:
            # Non-write results: compare recursively, only the expected
            # keys/positions are checked.
            def _helper(expected_result, result):
                if isinstance(expected_result, abc.Mapping):
                    for i in expected_result.keys():
                        self.assertEqual(expected_result[i], result[i])

                elif isinstance(expected_result, list):
                    for i, k in zip(expected_result, result):
                        _helper(i, k)
                else:
                    self.assertEqual(expected_result, result)

            _helper(expected_result, result)

    def get_object_name(self, op):
        """Allow subclasses to override handling of 'object'

        Transaction spec says 'object' is required.
        """
        return op["object"]

    @staticmethod
    def parse_options(opts):
        return parse_spec_options(opts)

    def run_operation(self, sessions, collection, operation):
        """Translate one spec 'operation' dict into a PyMongo call."""
        original_collection = collection
        name = camel_to_snake(operation["name"])
        if name == "run_command":
            name = "command"
        elif name == "download_by_name":
            name = "open_download_stream_by_name"
        elif name == "download":
            name = "open_download_stream"
        elif name == "map_reduce":
            self.skipTest("PyMongo does not support mapReduce")
        elif name == "count":
            self.skipTest("PyMongo does not support count")

        database = collection.database
        collection = database.get_collection(collection.name)
        if "collectionOptions" in operation:
            collection = collection.with_options(
                **self.parse_options(operation["collectionOptions"])
            )

        object_name = self.get_object_name(operation)
        if object_name == "gridfsbucket":
            # Only create the GridFSBucket when we need it (for the gridfs
            # retryable reads tests).
            obj = GridFSBucket(database, bucket_name=collection.name)
        else:
            objects = {
                "client": database.client,
                "database": database,
                "collection": collection,
                "testRunner": self,
            }
            objects.update(sessions)
            obj = objects[object_name]

        # Combine arguments with options and handle special cases.
        arguments = operation.get("arguments", {})
        arguments.update(arguments.pop("options", {}))
        self.parse_options(arguments)

        cmd = getattr(obj, name)

        with_txn_callback = functools.partial(
            self.run_operations, sessions, original_collection, in_with_transaction=True
        )
        prepare_spec_arguments(operation, arguments, name, sessions, with_txn_callback)

        if name == "run_on_thread":
            args = {"sessions": sessions, "collection": collection}
            args.update(arguments)
            arguments = args

        try:
            # Suppress listCollections events generated while setting up
            # encrypted collections so expectations still match.
            if name == "create_collection" and (
                "encrypted" in operation["arguments"]["name"]
                or "plaintext" in operation["arguments"]["name"]
            ):
                self.listener.ignore_list_collections = True
            result = cmd(**dict(arguments))
        finally:
            self.listener.ignore_list_collections = False

        # Cleanup open change stream cursors.
        if name == "watch":
            self.addCleanup(result.close)

        if name == "aggregate":
            if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
                # Read from the primary to ensure causal consistency.
                out = collection.database.get_collection(
                    arguments["pipeline"][-1]["$out"], read_preference=ReadPreference.PRIMARY
                )
                return out.find()
        if "download" in name:
            result = Binary(result.read())

        if isinstance(result, Cursor) or isinstance(result, CommandCursor):
            return list(result)

        return result

    def allowable_errors(self, op):
        """Allow encryption spec to override expected error classes."""
        return (PyMongoError,)

    def _run_op(self, sessions, collection, op, in_with_transaction):
        # Run a single operation, asserting the expected error (if any)
        # or checking the expected result.
        expected_result = op.get("result")
        if expect_error(op):
            with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context:
                out = self.run_operation(sessions, collection, op.copy())
            if expect_error_message(expected_result):
                if isinstance(context.exception, BulkWriteError):
                    errmsg = str(context.exception.details).lower()
                else:
                    errmsg = str(context.exception).lower()
                self.assertIn(expected_result["errorContains"].lower(), errmsg)
            if expect_error_code(expected_result):
                self.assertEqual(
                    expected_result["errorCodeName"], context.exception.details.get("codeName")
                )
            if expect_error_labels_contain(expected_result):
                self.assertErrorLabelsContain(
                    context.exception, expected_result["errorLabelsContain"]
                )
            if expect_error_labels_omit(expected_result):
                self.assertErrorLabelsOmit(context.exception, expected_result["errorLabelsOmit"])

            # Reraise the exception if we're in the with_transaction
            # callback.
            if in_with_transaction:
                raise context.exception
        else:
            result = self.run_operation(sessions, collection, op.copy())
            if "result" in op:
                if op["name"] == "runCommand":
                    self.check_command_result(expected_result, result)
                else:
                    self.check_result(expected_result, result)

    def run_operations(self, sessions, collection, ops, in_with_transaction=False):
        for op in ops:
            self._run_op(sessions, collection, op, in_with_transaction)

    # TODO: factor with test_command_monitoring.py
    def check_events(self, test, listener, session_ids):
        # Compare recorded CommandStartedEvents against the spec's
        # "expectations", normalizing server-generated values first.
        res = listener.results
        if not len(test["expectations"]):
            return

        # Give a nicer message when there are missing or extra events
        cmds = decode_raw([event.command for event in res["started"]])
        self.assertEqual(len(res["started"]), len(test["expectations"]), cmds)
        for i, expectation in enumerate(test["expectations"]):
            event_type = next(iter(expectation))
            event = res["started"][i]

            # The tests substitute 42 for any number other than 0.
            if event.command_name == "getMore" and event.command["getMore"]:
                event.command["getMore"] = Int64(42)
            elif event.command_name == "killCursors":
                event.command["cursors"] = [Int64(42)]
            elif event.command_name == "update":
                # TODO: remove this once PYTHON-1744 is done.
                # Add upsert and multi fields back into expectations.
                updates = expectation[event_type]["command"]["updates"]
                for update in updates:
                    update.setdefault("upsert", False)
                    update.setdefault("multi", False)

            # Replace afterClusterTime: 42 with actual afterClusterTime.
            expected_cmd = expectation[event_type]["command"]
            expected_read_concern = expected_cmd.get("readConcern")
            if expected_read_concern is not None:
                time = expected_read_concern.get("afterClusterTime")
                if time == 42:
                    actual_time = event.command.get("readConcern", {}).get("afterClusterTime")
                    if actual_time is not None:
                        expected_read_concern["afterClusterTime"] = actual_time

            recovery_token = expected_cmd.get("recoveryToken")
            if recovery_token == 42:
                expected_cmd["recoveryToken"] = CompareType(dict)

            # Replace lsid with a name like "session0" to match test.
            if "lsid" in event.command:
                for name, lsid in session_ids.items():
                    if event.command["lsid"] == lsid:
                        event.command["lsid"] = name
                        break

            for attr, expected in expectation[event_type].items():
                actual = getattr(event, attr)
                expected = wrap_types(expected)
                if isinstance(expected, dict):
                    for key, val in expected.items():
                        if val is None:
                            if key in actual:
                                self.fail("Unexpected key [%s] in %r" % (key, actual))
                        elif key not in actual:
                            self.fail("Expected key [%s] in %r" % (key, actual))
                        else:
                            self.assertEqual(
                                val, decode_raw(actual[key]), "Key [%s] in %s" % (key, actual)
                            )
                else:
                    self.assertEqual(actual, expected)

    def maybe_skip_scenario(self, test):
        if test.get("skipReason"):
            self.skipTest(test.get("skipReason"))

    def get_scenario_db_name(self, scenario_def):
        """Allow subclasses to override a test's database name."""
        return scenario_def["database_name"]

    def get_scenario_coll_name(self, scenario_def):
        """Allow subclasses to override a test's collection name."""
        return scenario_def["collection_name"]

    def get_outcome_coll_name(self, outcome, collection):
        """Allow subclasses to override outcome collection."""
        return collection.name

    def run_test_ops(self, sessions, collection, test):
        """Added to allow retryable writes spec to override a test's
        operation."""
        self.run_operations(sessions, collection, test["operations"])

    def parse_client_options(self, opts):
        """Allow encryption spec to override a clientOptions parsing."""
        # Convert test['clientOptions'] to dict to avoid a Jython bug using
        # "**" with ScenarioDict.
        return dict(opts)

    def setup_scenario(self, scenario_def):
        """Allow specs to override a test's setup."""
        db_name = self.get_scenario_db_name(scenario_def)
        coll_name = self.get_scenario_coll_name(scenario_def)
        documents = scenario_def["data"]

        # Setup the collection with as few majority writes as possible.
        db = client_context.client.get_database(db_name)
        coll_exists = bool(db.list_collection_names(filter={"name": coll_name}))
        if coll_exists:
            db[coll_name].delete_many({})
        # Only use majority wc only on the final write.
        wc = WriteConcern(w="majority")
        if documents:
            db.get_collection(coll_name, write_concern=wc).insert_many(documents)
        elif not coll_exists:
            # Ensure collection exists.
            db.create_collection(coll_name, write_concern=wc)

    def run_scenario(self, scenario_def, test):
        """Execute one spec test: set up data, fail points, clients and
        sessions, run the operations, then verify events and outcome."""
        self.maybe_skip_scenario(test)

        # Kill all sessions before and after each test to prevent an open
        # transaction (from a test failure) from blocking collection/database
        # operations during test set up and tear down.
        self.kill_all_sessions()
        self.addCleanup(self.kill_all_sessions)
        self.setup_scenario(scenario_def)
        database_name = self.get_scenario_db_name(scenario_def)
        collection_name = self.get_scenario_coll_name(scenario_def)
        # SPEC-1245 workaround StaleDbVersion on distinct
        for c in self.mongos_clients:
            c[database_name][collection_name].distinct("x")

        # Configure the fail point before creating the client.
        if "failPoint" in test:
            fp = test["failPoint"]
            self.set_fail_point(fp)
            self.addCleanup(
                self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"}
            )

        listener = OvertCommandListener()
        pool_listener = CMAPListener()
        server_listener = ServerAndTopologyEventListener()
        # Create a new client, to avoid interference from pooled sessions.
        client_options = self.parse_client_options(test["clientOptions"])
        # MMAPv1 does not support retryable writes.
        if client_options.get("retryWrites") is True and client_context.storage_engine == "mmapv1":
            self.skipTest("MMAPv1 does not support retryWrites=True")
        use_multi_mongos = test["useMultipleMongoses"]
        host = None
        if use_multi_mongos:
            if client_context.load_balancer or client_context.serverless:
                host = client_context.MULTI_MONGOS_LB_URI
            elif client_context.is_mongos:
                host = client_context.mongos_seeds()
        client = rs_client(
            h=host, event_listeners=[listener, pool_listener, server_listener], **client_options
        )
        self.scenario_client = client
        self.listener = listener
        self.pool_listener = pool_listener
        self.server_listener = server_listener
        # Close the client explicitly to avoid having too many threads open.
        self.addCleanup(client.close)

        # Create session0 and session1.
        sessions = {}
        session_ids = {}
        for i in range(2):
            # Don't attempt to create sessions if they are not supported by
            # the running server version.
            if not client_context.sessions_enabled:
                break
            session_name = "session%d" % i
            opts = camel_to_snake_args(test["sessionOptions"][session_name])
            if "default_transaction_options" in opts:
                txn_opts = self.parse_options(opts["default_transaction_options"])
                txn_opts = client_session.TransactionOptions(**txn_opts)
                opts["default_transaction_options"] = txn_opts

            s = client.start_session(**dict(opts))

            sessions[session_name] = s
            # Store lsid so we can access it after end_session, in check_events.
            session_ids[session_name] = s.session_id

        self.addCleanup(end_sessions, sessions)

        collection = client[database_name][collection_name]
        self.run_test_ops(sessions, collection, test)

        end_sessions(sessions)

        self.check_events(test, listener, session_ids)

        # Disable fail points.
        if "failPoint" in test:
            fp = test["failPoint"]
            self.set_fail_point({"configureFailPoint": fp["configureFailPoint"], "mode": "off"})

        # Assert final state is expected.
        outcome = test["outcome"]
        expected_c = outcome.get("collection")
        if expected_c is not None:
            outcome_coll_name = self.get_outcome_coll_name(outcome, collection)

            # Read from the primary with local read concern to ensure causal
            # consistency.
            outcome_coll = client_context.client[collection.database.name].get_collection(
                outcome_coll_name,
                read_preference=ReadPreference.PRIMARY,
                read_concern=ReadConcern("local"),
            )
            actual_data = list(outcome_coll.find(sort=[("_id", 1)]))

            # The expected data needs to be the left hand side here otherwise
            # CompareType(Binary) doesn't work.
            self.assertEqual(wrap_types(expected_c["data"]), actual_data)
def expect_any_error(op):
    """Return the operation's bare 'error' flag, or False for non-dicts."""
    return op.get("error") if isinstance(op, dict) else False
def expect_error_message(expected_result):
    """True when expected_result carries an 'errorContains' string."""
    if not isinstance(expected_result, dict):
        return False
    return isinstance(expected_result["errorContains"], str)
def expect_error_code(expected_result):
    """Return the expected 'errorCodeName' (falsy means none expected)."""
    if not isinstance(expected_result, dict):
        return False
    return expected_result["errorCodeName"]
def expect_error_labels_contain(expected_result):
    """Return the expected 'errorLabelsContain' list, if any."""
    if not isinstance(expected_result, dict):
        return False
    return expected_result["errorLabelsContain"]
def expect_error_labels_omit(expected_result):
    """Return the expected 'errorLabelsOmit' list, if any."""
    if not isinstance(expected_result, dict):
        return False
    return expected_result["errorLabelsOmit"]
def expect_error(op):
    """Return truthy when spec operation *op* declares any expected error.

    The checks short-circuit left to right; each expect_* helper assumes
    its key is present once ``op["result"]`` is a dict.
    """
    expected_result = op.get("result")
    return (
        expect_any_error(op)
        or expect_error_message(expected_result)
        or expect_error_code(expected_result)
        or expect_error_labels_contain(expected_result)
        or expect_error_labels_omit(expected_result)
    )
def end_sessions(sessions):
    """End every session in the *sessions* mapping."""
    for session in sessions.values():
        # Aborts the transaction if it's open.
        session.end_session()
def decode_raw(val):
    """Round-trip *val* through BSON to decode any RawBSONDocuments."""
    if not isinstance(val, (list, abc.Mapping)):
        return val
    return decode(encode({"v": val}))["v"]
TYPES = {
"binData": Binary,
"long": Int64,
}
def wrap_types(val):
    """Support $$type assertion in command results.

    Recursively walks lists and mappings; a mapping of the form
    {"$$type": name} is replaced by a CompareType wrapper for the
    corresponding BSON type. All other values pass through unchanged.
    """
    if isinstance(val, list):
        return [wrap_types(item) for item in val]
    if isinstance(val, abc.Mapping):
        type_name = val.get("$$type")
        if type_name:
            return CompareType(TYPES[type_name])
        return {key: wrap_types(value) for key, value in val.items()}
    return val
| 38.739195 | 100 | 0.616527 |
0010498eb0df81455022d6e3c08e4d15ac3d9906 | 312 | py | Python | actionslog/forms.py | nolifeinsense/django-actions-logger | 59a8bf7a866ec78f44ea2d8f3bea7f2d9a2d533b | [
"MIT"
] | 27 | 2016-01-27T21:30:08.000Z | 2021-04-18T18:24:22.000Z | actionslog/forms.py | nolifeinsense/django-actions-logger | 59a8bf7a866ec78f44ea2d8f3bea7f2d9a2d533b | [
"MIT"
] | 6 | 2016-02-04T16:05:45.000Z | 2021-04-14T11:00:00.000Z | actionslog/forms.py | nolifeinsense/django-actions-logger | 59a8bf7a866ec78f44ea2d8f3bea7f2d9a2d533b | [
"MIT"
] | 10 | 2016-02-16T05:32:06.000Z | 2022-03-09T01:28:24.000Z |
from django import forms
from django.utils.functional import lazy
from . import settings as app_conf
def get_action_choices():
    """Return LOG_ACTION_CHOICES from the app settings.

    Called through ``lazy()`` in LogActionForm so the choices are resolved
    when the field is evaluated rather than at import time.
    """
    return app_conf.LOG_ACTION_CHOICES
class LogActionForm(forms.ModelForm):
    """ModelForm whose ``action`` field choices are resolved lazily via
    get_action_choices()."""
    # lots of fields like this
    action = forms.ChoiceField(choices=lazy(get_action_choices, list)())
| 24 | 72 | 0.782051 |
6e66c82856fc4a887b28499eb6a337cab7c40a72 | 189 | py | Python | test/tests/str_default.py | kevinxucs/pyston | bdb87c1706ac74a0d15d9bc2bae53798678a5f14 | [
"Apache-2.0"
] | 1 | 2020-02-06T14:28:45.000Z | 2020-02-06T14:28:45.000Z | test/tests/str_default.py | kevinxucs/pyston | bdb87c1706ac74a0d15d9bc2bae53798678a5f14 | [
"Apache-2.0"
] | null | null | null | test/tests/str_default.py | kevinxucs/pyston | bdb87c1706ac74a0d15d9bc2bae53798678a5f14 | [
"Apache-2.0"
] | 1 | 2020-02-06T14:29:00.000Z | 2020-02-06T14:29:00.000Z | class C(object):
def __repr__(self):
print "repr"
return 1
# Compare object's default __repr__/__str__ with the overridden __repr__.
c = C()
print "C object" in object.__repr__(c)
print object.__str__(c)
print c.__repr__()
print c.__str__()
b4f87b2a342718c8f697f987f5cc02f01d06268e | 9,777 | py | Python | openmdao/surrogate_models/kriging.py | OwenMcDonnell/OpenMDAO | 72695acd3ff9ab2086c4f9dd87688b803e773899 | [
"Apache-2.0"
] | 1 | 2016-05-10T17:01:17.000Z | 2016-05-10T17:01:17.000Z | openmdao/surrogate_models/kriging.py | gsoxley/OpenMDAO | 709401e535cf6933215abd942d4b4d49dbf61b2b | [
"Apache-2.0"
] | 3 | 2016-05-10T16:55:46.000Z | 2018-10-22T23:28:52.000Z | openmdao/surrogate_models/kriging.py | gsoxley/OpenMDAO | 709401e535cf6933215abd942d4b4d49dbf61b2b | [
"Apache-2.0"
] | 2 | 2018-04-05T15:53:54.000Z | 2018-10-22T22:48:00.000Z | """Surrogate model based on Kriging."""
import numpy as np
import scipy.linalg as linalg
from scipy.optimize import minimize
from six.moves import zip, range
from openmdao.surrogate_models.surrogate_model import SurrogateModel
MACHINE_EPSILON = np.finfo(np.double).eps
class KrigingSurrogate(SurrogateModel):
    """
    Surrogate Modeling method based on the simple Kriging interpolation.

    Predictions are returned as a tuple of mean and RMSE. Based on Gaussian Processes
    for Machine Learning (GPML) by Rasmussen and Williams. (see also: scikit-learn).

    Attributes
    ----------
    alpha : ndarray
        Reduced likelihood parameter: alpha
    eval_rmse : bool
        When true, calculate the root mean square prediction error.
    L : ndarray
        Reduced likelihood parameter: L.
        NOTE(review): initialized in __init__ but never reassigned; train()
        stores the SVD factors U, S_inv and Vh instead.
    n_dims : int
        Number of independents in the surrogate
    n_samples : int
        Number of training points.
    nugget : double or ndarray, optional
        Nugget smoothing parameter for smoothing noisy data. Represents the variance
        of the input values. If nugget is an ndarray, it must be of the same length
        as the number of training points. Default: 10. * Machine Epsilon
    sigma2 : ndarray
        Reduced likelihood parameter: sigma squared
    thetas : ndarray
        Kriging hyperparameters.
    U : ndarray
        Left singular vectors of the correlation matrix (set by train()).
    S_inv : ndarray
        Regularized inverse singular values of the correlation matrix
        (set by train()).
    Vh : ndarray
        Right singular vectors (transposed) of the correlation matrix
        (set by train()).
    X : ndarray
        Training input values, normalized.
    X_mean : ndarray
        Mean of training input values, normalized.
    X_std : ndarray
        Standard deviation of training input values, normalized.
    Y : ndarray
        Training model response values, normalized.
    Y_mean : ndarray
        Mean of training model response values, normalized.
    Y_std : ndarray
        Standard deviation of training model response values, normalized.
    """

    def __init__(self, nugget=10. * MACHINE_EPSILON, eval_rmse=False):
        """
        Initialize all attributes.

        Parameters
        ----------
        nugget : double or ndarray, optional
            Nugget smoothing parameter for smoothing noisy data. Represents the variance
            of the input values. If nugget is an ndarray, it must be of the same length
            as the number of training points. Default: 10. * Machine Epsilon
        eval_rmse : bool
            Flag indicating whether the Root Mean Squared Error (RMSE) should be computed.
            Set to False by default.
        """
        super(KrigingSurrogate, self).__init__()

        self.n_dims = 0       # number of independent
        self.n_samples = 0       # number of training points
        self.thetas = np.zeros(0)

        # nugget smoothing parameter from [Sasena, 2002]
        self.nugget = nugget

        self.alpha = np.zeros(0)
        self.L = np.zeros(0)
        self.sigma2 = np.zeros(0)

        # Normalized Training Values
        self.X = np.zeros(0)
        self.Y = np.zeros(0)
        self.X_mean = np.zeros(0)
        self.X_std = np.zeros(0)
        self.Y_mean = np.zeros(0)
        self.Y_std = np.zeros(0)

        self.eval_rmse = eval_rmse

    def train(self, x, y):
        """
        Train the surrogate model with the given set of inputs and outputs.

        Parameters
        ----------
        x : array-like
            Training input locations
        y : array-like
            Model responses at given inputs.
        """
        super(KrigingSurrogate, self).train(x, y)

        x, y = np.atleast_2d(x, y)

        self.n_samples, self.n_dims = x.shape

        if self.n_samples <= 1:
            raise ValueError('KrigingSurrogate require at least 2 training points.')

        # Normalize the data
        X_mean = np.mean(x, axis=0)
        X_std = np.std(x, axis=0)
        Y_mean = np.mean(y, axis=0)
        Y_std = np.std(y, axis=0)

        # Guard constant columns against division by zero.
        X_std[X_std == 0.] = 1.
        Y_std[Y_std == 0.] = 1.

        X = (x - X_mean) / X_std
        Y = (y - Y_mean) / Y_std

        self.X = X
        self.Y = Y
        self.X_mean, self.X_std = X_mean, X_std
        self.Y_mean, self.Y_std = Y_mean, Y_std

        def _calcll(thetas):
            """Calculate loglike (callback function)."""
            # Optimize over log(theta) so theta stays positive and spans
            # orders of magnitude smoothly.
            loglike = self._calculate_reduced_likelihood_params(np.exp(thetas))[0]
            return -loglike

        bounds = [(np.log(1e-5), np.log(1e5)) for _ in range(self.n_dims)]

        optResult = minimize(_calcll, 1e-1 * np.ones(self.n_dims), method='slsqp',
                             options={'eps': 1e-3},
                             bounds=bounds)

        if not optResult.success:
            raise ValueError(
                'Kriging Hyper-parameter optimization failed: {0}'.format(optResult.message))

        self.thetas = np.exp(optResult.x)
        _, params = self._calculate_reduced_likelihood_params()
        self.alpha = params['alpha']
        self.U = params['U']
        self.S_inv = params['S_inv']
        self.Vh = params['Vh']
        self.sigma2 = params['sigma2']

    def _calculate_reduced_likelihood_params(self, thetas=None):
        """
        Calculate quantity with same maximum location as the log-likelihood for a given theta.

        Parameters
        ----------
        thetas : ndarray, optional
            Given input correlation coefficients. If none given, uses self.thetas
            from training.

        Returns
        -------
        ndarray
            Calculated reduced_likelihood
        dict
            Dictionary containing the parameters.
        """
        if thetas is None:
            thetas = self.thetas

        X, Y = self.X, self.Y
        params = {}

        # Correlation Matrix: pairwise |x_i - x_j| per dimension, then a
        # squared-exponential kernel weighted by thetas.
        distances = np.zeros((self.n_samples, self.n_dims, self.n_samples))
        for i in range(self.n_samples):
            distances[i, :, i + 1:] = np.abs(X[i, ...] - X[i + 1:, ...]).T
            distances[i + 1:, :, i] = distances[i, :, i + 1:].T

        R = np.exp(-thetas.dot(np.square(distances)))
        # Nugget on the diagonal regularizes/smooths noisy data.
        R[np.diag_indices_from(R)] = 1. + self.nugget

        [U, S, Vh] = linalg.svd(R)

        # Penrose-Moore Pseudo-Inverse:
        # Given A = USV^* and Ax=b, the least-squares solution is
        # x = V S^-1 U^* b.
        # Tikhonov regularization is used to make the solution significantly
        # more robust.
        h = 1e-8 * S[0]
        inv_factors = S / (S ** 2. + h ** 2.)

        alpha = Vh.T.dot(np.einsum('j,kj,kl->jl', inv_factors, U, Y))
        logdet = -np.sum(np.log(inv_factors))
        sigma2 = np.dot(Y.T, alpha).sum(axis=0) / self.n_samples
        reduced_likelihood = -(np.log(np.sum(sigma2)) +
                               logdet / self.n_samples)

        params['alpha'] = alpha
        params['sigma2'] = sigma2 * np.square(self.Y_std)
        params['S_inv'] = inv_factors
        params['U'] = U
        params['Vh'] = Vh

        return reduced_likelihood, params

    def predict(self, x):
        """
        Calculate predicted value of the response based on the current trained model.

        Parameters
        ----------
        x : array-like
            Point at which the surrogate is evaluated.

        Returns
        -------
        ndarray
            Kriging prediction.
        ndarray, optional (if eval_rmse is True)
            Root mean square of the prediction error.
        """
        super(KrigingSurrogate, self).predict(x)

        thetas = self.thetas
        if isinstance(x, list):
            x = np.array(x)
        x = np.atleast_2d(x)
        n_eval = x.shape[0]

        # Normalize input with the training statistics.
        x_n = (x - self.X_mean) / self.X_std

        r = np.zeros((n_eval, self.n_samples), dtype=x.dtype)
        for r_i, x_i in zip(r, x_n):
            # Correlation between the evaluation point and each training point.
            r_i[:] = np.exp(-thetas.dot(np.square((x_i - self.X).T)))

        # Scaled Predictor
        y_t = np.dot(r, self.alpha)

        # Predictor (de-normalized)
        y = self.Y_mean + self.Y_std * y_t

        if self.eval_rmse:
            mse = (1. - np.dot(np.dot(r, self.Vh.T),
                               np.einsum('j,kj,lk->jl', self.S_inv, self.U, r))) * self.sigma2

            # Forcing negative RMSE to zero if negative due to machine precision
            mse[mse < 0.] = 0.
            return y, np.sqrt(mse)

        return y

    def linearize(self, x):
        """
        Calculate the jacobian of the Kriging surface at the requested point.

        Parameters
        ----------
        x : array-like
            Point at which the surrogate Jacobian is evaluated.

        Returns
        -------
        ndarray
            Jacobian of surrogate output wrt inputs.
        """
        thetas = self.thetas

        # Normalize Input
        x_n = (x - self.X_mean) / self.X_std

        r = np.exp(-thetas.dot(np.square((x_n - self.X).T)))

        # Z = einsum('i,ij->ij', X, Y) is equivalent to, but much faster and
        # memory efficient than, diag(X).dot(Y) for vector X and 2D array Y.
        # i.e., Z[i,j] = X[i]*Y[i,j]
        gradr = r * -2 * np.einsum('i,ij->ij', thetas, (x_n - self.X).T)
        # Chain rule through both normalizations (1/X_std in, Y_std out).
        jac = np.einsum('i,j,ij->ij', self.Y_std, 1. /
                        self.X_std, gradr.dot(self.alpha).T)
        return jac
class FloatKrigingSurrogate(KrigingSurrogate):
    """
    Kriging surrogate variant whose predictions are returned as plain values.

    Only the mean of the underlying Kriging prediction is returned; the RMSE
    (if computed by the base class) is discarded.
    """

    def predict(self, x):
        """
        Calculate predicted value of response based on the current trained model.

        Parameters
        ----------
        x : array-like
            Point at which the surrogate is evaluated.

        Returns
        -------
        float
            Mean value of kriging prediction.
        """
        prediction = super(FloatKrigingSurrogate, self).predict(x)
        mean_value = prediction[0]
        return mean_value
| 31.743506 | 94 | 0.571955 |
f84da9caa36e8ed9911ad32d7da30f5b90092d59 | 2,967 | py | Python | paddlenlp/datasets/cail2019_scm.py | JunnYu/ConvBERT-Prod | a1351e1e7f9400cb8c71d0a15d23629b4cb055d4 | [
"Apache-2.0"
] | 11 | 2022-01-06T07:39:47.000Z | 2022-03-22T06:18:40.000Z | paddlenlp/datasets/cail2019_scm.py | JunnYu/ConvBERT-Prod | a1351e1e7f9400cb8c71d0a15d23629b4cb055d4 | [
"Apache-2.0"
] | null | null | null | paddlenlp/datasets/cail2019_scm.py | JunnYu/ConvBERT-Prod | a1351e1e7f9400cb8c71d0a15d23629b4cb055d4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os
from paddle.dataset.common import md5file
from paddle.utils.download import get_path_from_url
from paddlenlp.utils.env import DATA_HOME
from . import DatasetBuilder
__all__ = ['CAIL2019_SCM']
class CAIL2019_SCM(DatasetBuilder):
    '''
    CAIL2019-SCM contains 8,964 triplets of cases published by the Supreme People's
    Court of China. The input of CAIL2019-SCM is a triplet (A, B, C), where A, B, C
    are fact descriptions of three cases. The task of CAIL2019-SCM is to predict
    whether sim(A, B) > sim(A, C) or sim(A, C) > sim(A, B).
    See more details on https://arxiv.org/abs/1911.08962.
    '''

    # Per-split record: local file name, md5 checksum, and download URL.
    META_INFO = collections.namedtuple('META_INFO', ('file', 'md5', 'URL'))
    SPLITS = {
        'train': META_INFO(
            os.path.join('cail2019_scm_train.json'),
            'd50a105f9689e72be7d79adbba0ae224',
            'https://bj.bcebos.com/paddlenlp/datasets/cail2019/scm/cail2019_scm_train.json'
        ),
        'dev': META_INFO(
            os.path.join('cail2019_scm_dev.json'),
            'e36a295c1cb8c6b9fb28015907a42d9e',
            'https://bj.bcebos.com/paddlenlp/datasets/cail2019/scm/cail2019_scm_dev.json'
        ),
        'test': META_INFO(
            os.path.join('cail2019_scm_test.json'),
            '91a6cf060e1283f05fcc6a2027238379',
            'https://bj.bcebos.com/paddlenlp/datasets/cail2019/scm/cail2019_scm_test.json'
        )
    }

    def _get_data(self, mode, **kwargs):
        """Return the local path of the *mode* split, downloading it when the
        file is missing or its md5 checksum does not match."""
        default_root = os.path.join(DATA_HOME, self.__class__.__name__)
        filename, data_hash, URL = self.SPLITS[mode]
        fullname = os.path.join(default_root, filename)
        if not os.path.exists(fullname) or (
                data_hash and not md5file(fullname) == data_hash):
            get_path_from_url(URL, default_root)

        return fullname

    def _read(self, filename, *args):
        """Yield one example per JSON line: case texts "A"/"B"/"C" mapped to
        text_a/text_b/text_c plus the label."""
        with open(filename, "r", encoding="utf8") as f:
            for line in f.readlines():
                dic = json.loads(line)
                yield {
                    "text_a": dic["A"],
                    "text_b": dic["B"],
                    "text_c": dic["C"],
                    "label": dic["label"]
                }

    def get_labels(self):
        """
        Return labels of the CAIL2019_SCM object.
        """
        return ["B", "C"]
| 36.62963 | 91 | 0.634985 |
a01539f1c8dcb18d0cd809f438c890f07adb5efc | 4,604 | py | Python | src/python/peanoclaw/callbacks/solvercallback.py | unterweg/peanoclaw | 2d8b45727e3b26d824f8afc6a8772736176083af | [
"BSD-3-Clause"
] | 1 | 2015-07-14T10:05:52.000Z | 2015-07-14T10:05:52.000Z | src/python/peanoclaw/callbacks/solvercallback.py | unterweg/peanoclaw | 2d8b45727e3b26d824f8afc6a8772736176083af | [
"BSD-3-Clause"
] | null | null | null | src/python/peanoclaw/callbacks/solvercallback.py | unterweg/peanoclaw | 2d8b45727e3b26d824f8afc6a8772736176083af | [
"BSD-3-Clause"
] | 1 | 2019-12-03T15:58:53.000Z | 2019-12-03T15:58:53.000Z | '''
Created on Jan 29, 2013
@author: kristof
'''
import time
from ctypes import c_bool
from ctypes import c_double
from ctypes import c_int
from ctypes import CFUNCTYPE
from ctypes import py_object
from ctypes import POINTER
class SolverCallback(object):
    '''
    This class encapsulates the callback for solving one timestep on
    a single subgrid.
    '''

    # ctypes prototype for the callback handed to the native (Peano) side.
    # Returns a double; the first argument is an out-array receiving
    # (dt, estimated next dt).
    CALLBACK_SOLVER = CFUNCTYPE(c_double,
                                POINTER(c_double),  # Return array
                                py_object,  # q
                                py_object,  # qbc
                                py_object,  # aux
                                c_int,  # subdivision factor X0
                                c_int,  # subdivision factor X1
                                c_int,  # subdivision factor X2
                                c_int,  # unknowns per cell
                                c_int,  # aux fields per cell
                                c_double, c_double, c_double,  # size
                                c_double, c_double, c_double,  # position
                                c_double,  # current time
                                c_double,  # maximum timestep size
                                c_double,  # estimated next timestep size
                                c_bool)  # use dimensional splitting

    def __init__(self, solver, refinement_criterion, initial_minimal_mesh_width, fixed_timestep_size):
        '''
        Constructor
        '''
        self.solver = solver
        self.refinement_criterion = refinement_criterion
        self.initial_minimal_mesh_width = initial_minimal_mesh_width
        self.fixed_timestep_size = fixed_timestep_size

        # Statistics
        self.number_of_non_disposed_cells = 0
        self.number_of_rollbacks = 0
        self.total_solver_time = 0.0

        # Cached ctypes callback object (created lazily; must be kept alive
        # while the native side holds the function pointer).
        self.callback = None

    def get_solver_callback(self):
        r"""
        Creates a closure for the solver callback method.
        """
        def callback_solver(return_dt_and_estimated_next_dt, q, qbc, aux, subdivision_factor_x0, subdivision_factor_x1, subdivision_factor_x2, unknowns_per_cell, aux_fields_per_cell, size_x, size_y, size_z, position_x, position_y, position_z, current_time, maximum_timestep_size, estimated_next_dt, use_dimensional_splitting):
            return self.call_subgridsolver(return_dt_and_estimated_next_dt, q, qbc, aux, subdivision_factor_x0, subdivision_factor_x1, subdivision_factor_x2, unknowns_per_cell, aux_fields_per_cell, size_x, size_y, size_z, position_x, position_y, position_z, current_time, maximum_timestep_size, estimated_next_dt, use_dimensional_splitting)

        if not self.callback:
            self.callback = self.CALLBACK_SOLVER(callback_solver)
        return self.callback

    def call_subgridsolver(self, return_dt_and_estimated_next_dt, q, qbc, aux, subdivision_factor_x0, subdivision_factor_x1, subdivision_factor_x2, unknowns_per_cell, aux_fields_per_cell, size_x, size_y, size_z, position_x, position_y, position_z, current_time, maximum_timestep_size, estimated_next_dt, use_dimensional_splitting):
        """
        Sets up a subgridsolver and calls it for doing the timestep.
        Retrieves the new data and calls the refinement criterion.
        """
        # NOTE(review): starttime is captured but total_solver_time is never
        # updated with it -- timing statistic looks unfinished.
        starttime = time.time()

        # Fix aux array
        if(aux_fields_per_cell == 0):
            aux = None

        # Set up grid information for current patch
        import peanoclaw
        subgridsolver = peanoclaw.SubgridSolver(
            self.solver.solver,
            self.solver.solution.state,
            q,
            qbc,
            aux,
            (position_x, position_y, position_z),
            (size_x, size_y, size_z),
            (subdivision_factor_x0, subdivision_factor_x1, subdivision_factor_x2),
            unknowns_per_cell,
            aux_fields_per_cell,
            current_time)

        new_q, number_of_rollbacks = subgridsolver.step(maximum_timestep_size, estimated_next_dt, self.fixed_timestep_size)

        # Copy back the array with new values
        q[:] = new_q[:]
        self.solver.solution.t = subgridsolver.solution.t
        self.number_of_rollbacks += number_of_rollbacks

        # Report the actually-taken dt and the solver's next-dt estimate
        # through the out-array.
        return_dt_and_estimated_next_dt[0] = self.solver.solution.t - current_time
        return_dt_and_estimated_next_dt[1] = self.solver.solver.dt

        # Clean up: trigger a garbage collection once enough ghost cells
        # have accumulated.
        if self.number_of_non_disposed_cells >= 1e6:
            import gc
            gc.collect()
            self.number_of_non_disposed_cells = 0
        else:
            self.number_of_non_disposed_cells += qbc.shape[1] * qbc.shape[2]

        # Steer refinement
        if self.refinement_criterion == None:
            return self.initial_minimal_mesh_width
        else:
            return self.refinement_criterion(subgridsolver.solution.state)
| 39.016949 | 336 | 0.672024 |
6a1df7ae9fb5b5e4562bbcadb568495d3f26eba2 | 1,330 | py | Python | tests/standalone_apps/namespace_pkg_config_source_test/namespace_test/test_namespace.py | samuelstanton/hydra | 9bf7800157692795090f3695efe136bbbd6fef1d | [
"MIT"
] | 1 | 2021-09-29T06:24:36.000Z | 2021-09-29T06:24:36.000Z | tests/standalone_apps/namespace_pkg_config_source_test/namespace_test/test_namespace.py | pkassotis/hydra | 2d859b664e5ba69d2ad74427f7362406cdd7e170 | [
"MIT"
] | null | null | null | tests/standalone_apps/namespace_pkg_config_source_test/namespace_test/test_namespace.py | pkassotis/hydra | 2d859b664e5ba69d2ad74427f7362406cdd7e170 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pytest import mark, param
from hydra._internal.core_plugins.importlib_resources_config_source import (
ImportlibResourcesConfigSource,
)
from hydra.core.global_hydra import GlobalHydra
from hydra.experimental import initialize
from hydra.test_utils.config_source_common_tests import ConfigSourceTestSuite
from hydra.test_utils.test_utils import chdir_plugin_root
chdir_plugin_root()
@mark.parametrize(
    "type_, path",
    [
        param(
            ImportlibResourcesConfigSource,
            "pkg://some_namespace.namespace_test.dir",
            id="pkg_in_namespace",
        ),
    ],
)
class TestCoreConfigSources(ConfigSourceTestSuite):
    # Runs the shared ConfigSourceTestSuite against a config package that
    # lives inside a namespace package; all tests come from the base suite.
    pass
def test_config_in_dir() -> None:
    # Initialize Hydra against a config directory that lives inside a
    # namespace package and verify that groups and their options are
    # discovered through it.
    with initialize(config_path="../some_namespace/namespace_test/dir"):
        config_loader = GlobalHydra.instance().config_loader()
        assert "cifar10" in config_loader.get_group_options("dataset")
        assert "imagenet" in config_loader.get_group_options("dataset")
        assert "level1" in config_loader.list_groups("")
        assert "level2" in config_loader.list_groups("level1")
        assert "nested1" in config_loader.get_group_options("level1/level2")
        assert "nested2" in config_loader.get_group_options("level1/level2")
| 35 | 77 | 0.742105 |
ebb00fc436874cf61365f3b682946c8f3a03f001 | 1,074 | py | Python | tests/runtime/columntest.py | dcramer/jinja1-djangosupport | 755287f155c18ccabe69f1318bacdaca14f55da3 | [
"BSD-3-Clause"
] | 2 | 2015-09-24T19:53:35.000Z | 2015-11-06T10:47:02.000Z | tests/runtime/columntest.py | dcramer/jinja1-djangosupport | 755287f155c18ccabe69f1318bacdaca14f55da3 | [
"BSD-3-Clause"
] | null | null | null | tests/runtime/columntest.py | dcramer/jinja1-djangosupport | 755287f155c18ccabe69f1318bacdaca14f55da3 | [
"BSD-3-Clause"
] | null | null | null | import jdebug
from jinja import from_string
# Template exercising the Jinja slice/batch filters with and without fill
# values; rendered below with a 16-item range.
template = from_string(u'''\
<h1>Unfilled</h1>
<div class="index">
{%- for column in items|slice(3) %}
  <div class="col-{{ loop.index }}">
    <ul>
    {%- for item in column %}
      <li>{{ item }}</li>
    {%- endfor %}
    </ul>
  </div>
{%- endfor %}
</div>
<h1>Filled</h1>
<div class="index">
{%- for column in items|slice(3, 'missing') %}
  <div class="col-{{ loop.index }}">
    <ul>
    {%- for item in column %}
      <li>{{ item }}</li>
    {%- endfor %}
    </ul>
  </div>
{%- endfor %}
</div>
<h1>Filled Table</h1>
<table>
{%- for row in items|batch(3, '&nbsp;') %}
  <tr>
  {%- for column in row %}
    <td>{{ column }}</td>
  {%- endfor %}
  </tr>
{%- endfor %}
</table>
<h1>Unfilled Table</h1>
<table>
{%- for row in items|batch(3) %}
  <tr>
  {%- for column in row %}
    <td>{{ column }}</td>
  {%- endfor %}
  {%- if row|length < 3 %}
    <td colspan="{{ 3 - (row|length) }}">&nbsp;</td>
  {%- endif %}
  </tr>
{%- endfor %}
</table>''')

print template.render(items=range(16))
| 18.517241 | 52 | 0.497207 |
888c33dfa7f44fa8d4ddb0d9a465ed3e092de931 | 227 | py | Python | pfstratsim/solvers/__init__.py | aarondorffeld/portfolio-strategy-simulation | 8c4771df24e3c45865c7df2a68e51ef018f7be1b | [
"MIT"
] | null | null | null | pfstratsim/solvers/__init__.py | aarondorffeld/portfolio-strategy-simulation | 8c4771df24e3c45865c7df2a68e51ef018f7be1b | [
"MIT"
] | 42 | 2021-11-06T15:19:49.000Z | 2022-01-23T16:38:21.000Z | pfstratsim/solvers/__init__.py | aarondorffeld/portfolio-strategy-simulation | 8c4771df24e3c45865c7df2a68e51ef018f7be1b | [
"MIT"
] | null | null | null | from .solver import Solver
from .equal_proportion import EqualProportion
from .mathematical_programming import MathematicalProgramming
__all__ = [
"Solver",
"EqualProportion",
"MathematicalProgramming",
]
| 22.7 | 62 | 0.753304 |
a0b4f39621b7c1854d52758c011018758d1339c3 | 561 | py | Python | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyNumber/auto_rest_number_test_service/operations/__init__.py | ljhljh235/AutoRest | b9ab4000e9b93d16925db84d08bafc225b098f8e | [
"MIT"
] | 3 | 2018-03-20T22:36:32.000Z | 2021-07-15T02:36:51.000Z | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyNumber/auto_rest_number_test_service/operations/__init__.py | ljhljh235/AutoRest | b9ab4000e9b93d16925db84d08bafc225b098f8e | [
"MIT"
] | null | null | null | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyNumber/auto_rest_number_test_service/operations/__init__.py | ljhljh235/AutoRest | b9ab4000e9b93d16925db84d08bafc225b098f8e | [
"MIT"
] | 1 | 2019-07-20T12:20:03.000Z | 2019-07-20T12:20:03.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .number_operations import NumberOperations
# Explicit public API of this generated operations package.
__all__ = [
    'NumberOperations',
]
| 33 | 76 | 0.55615 |
9a0b3df570b86b784bcdaada06c7946dbbb32fec | 18,686 | py | Python | nested_inline/admin.py | bui3itf0ky00/django-nested-inline | aa0a5263ece7340ea5d5b7a3fdea8f8135bc4fa0 | [
"MIT"
] | null | null | null | nested_inline/admin.py | bui3itf0ky00/django-nested-inline | aa0a5263ece7340ea5d5b7a3fdea8f8135bc4fa0 | [
"MIT"
] | null | null | null | nested_inline/admin.py | bui3itf0ky00/django-nested-inline | aa0a5263ece7340ea5d5b7a3fdea8f8135bc4fa0 | [
"MIT"
] | null | null | null | from django import VERSION, forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.options import InlineModelAdmin, reverse
from django.contrib.admin.utils import unquote
from django.core.exceptions import PermissionDenied
from django.db import models, transaction
from django.forms.formsets import all_valid
from django.http import Http404
from django.templatetags.static import static
from django.utils.decorators import method_decorator
from django.utils.encoding import force_str
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_protect
# csrf_protect adapted for use on methods (e.g. ModelAdmin views).
csrf_protect_m = method_decorator(csrf_protect)
class InlineInstancesMixin():
    """Mixin providing permission-aware instantiation of ``self.inlines``."""

    def get_inline_instances(self, request, obj=None):
        """
        Instantiate each inline class, filtered by the user's permissions.

        Inlines for which the user has none of add/change/delete permission
        are skipped entirely; inlines the user may not add get ``max_num = 0``
        so no blank extra forms are rendered. When ``request`` is falsy the
        permission checks are skipped and every inline is returned.
        """
        inline_instances = []
        for inline_class in self.inlines:
            inline = inline_class(self.model, self.admin_site)
            if request:
                # Django 2.1 added the ``obj`` argument to
                # InlineModelAdmin.has_add_permission(); compute the add
                # permission once instead of twice per inline.
                if VERSION < (2, 1, 0):
                    can_add = inline.has_add_permission(request)
                else:
                    can_add = inline.has_add_permission(request, obj)
                if not (can_add or
                        inline.has_change_permission(request, obj) or
                        inline.has_delete_permission(request, obj)):
                    continue
                if not can_add:
                    # View/change-only users get no blank "add" forms.
                    inline.max_num = 0
            inline_instances.append(inline)

        return inline_instances
class NestedModelAdmin(InlineInstancesMixin, admin.ModelAdmin):
    class Media:
        # Extra assets required to render and drive nested inlines on the
        # change form.
        css = {
            "all": ('admin/css/forms-nested.css',)
        }
        # Serve the unminified script when DEBUG is on.
        js = ('admin/js/inlines-nested%s.js' % ('' if settings.DEBUG else '.min'),)
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
for form in formset.forms:
if hasattr(form, 'nested_formsets') and form not in formset.deleted_forms:
for nested_formset in form.nested_formsets:
self.save_formset(request, form, nested_formset, change)
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
    def add_nested_inline_formsets(self, request, inline, formset, depth=0):
        """
        Attach nested inline formsets to every form of *formset*, recursively.

        Each form gets a ``nested_formsets`` list, one formset per nested
        inline. A formset is bound to POST data only when the request is a
        POST that actually contains keys for its prefix; otherwise it is
        created unbound. Recursion is capped at 5 levels.
        """
        if depth > 5:
            raise Exception("Maximum nesting depth reached (5)")
        for form in formset.forms:
            nested_formsets = []
            for nested_inline in inline.get_inline_instances(request):
                InlineFormSet = nested_inline.get_formset(request, form.instance)
                # Chain the parent form's prefix so POST keys are unique per
                # nesting path.
                prefix = "%s-%s" % (form.prefix, InlineFormSet.get_default_prefix())

                if request.method == 'POST' and any(s.startswith(prefix) for s in request.POST.keys()):
                    nested_formset = InlineFormSet(request.POST, request.FILES,
                                                   instance=form.instance,
                                                   prefix=prefix, queryset=nested_inline.get_queryset(request))
                else:
                    nested_formset = InlineFormSet(instance=form.instance,
                                                   prefix=prefix, queryset=nested_inline.get_queryset(request))
                nested_formsets.append(nested_formset)
                if nested_inline.inlines:
                    self.add_nested_inline_formsets(request, nested_inline, nested_formset, depth=depth + 1)
            form.nested_formsets = nested_formsets
    def wrap_nested_inline_formsets(self, request, inline, formset):
        """
        Wrap each nested formset in a helpers.InlineAdminFormSet for template
        rendering, recursively, replacing ``form.nested_formsets`` in place.

        Returns the accumulated form media, or None when nothing was wrapped.
        """
        media = None

        def get_media(extra_media):
            # Accumulate media; 'media' is read from the enclosing scope.
            if media:
                return media + extra_media
            else:
                return extra_media

        for form in formset.forms:
            wrapped_nested_formsets = []
            for nested_inline, nested_formset in zip(inline.get_inline_instances(request), form.nested_formsets):
                if form.instance.pk:
                    instance = form.instance
                else:
                    instance = None
                fieldsets = list(nested_inline.get_fieldsets(request, instance))
                readonly = list(nested_inline.get_readonly_fields(request, instance))
                prepopulated = dict(nested_inline.get_prepopulated_fields(request, instance))
                wrapped_nested_formset = helpers.InlineAdminFormSet(
                    nested_inline, nested_formset,
                    fieldsets, prepopulated, readonly, model_admin=self,
                )
                wrapped_nested_formsets.append(wrapped_nested_formset)
                media = get_media(wrapped_nested_formset.media)
                if nested_inline.inlines:
                    media = get_media(self.wrap_nested_inline_formsets(request, nested_inline, nested_formset))
            form.nested_formsets = wrapped_nested_formsets
        return media
def formset_has_nested_data(self, formsets):
for formset in formsets:
if not formset.is_bound:
pass
for form in formset:
if hasattr(form, 'cleaned_data') and form.cleaned_data:
return True
elif hasattr(form, 'nested_formsets'):
if self.formset_has_nested_data(form.nested_formsets):
return True
    def all_valid_with_nesting(self, formsets):
        """
        Recursively validate all nested formsets.

        A form that has no cleaned data of its own but whose nested formsets
        do carry data is flagged with a form-level error, since the child
        rows cannot be saved without their parent.
        """
        if not all_valid(formsets):
            return False

        for formset in formsets:
            # NOTE(review): 'pass' below is a no-op -- unbound formsets are
            # still iterated. Presumably 'continue' was intended; confirm
            # before changing.
            if not formset.is_bound:
                pass
            for form in formset:
                if hasattr(form, 'nested_formsets'):
                    if not self.all_valid_with_nesting(form.nested_formsets):
                        return False

                    # TODO - find out why this breaks when extra = 1 and just adding new item with no sub items
                    if (not hasattr(form, 'cleaned_data') or not form.cleaned_data) and\
                            self.formset_has_nested_data(form.nested_formsets):
                        form._errors["__all__"] = form.error_class(
                            [u"Parent object must be created when creating nested inlines."]
                        )
                        return False
        return True
    @csrf_protect_m
    @transaction.atomic
    def add_view(self, request, form_url='', extra_context=None):
        "The 'add' admin view for this model."
        model = self.model
        opts = model._meta

        if not self.has_add_permission(request):
            raise PermissionDenied

        ModelForm = self.get_form(request)
        formsets = []
        inline_instances = self.get_inline_instances(request, None)
        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES)
            if form.is_valid():
                new_object = self.save_form(request, form, change=False)
                form_validated = True
            else:
                form_validated = False
                # Unsaved placeholder so inline formsets can still be bound
                # and re-rendered with errors.
                new_object = self.model()
            prefixes = {}
            for FormSet, inline in self.get_formsets_with_inlines(request):
                prefix = FormSet.get_default_prefix()
                # Disambiguate repeated prefixes (same inline model used
                # more than once).
                prefixes[prefix] = prefixes.get(prefix, 0) + 1
                if prefixes[prefix] != 1 or not prefix:
                    prefix = "%s-%s" % (prefix, prefixes[prefix])
                formset = FormSet(data=request.POST, files=request.FILES,
                                  instance=new_object,
                                  save_as_new="_saveasnew" in request.POST,
                                  prefix=prefix, queryset=inline.get_queryset(request))
                formsets.append(formset)
                if inline.inlines:
                    self.add_nested_inline_formsets(request, inline, formset)
            if self.all_valid_with_nesting(formsets) and form_validated:
                self.save_model(request, new_object, form, False)
                self.save_related(request, form, formsets, False)
                args = ()
                # Provide `add_message` argument to ModelAdmin.log_addition for
                # Django 1.9 and up.
                if VERSION[:2] >= (1, 9):
                    add_message = self.construct_change_message(
                        request, form, formsets, add=True
                    )
                    args = (request, new_object, add_message)
                else:
                    args = (request, new_object)
                self.log_addition(*args)
                return self.response_add(request, new_object)
        else:
            # Prepare the dict of initial data from the request.
            # We have to special-case M2Ms as a list of comma-separated PKs.
            initial = dict(request.GET.items())
            for k in initial:
                try:
                    f = opts.get_field(k)
                except models.FieldDoesNotExist:
                    continue
                if isinstance(f, models.ManyToManyField):
                    initial[k] = initial[k].split(",")
            form = ModelForm(initial=initial)
            prefixes = {}
            for FormSet, inline in self.get_formsets_with_inlines(request):
                prefix = FormSet.get_default_prefix()
                prefixes[prefix] = prefixes.get(prefix, 0) + 1
                if prefixes[prefix] != 1 or not prefix:
                    prefix = "%s-%s" % (prefix, prefixes[prefix])
                formset = FormSet(instance=self.model(), prefix=prefix,
                                  queryset=inline.get_queryset(request))
                formsets.append(formset)
                if hasattr(inline, 'inlines') and inline.inlines:
                    self.add_nested_inline_formsets(request, inline, formset)

        # Render path (GET, or POST with validation errors): wrap the form
        # and every (nested) formset for the admin template and collect media.
        adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)),
                                      self.get_prepopulated_fields(request),
                                      self.get_readonly_fields(request),
                                      model_admin=self)
        media = self.media + adminForm.media

        inline_admin_formsets = []
        for inline, formset in zip(inline_instances, formsets):
            fieldsets = list(inline.get_fieldsets(request))
            readonly = list(inline.get_readonly_fields(request))
            prepopulated = dict(inline.get_prepopulated_fields(request))
            inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
                fieldsets, prepopulated, readonly, model_admin=self)
            inline_admin_formsets.append(inline_admin_formset)
            media = media + inline_admin_formset.media
            if hasattr(inline, 'inlines') and inline.inlines:
                extra_media = self.wrap_nested_inline_formsets(
                    request, inline, formset)
                if extra_media:
                    media += extra_media

        context = {
            'title': _('Add %s') % force_str(opts.verbose_name),
            'adminform': adminForm,
            'is_popup': "_popup" in request.GET,
            'show_delete': False,
            'media': media,
            'inline_admin_formsets': inline_admin_formsets,
            'errors': helpers.AdminErrorList(form, formsets),
            'app_label': opts.app_label,
        }
        context.update(self.admin_site.each_context(request))
        context.update(extra_context or {})
        return self.render_change_form(request, context, form_url=form_url, add=True)
    @csrf_protect_m
    @transaction.atomic
    def change_view(self, request, object_id, form_url='', extra_context=None):
        """The 'change' admin view for this model.

        Mirrors Django's stock ``ModelAdmin.change_view`` but builds
        nested inline formsets: after each top-level formset is
        constructed, ``add_nested_inline_formsets`` recurses into any
        inline that itself declares ``inlines``, and validation uses
        ``all_valid_with_nesting`` so the nested formsets are checked too.

        :param request: the current HttpRequest.
        :param object_id: primary key of the object being edited (still
            URL-quoted; unquoted below before lookup).
        :param form_url: value passed through to the rendered form's
            ``action`` attribute.
        :param extra_context: optional dict merged into the template
            context last, so it can override the defaults built here.
        :raises PermissionDenied: if the user lacks change permission.
        :raises Http404: if no object with this primary key exists.
        """
        model = self.model
        opts = model._meta
        # Permission is checked before the existence check so that
        # unauthorized users get 403, not information about which ids exist.
        obj = self.get_object(request, unquote(object_id))
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        if obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                'name': force_str(opts.verbose_name), 'key': escape(object_id)})
        # "Save as new" re-enters through add_view with this model's add URL.
        # NOTE(review): uses opts.module_name, which newer Django renamed to
        # model_name — confirm against the Django version this targets.
        if request.method == 'POST' and "_saveasnew" in request.POST:
            return self.add_view(request, form_url=reverse('admin:%s_%s_add' %
                                                           (opts.app_label,
                                                            opts.module_name),
                                                           current_app=self.admin_site.name))
        ModelForm = self.get_form(request, obj)
        formsets = []
        inline_instances = self.get_inline_instances(request, obj)
        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES, instance=obj)
            if form.is_valid():
                form_validated = True
                # save_form returns an unsaved instance; nothing hits the
                # database until save_model below.
                new_object = self.save_form(request, form, change=True)
            else:
                form_validated = False
                new_object = obj
            # Track how many formsets share each default prefix so repeated
            # inlines of the same model get unique "prefix-N" names.
            prefixes = {}
            for FormSet, inline in self.get_formsets_with_inlines(request, new_object):
                prefix = FormSet.get_default_prefix()
                prefixes[prefix] = prefixes.get(prefix, 0) + 1
                if prefixes[prefix] != 1 or not prefix:
                    prefix = "%s-%s" % (prefix, prefixes[prefix])
                formset = FormSet(
                    request.POST, request.FILES, instance=new_object,
                    prefix=prefix, queryset=inline.get_queryset(request),
                )
                formsets.append(formset)
                # Recurse into nested inlines so their formsets are bound to
                # this POST as well.
                if hasattr(inline, 'inlines') and inline.inlines:
                    self.add_nested_inline_formsets(request, inline, formset)
            # Only persist when the form AND every (nested) formset validate.
            if self.all_valid_with_nesting(formsets) and form_validated:
                self.save_model(request, new_object, form, True)
                self.save_related(request, form, formsets, True)
                change_message = self.construct_change_message(request, form, formsets)
                self.log_change(request, new_object, change_message)
                return self.response_change(request, new_object)
        else:
            # GET: build unbound form and formsets from the existing object.
            form = ModelForm(instance=obj)
            prefixes = {}
            for FormSet, inline in self.get_formsets_with_inlines(request, obj):
                prefix = FormSet.get_default_prefix()
                prefixes[prefix] = prefixes.get(prefix, 0) + 1
                if prefixes[prefix] != 1 or not prefix:
                    prefix = "%s-%s" % (prefix, prefixes[prefix])
                formset = FormSet(instance=obj, prefix=prefix, queryset=inline.get_queryset(request))
                formsets.append(formset)
                if hasattr(inline, 'inlines') and inline.inlines:
                    self.add_nested_inline_formsets(request, inline, formset)
        adminForm = helpers.AdminForm(
            form, self.get_fieldsets(request, obj),
            self.get_prepopulated_fields(request, obj),
            self.get_readonly_fields(request, obj),
            model_admin=self,
        )
        # Accumulate media from the admin form, every inline formset, and any
        # extra media contributed by nested formsets.
        media = self.media + adminForm.media
        inline_admin_formsets = []
        # Relies on get_formsets_with_inlines and get_inline_instances
        # yielding inlines in the same order, so zip pairs them correctly.
        for inline, formset in zip(inline_instances, formsets):
            fieldsets = list(inline.get_fieldsets(request, obj))
            readonly = list(inline.get_readonly_fields(request, obj))
            prepopulated = dict(inline.get_prepopulated_fields(request, obj))
            inline_admin_formset = helpers.InlineAdminFormSet(
                inline, formset, fieldsets, prepopulated, readonly, model_admin=self,
            )
            inline_admin_formsets.append(inline_admin_formset)
            media = media + inline_admin_formset.media
            if hasattr(inline, 'inlines') and inline.inlines:
                extra_media = self.wrap_nested_inline_formsets(request, inline, formset)
                if extra_media:
                    media += extra_media
        context = {
            'title': _('Change %s') % force_str(opts.verbose_name),
            'adminform': adminForm,
            'object_id': object_id,
            'original': obj,
            'is_popup': "_popup" in request.GET,
            'media': media,
            'inline_admin_formsets': inline_admin_formsets,
            'errors': helpers.AdminErrorList(form, formsets),
            'app_label': opts.app_label,
        }
        context.update(self.admin_site.each_context(request))
        # extra_context is applied last so callers can override anything above.
        context.update(extra_context or {})
        return self.render_change_form(request, context, change=True, obj=obj, form_url=form_url)
class NestedInline(InlineInstancesMixin, InlineModelAdmin):
    """Base class for inline admins that may themselves declare ``inlines``.

    Subclasses list child inline classes in ``inlines``; the nested
    add/change views above discover them via ``get_formsets_with_inlines``.
    """

    # NOTE(review): both of these are mutable class attributes shared by all
    # subclasses/instances that do not override them — confirm nothing
    # mutates them in place (appending would leak state across requests).
    inlines = []
    new_objects = []

    @property
    def media(self):
        """Collect the admin JavaScript needed by this inline.

        Same set as Django's stock inline media, except the stock
        ``inlines.js`` is replaced by ``inlines-nested.js`` to drive the
        nested formset UI. Minified assets are used unless DEBUG is on.
        """
        extra = '' if settings.DEBUG else '.min'
        # jQuery moved under vendor/ in Django 1.9; pick the right path.
        if VERSION[:2] >= (1, 9):
            js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js']
        else:
            js = ['jquery%s.js' % extra, 'jquery.init.js']
        js.append('inlines-nested%s.js' % extra)
        if self.prepopulated_fields:
            js.extend(['urlify.js', 'prepopulate%s.js' % extra])
        if self.filter_vertical or self.filter_horizontal:
            js.extend(['SelectBox.js', 'SelectFilter2.js'])
        return forms.Media(js=[static('admin/js/%s' % url) for url in js])

    def get_formsets_with_inlines(self, request, obj=None):
        """Yield ``(formset_class, inline_instance)`` pairs for each child inline."""
        for inline in self.get_inline_instances(request):
            yield inline.get_formset(request, obj), inline
class NestedStackedInline(NestedInline):
    """Stacked-layout nested inline; only swaps in the nested template."""
    template = 'admin/edit_inline/stacked-nested.html'
class NestedTabularInline(NestedInline):
    """Tabular-layout nested inline; only swaps in the nested template."""
    template = 'admin/edit_inline/tabular-nested.html'
| 46.024631 | 114 | 0.583324 |
af056cdb2df2dd347643ca9e4058ff17b7acbefb | 675 | py | Python | Pima-indians-diabetics/test.py | jyothiprakashpanaik/ML-4-e | dfaa0bc21ae6309bca95d7ef8c5db0274ed3038a | [
"MIT"
] | 1 | 2021-10-01T18:12:05.000Z | 2021-10-01T18:12:05.000Z | Pima-indians-diabetics/test.py | jyothiprakashpanaik/ML-4-e | dfaa0bc21ae6309bca95d7ef8c5db0274ed3038a | [
"MIT"
] | null | null | null | Pima-indians-diabetics/test.py | jyothiprakashpanaik/ML-4-e | dfaa0bc21ae6309bca95d7ef8c5db0274ed3038a | [
"MIT"
] | 1 | 2021-10-01T18:17:45.000Z | 2021-10-01T18:17:45.000Z | from keras.models import model_from_json
import numpy as np
from keras.preprocessing import image
from numpy import loadtxt
from sklearn.metrics import confusion_matrix
json_file = open('model.json','r')
load_model_json = json_file.read()
json_file.close()
model = model_from_json(load_model_json)
model.load_weights('model.h5')
print('Loaded model from disk')
dataset = loadtxt('pima-indians-diabetes.csv',delimiter=',')
dataset = dataset[650:]
x = dataset[:,0:8]
y = dataset[:,8]
pred = model.predict_classes(x)
for i in range(len(y)):
print('%s => %d (expected %d)' % (str(x[i]),pred[i],y[i]))
y_pred = model.predict_classes(x)
print(confusion_matrix(y,y_pred))
| 22.5 | 60 | 0.737778 |
ad52a559c0c3d618d8b3df285ee605d1fe5e6f4f | 113 | py | Python | game/gamesrc/world/worldbuild.py | abbacode/avaloria | 02e1805ac6e74543c96408b7951429f94bc140ca | [
"ClArtistic"
] | null | null | null | game/gamesrc/world/worldbuild.py | abbacode/avaloria | 02e1805ac6e74543c96408b7951429f94bc140ca | [
"ClArtistic"
] | null | null | null | game/gamesrc/world/worldbuild.py | abbacode/avaloria | 02e1805ac6e74543c96408b7951429f94bc140ca | [
"ClArtistic"
] | null | null | null | #INSERT backend_weapons
#INSERT backend_items
#INSERT backend_npcs
#INSERT quests
#INSERT zones
#INSERT globals
| 16.142857 | 23 | 0.831858 |
fda6b9702c081a834a01c87b665110aec3fde5cc | 11,739 | py | Python | src/sagemaker/pipeline.py | dlragha/sagemaker-python-sdk | 04a2e75c236752256729e84ccd8eb6163d8daab8 | [
"Apache-2.0"
] | 1 | 2019-12-28T00:47:41.000Z | 2019-12-28T00:47:41.000Z | src/sagemaker/pipeline.py | dlragha/sagemaker-python-sdk | 04a2e75c236752256729e84ccd8eb6163d8daab8 | [
"Apache-2.0"
] | null | null | null | src/sagemaker/pipeline.py | dlragha/sagemaker-python-sdk | 04a2e75c236752256729e84ccd8eb6163d8daab8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import sagemaker
from sagemaker.session import Session
from sagemaker.utils import name_from_image
from sagemaker.transformer import Transformer
class PipelineModel(object):
"""A pipeline of SageMaker
``Model``s that can be deployed to an ``Endpoint``.
"""
def __init__(
self, models, role, predictor_cls=None, name=None, vpc_config=None, sagemaker_session=None
):
"""Initialize an SageMaker ``Model`` which can be used to build an
Inference Pipeline comprising of multiple model containers.
Args:
models (list[sagemaker.Model]): For using multiple containers to
build an inference pipeline, you can pass a list of ``sagemaker.Model`` objects
in the order you want the inference to happen.
role (str): An AWS IAM role (either name or full ARN). The Amazon
SageMaker training jobs and APIs that create Amazon SageMaker
endpoints use this role to access training data and model
artifacts. After the endpoint is created, the inference code
might use the IAM role, if it needs to access an AWS resource.
predictor_cls (callable[string, sagemaker.session.Session]): A
function to call to create a predictor (default: None). If not
None, ``deploy`` will return the result of invoking this
function on the created endpoint name.
name (str): The model name. If None, a default model name will be
selected on each ``deploy``.
vpc_config (dict[str, list[str]]): The VpcConfig set on the model
(default: None)
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
sagemaker_session (sagemaker.session.Session): A SageMaker Session
object, used for SageMaker interactions (default: None). If not
specified, one is created using the default AWS configuration
chain.
"""
self.models = models
self.role = role
self.predictor_cls = predictor_cls
self.name = name
self.vpc_config = vpc_config
self.sagemaker_session = sagemaker_session
self._model_name = None
self.endpoint_name = None
def pipeline_container_def(self, instance_type):
"""Return a dict created by ``sagemaker.pipeline_container_def()`` for
deploying this model to a specified instance type.
Subclasses can override this to provide custom container definitions
for deployment to a specific instance type. Called by ``deploy()``.
Args:
instance_type (str): The EC2 instance type to deploy this Model to.
For example, 'ml.p2.xlarge'.
Returns:
list[dict[str, str]]: A list of container definition objects usable
with the CreateModel API in the scenario of multiple containers
(Inference Pipeline).
"""
return sagemaker.pipeline_container_def(self.models, instance_type)
def deploy(
self,
initial_instance_count,
instance_type,
endpoint_name=None,
tags=None,
wait=True,
update_endpoint=False,
data_capture_config=None,
):
"""Deploy this ``Model`` to an ``Endpoint`` and optionally return a
``Predictor``.
Create a SageMaker ``Model`` and ``EndpointConfig``, and deploy an
``Endpoint`` from this ``Model``. If ``self.predictor_cls`` is not None,
this method returns a the result of invoking ``self.predictor_cls`` on
the created endpoint name.
The name of the created model is accessible in the ``name`` field of
this ``Model`` after deploy returns
The name of the created endpoint is accessible in the
``endpoint_name`` field of this ``Model`` after deploy returns.
Args:
initial_instance_count (int): The initial number of instances to run
in the ``Endpoint`` created from this ``Model``.
instance_type (str): The EC2 instance type to deploy this Model to.
For example, 'ml.p2.xlarge'.
endpoint_name (str): The name of the endpoint to create (default:
None). If not specified, a unique endpoint name will be created.
tags (List[dict[str, str]]): The list of tags to attach to this
specific endpoint.
wait (bool): Whether the call should wait until the deployment of
model completes (default: True).
update_endpoint (bool): Flag to update the model in an existing
Amazon SageMaker endpoint. If True, this will deploy a new
EndpointConfig to an already existing endpoint and delete
resources corresponding to the previous EndpointConfig. If
False, a new endpoint will be created. Default: False
data_capture_config (sagemaker.model_monitor.DataCaptureConfig): Specifies
configuration related to Endpoint data capture for use with
Amazon SageMaker Model Monitoring. Default: None.
Returns:
callable[string, sagemaker.session.Session] or None: Invocation of
``self.predictor_cls`` on the created endpoint name, if ``self.predictor_cls``
is not None. Otherwise, return None.
"""
if not self.sagemaker_session:
self.sagemaker_session = Session()
containers = self.pipeline_container_def(instance_type)
self.name = self.name or name_from_image(containers[0]["Image"])
self.sagemaker_session.create_model(
self.name, self.role, containers, vpc_config=self.vpc_config
)
production_variant = sagemaker.production_variant(
self.name, instance_type, initial_instance_count
)
self.endpoint_name = endpoint_name or self.name
data_capture_config_dict = None
if data_capture_config is not None:
data_capture_config_dict = data_capture_config._to_request_dict()
if update_endpoint:
endpoint_config_name = self.sagemaker_session.create_endpoint_config(
name=self.name,
model_name=self.name,
initial_instance_count=initial_instance_count,
instance_type=instance_type,
tags=tags,
data_capture_config_dict=data_capture_config_dict,
)
self.sagemaker_session.update_endpoint(self.endpoint_name, endpoint_config_name)
else:
self.sagemaker_session.endpoint_from_production_variants(
name=self.endpoint_name,
production_variants=[production_variant],
tags=tags,
wait=wait,
data_capture_config_dict=data_capture_config_dict,
)
if self.predictor_cls:
return self.predictor_cls(self.endpoint_name, self.sagemaker_session)
return None
def _create_sagemaker_pipeline_model(self, instance_type):
"""Create a SageMaker Model Entity
Args:
instance_type (str): The EC2 instance type that this Model will be
used for, this is only used to determine if the image needs GPU
support or not.
"""
if not self.sagemaker_session:
self.sagemaker_session = Session()
containers = self.pipeline_container_def(instance_type)
self.name = self.name or name_from_image(containers[0]["Image"])
self.sagemaker_session.create_model(
self.name, self.role, containers, vpc_config=self.vpc_config
)
def transformer(
self,
instance_count,
instance_type,
strategy=None,
assemble_with=None,
output_path=None,
output_kms_key=None,
accept=None,
env=None,
max_concurrent_transforms=None,
max_payload=None,
tags=None,
volume_kms_key=None,
):
"""Return a ``Transformer`` that uses this Model.
Args:
instance_count (int): Number of EC2 instances to use.
instance_type (str): Type of EC2 instance to use, for example,
'ml.c4.xlarge'.
strategy (str): The strategy used to decide how to batch records in
a single request (default: None). Valid values: 'MULTI_RECORD'
and 'SINGLE_RECORD'.
assemble_with (str): How the output is assembled (default: None).
Valid values: 'Line' or 'None'.
output_path (str): S3 location for saving the transform result. If
not specified, results are stored to a default bucket.
output_kms_key (str): Optional. KMS key ID for encrypting the
transform output (default: None).
accept (str): The accept header passed by the client to
the inference endpoint. If it is supported by the endpoint,
it will be the format of the batch transform output.
env (dict): Environment variables to be set for use during the
transform job (default: None).
max_concurrent_transforms (int): The maximum number of HTTP requests
to be made to each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP
request to the container in MB.
tags (list[dict]): List of tags for labeling a transform job. If
none specified, then the tags used for the training job are used
for the transform job.
volume_kms_key (str): Optional. KMS key ID for encrypting the volume
attached to the ML compute instance (default: None).
"""
self._create_sagemaker_pipeline_model(instance_type)
return Transformer(
self.name,
instance_count,
instance_type,
strategy=strategy,
assemble_with=assemble_with,
output_path=output_path,
output_kms_key=output_kms_key,
accept=accept,
max_concurrent_transforms=max_concurrent_transforms,
max_payload=max_payload,
env=env,
tags=tags,
base_transform_job_name=self.name,
volume_kms_key=volume_kms_key,
sagemaker_session=self.sagemaker_session,
)
def delete_model(self):
"""Delete the SageMaker model backing this pipeline model. This does not
delete the list of SageMaker models used in multiple containers to build
the inference pipeline.
"""
if self.name is None:
raise ValueError("The SageMaker model must be created before attempting to delete.")
self.sagemaker_session.delete_model(self.name)
| 43.639405 | 98 | 0.634466 |
8b090621155235bd693b17424b257b545055f9c8 | 218 | py | Python | tests/test_meta_schemas_compilation.py | ocavue/jsonschemax | e9a2cd10d9610c6222f1a83869da929fe0ff2f2d | [
"MIT"
] | 1 | 2021-03-21T10:41:47.000Z | 2021-03-21T10:41:47.000Z | tests/test_meta_schemas_compilation.py | ocavue/jsonschemax | e9a2cd10d9610c6222f1a83869da929fe0ff2f2d | [
"MIT"
] | 3 | 2020-03-24T16:37:38.000Z | 2021-02-02T22:02:18.000Z | tests/test_meta_schemas_compilation.py | ocavue/jsonschemax | e9a2cd10d9610c6222f1a83869da929fe0ff2f2d | [
"MIT"
] | 1 | 2019-01-22T12:47:30.000Z | 2019-01-22T12:47:30.000Z | import jsonschemax
# jsonschemax.compile(
# {"properties": {"propertyNames": {"$ref": "#"}, "enum": {"items": True}}},
# jsonschemax.draft7_keyword_map,
# )
jsonschemax.compile(jsonschemax.draft7_meta_schema)
| 27.25 | 80 | 0.688073 |
4f0cc8d45913dc0f56f2e4e39015876b9e3e2488 | 11,056 | py | Python | tests/utils_tests/test_html.py | jmcdono362/django | 2014db50f4522243dba3190c640f64cf124d5b68 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2021-03-13T21:16:42.000Z | 2022-01-12T00:29:14.000Z | tests/utils_tests/test_html.py | jmcdono362/django | 2014db50f4522243dba3190c640f64cf124d5b68 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/utils_tests/test_html.py | jmcdono362/django | 2014db50f4522243dba3190c640f64cf124d5b68 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2019-02-24T03:48:14.000Z | 2019-02-24T03:48:14.000Z | import os
from datetime import datetime
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.html import (
conditional_escape, escape, escapejs, format_html, html_safe, json_script,
linebreaks, smart_urlquote, strip_spaces_between_tags, strip_tags, urlize,
)
from django.utils.safestring import mark_safe
class TestUtilsHtml(SimpleTestCase):
def check_output(self, function, value, output=None):
"""
function(value) equals output. If output is None, function(value)
equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
items = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
("'", '''),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
with self.subTest(value=value, output=output):
for pattern in patterns:
with self.subTest(value=value, output=output, pattern=pattern):
self.check_output(escape, pattern % value, pattern % output)
self.check_output(escape, lazystr(pattern % value), pattern % output)
# Check repeated values.
self.check_output(escape, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(escape, '<&', '<&')
def test_format_html(self):
self.assertEqual(
format_html(
"{} {} {third} {fourth}",
"< Dangerous >",
mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=mark_safe("<i>safe again</i>"),
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br>sub1<br>sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br>sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(linebreaks, value, output)
self.check_output(linebreaks, lazystr(value), output)
def test_strip_tags(self):
items = (
('<p>See: 'é is an apostrophe followed by e acute</p>',
'See: 'é is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
# caused infinite loop on Pythons not patched with
# http://bugs.python.org/issue20288
('&gotcha&#;<>', '&gotcha&#;<>'),
('<sc<!-- -->ript>test<<!-- -->/script>', 'ript>test'),
('<script>alert()</script>&h', 'alert()h'),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(strip_tags, value, output)
self.check_output(strip_tags, lazystr(value), output)
def test_strip_tags_files(self):
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
with self.subTest(filename=filename):
path = os.path.join(os.path.dirname(__file__), 'files', filename)
with open(path, 'r') as fp:
content = fp.read()
start = datetime.now()
stripped = strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
with self.subTest(value=value):
self.check_output(strip_spaces_between_tags, value)
self.check_output(strip_spaces_between_tags, lazystr(value))
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(strip_spaces_between_tags, value, output)
self.check_output(strip_spaces_between_tags, lazystr(value), output)
def test_escapejs(self):
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
(
'and lots of whitespace: \r\n\t\v\f\b',
'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'
),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
(
'paragraph separator:\u2029and line separator:\u2028',
'paragraph separator:\\u2029and line separator:\\u2028'
),
('`', '\\u0060'),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(escapejs, value, output)
self.check_output(escapejs, lazystr(value), output)
def test_json_script(self):
tests = (
# "<", ">" and "&" are quoted inside JSON strings
(('&<>', '<script id="test_id" type="application/json">"\\u0026\\u003C\\u003E"</script>')),
# "<", ">" and "&" are quoted inside JSON objects
(
{'a': '<script>test&ing</script>'},
'<script id="test_id" type="application/json">'
'{"a": "\\u003Cscript\\u003Etest\\u0026ing\\u003C/script\\u003E"}</script>'
),
# Lazy strings are quoted
(lazystr('&<>'), '<script id="test_id" type="application/json">"\\u0026\\u003C\\u003E"</script>'),
(
{'a': lazystr('<script>test&ing</script>')},
'<script id="test_id" type="application/json">'
'{"a": "\\u003Cscript\\u003Etest\\u0026ing\\u003C/script\\u003E"}</script>'
),
)
for arg, expected in tests:
with self.subTest(arg=arg):
self.assertEqual(json_script(arg, 'test_id'), expected)
def test_smart_urlquote(self):
items = (
('http://öäü.com/', 'http://xn--4ca9at.com/'),
('http://öäü.com/öäü/', 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/'),
# Everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered
# safe as per RFC.
('http://example.com/path/öäü/', 'http://example.com/path/%C3%B6%C3%A4%C3%BC/'),
('http://example.com/%C3%B6/ä/', 'http://example.com/%C3%B6/%C3%A4/'),
('http://example.com/?x=1&y=2+3&z=', 'http://example.com/?x=1&y=2+3&z='),
('http://example.com/?x=<>"\'', 'http://example.com/?x=%3C%3E%22%27'),
('http://example.com/?q=http://example.com/?x=1%26q=django',
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
('http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango',
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
)
# IDNs are properly quoted
for value, output in items:
with self.subTest(value=value, output=output):
self.assertEqual(smart_urlquote(value), output)
def test_conditional_escape(self):
s = '<h1>interop</h1>'
self.assertEqual(conditional_escape(s), '<h1>interop</h1>')
self.assertEqual(conditional_escape(mark_safe(s)), s)
self.assertEqual(conditional_escape(lazystr(mark_safe(s))), s)
def test_html_safe(self):
@html_safe
class HtmlClass:
def __str__(self):
return "<h1>I'm a html class!</h1>"
html_obj = HtmlClass()
self.assertTrue(hasattr(HtmlClass, '__html__'))
self.assertTrue(hasattr(html_obj, '__html__'))
self.assertEqual(str(html_obj), html_obj.__html__())
def test_html_safe_subclass(self):
class BaseClass:
def __html__(self):
# defines __html__ on its own
return 'some html content'
def __str__(self):
return 'some non html content'
@html_safe
class Subclass(BaseClass):
def __str__(self):
# overrides __str__ and is marked as html_safe
return 'some html safe content'
subclass_obj = Subclass()
self.assertEqual(str(subclass_obj), subclass_obj.__html__())
def test_html_safe_defines_html_error(self):
msg = "can't apply @html_safe to HtmlClass because it defines __html__()."
with self.assertRaisesMessage(ValueError, msg):
@html_safe
class HtmlClass:
def __html__(self):
return "<h1>I'm a html class!</h1>"
def test_html_safe_doesnt_define_str(self):
msg = "can't apply @html_safe to HtmlClass because it doesn't define __str__()."
with self.assertRaisesMessage(ValueError, msg):
@html_safe
class HtmlClass:
pass
def test_urlize(self):
tests = (
(
'Search for google.com/?q=! and see.',
'Search for <a href="http://google.com/?q=">google.com/?q=</a>! and see.'
),
(
lazystr('Search for google.com/?q=!'),
'Search for <a href="http://google.com/?q=">google.com/?q=</a>!'
),
)
for value, output in tests:
with self.subTest(value=value):
self.assertEqual(urlize(value), output)
| 43.1875 | 117 | 0.529667 |
dcd50a097142ab91f02225d8385855a0dd3132ce | 2,706 | py | Python | octavia/opts.py | BoTranVan/octavia | 70665664b2130f276291cefac0ed3bc0878d6cd9 | [
"Apache-2.0"
] | null | null | null | octavia/opts.py | BoTranVan/octavia | 70665664b2130f276291cefac0ed3bc0878d6cd9 | [
"Apache-2.0"
] | null | null | null | octavia/opts.py | BoTranVan/octavia | 70665664b2130f276291cefac0ed3bc0878d6cd9 | [
"Apache-2.0"
] | 1 | 2021-12-27T13:18:38.000Z | 2021-12-27T13:18:38.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
import operator
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
import octavia.common.config
from octavia.common import constants
def list_opts():
return [
('DEFAULT',
itertools.chain(octavia.common.config.core_opts)),
('api_settings', octavia.common.config.api_opts),
('amphora_agent', octavia.common.config.amphora_agent_opts),
('networking', octavia.common.config.networking_opts),
('oslo_messaging', octavia.common.config.oslo_messaging_opts),
('haproxy_amphora', octavia.common.config.haproxy_amphora_opts),
('health_manager', octavia.common.config.healthmanager_opts),
('controller_worker', octavia.common.config.controller_worker_opts),
('task_flow', octavia.common.config.task_flow_opts),
('certificates', itertools.chain(
octavia.common.config.certificate_opts,
octavia.certificates.common.local.certgen_opts)),
('house_keeping', octavia.common.config.house_keeping_opts),
('keepalived_vrrp', octavia.common.config.keepalived_vrrp_opts),
('anchor', octavia.common.config.anchor_opts),
('nova', octavia.common.config.nova_opts),
('neutron', octavia.common.config.neutron_opts),
('glance', octavia.common.config.glance_opts),
('quotas', octavia.common.config.quota_opts),
add_auth_opts(),
]
def add_auth_opts():
opts = ks_loading.register_session_conf_options(
cfg.CONF, constants.SERVICE_AUTH)
opt_list = copy.deepcopy(opts)
opt_list.insert(0, ks_loading.get_auth_common_conf_options()[0])
# NOTE(mhickey): There are a lot of auth plugins, we just generate
# the config options for a few common ones
plugins = ['password', 'v2password', 'v3password']
for name in plugins:
for plugin_option in ks_loading.get_auth_plugin_conf_options(name):
if all(option.name != plugin_option.name for option in opt_list):
opt_list.append(plugin_option)
opt_list.sort(key=operator.attrgetter('name'))
return (constants.SERVICE_AUTH, opt_list)
| 42.28125 | 77 | 0.717295 |
1770451e345f2c35de092550501488ad9380fabd | 1,133 | py | Python | observatory/dashboard/models/__init__.py | natestedman/Observatory | 6e810b22d844416b2a3057e99ef23baa0d122ab4 | [
"0BSD"
] | 1 | 2015-01-16T04:17:54.000Z | 2015-01-16T04:17:54.000Z | observatory/dashboard/models/__init__.py | natestedman/Observatory | 6e810b22d844416b2a3057e99ef23baa0d122ab4 | [
"0BSD"
] | null | null | null | observatory/dashboard/models/__init__.py | natestedman/Observatory | 6e810b22d844416b2a3057e99ef23baa0d122ab4 | [
"0BSD"
] | null | null | null | # Copyright (c) 2010, individual contributors (see AUTHORS file)
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from AuthorRequest import AuthorRequest
from Blog import Blog
from BlogPost import BlogPost
from Commit import Commit
from Contributor import Contributor
from Event import Event
from EventSet import EventSet
from Project import Project
from Repository import Repository
from Screenshot import Screenshot
from URLPathedModel import URLPathedModel
| 41.962963 | 74 | 0.812004 |
5f1da5d3acd90c0ffeb395d09964ae68d2f6fd69 | 701 | py | Python | gxf/formatting.py | Talanor/gxf | 4dd2f3a123e646fcbf0f44b43f2004b04acba9be | [
"MIT"
] | 41 | 2015-02-16T02:30:19.000Z | 2019-11-03T06:39:52.000Z | gxf/formatting.py | Talanor/gxf | 4dd2f3a123e646fcbf0f44b43f2004b04acba9be | [
"MIT"
] | 4 | 2015-02-02T10:51:00.000Z | 2019-03-02T15:48:57.000Z | gxf/formatting.py | Talanor/gxf | 4dd2f3a123e646fcbf0f44b43f2004b04acba9be | [
"MIT"
] | 8 | 2015-01-21T20:48:21.000Z | 2019-01-28T12:53:56.000Z | # -*- coding: utf-8 -*-
import pygments
from pygments.formatters import TerminalFormatter
from pygments.token import Token # NOQA
formatter = TerminalFormatter(bg="dark")
class Formattable(object):
    """Base class for objects that render themselves via Pygments tokens."""

    def __init__(self, tokens=None):
        # Only store tokens when given; subclasses may supply _tokens themselves.
        if tokens is not None:
            self._tokens = tokens

    def fmttokens(self):
        """Yield the (token_type, text) pairs to be formatted."""
        yield from self._tokens

    def format(self, *args, formatter=formatter, **kwargs):
        """Return this object's tokens rendered through `formatter`."""
        tokenstream = self.fmttokens(*args, **kwargs)
        return pygments.format(tokenstream, formatter)

    def output(self, *args, **kwargs):
        """Print the formatted representation to stdout."""
        rendered = self.format(*args, **kwargs)
        print(rendered, end="\n")

    def __str__(self):
        return self.format()

    def __repr__(self):
        return self.format()
| 23.366667 | 74 | 0.650499 |
07e89f3c97eef5faa211d90083c75758fd3214d4 | 3,216 | py | Python | homeassistant/components/stookalert/binary_sensor.py | lkempf/core | 8d7744a74f6f28657941dc095fb5a197e726ffff | [
"Apache-2.0"
] | null | null | null | homeassistant/components/stookalert/binary_sensor.py | lkempf/core | 8d7744a74f6f28657941dc095fb5a197e726ffff | [
"Apache-2.0"
] | null | null | null | homeassistant/components/stookalert/binary_sensor.py | lkempf/core | 8d7744a74f6f28657941dc095fb5a197e726ffff | [
"Apache-2.0"
] | null | null | null | """This integration provides support for Stookalert Binary Sensor."""
from __future__ import annotations
from datetime import timedelta
import stookalert
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_SAFETY,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
CONF_NAME,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import (
ATTR_ENTRY_TYPE,
CONF_PROVINCE,
DOMAIN,
ENTRY_TYPE_SERVICE,
LOGGER,
PROVINCES,
)
DEFAULT_NAME = "Stookalert"
ATTRIBUTION = "Data provided by rivm.nl"
SCAN_INTERVAL = timedelta(minutes=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PROVINCE): vol.In(PROVINCES),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Import the Stookalert platform into a config entry.

    YAML configuration is deprecated: warn the user, then hand the YAML data
    to the config-entry flow so the integration shows up in the UI instead.
    No entities are added here; that happens in async_setup_entry.
    """
    LOGGER.warning(
        "Configuration of the Stookalert platform in YAML is deprecated and will be "
        "removed in Home Assistant 2022.1; Your existing configuration "
        "has been imported into the UI automatically and can be safely removed "
        "from your configuration.yaml file"
    )
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            # SOURCE_IMPORT marks this flow as a YAML import rather than a
            # user-initiated setup.
            context={"source": SOURCE_IMPORT},
            data={
                CONF_PROVINCE: config[CONF_PROVINCE],
            },
        )
    )
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Stookalert binary sensor from a config entry."""
    # The stookalert API client was stored under this entry's id during setup.
    api_client = hass.data[DOMAIN][entry.entry_id]
    sensor = StookalertBinarySensor(api_client, entry)
    async_add_entities([sensor], update_before_add=True)
class StookalertBinarySensor(BinarySensorEntity):
    """Binary sensor exposing the RIVM Stookalert state for one province."""

    _attr_attribution = ATTRIBUTION
    _attr_device_class = DEVICE_CLASS_SAFETY

    def __init__(self, client: stookalert.stookalert, entry: ConfigEntry) -> None:
        """Initialize a Stookalert device."""
        self._client = client
        self._attr_name = f"Stookalert {entry.data[CONF_PROVINCE]}"
        self._attr_unique_id = entry.unique_id
        # Register the province as a "service" device in the registry.
        device_info = {
            ATTR_IDENTIFIERS: {(DOMAIN, f"{entry.entry_id}")},
            ATTR_NAME: entry.data[CONF_PROVINCE],
            ATTR_MANUFACTURER: "RIVM",
            ATTR_MODEL: "Stookalert",
            ATTR_ENTRY_TYPE: ENTRY_TYPE_SERVICE,
        }
        self._attr_device_info = device_info

    def update(self) -> None:
        """Update the data from the Stookalert handler."""
        self._client.get_alerts()
        # The API reports state 1 when an alert is active.
        self._attr_is_on = self._client.state == 1
| 30.628571 | 87 | 0.70398 |
998afa8e887b49a29425e3c2b3650ead6e136ec1 | 1,597 | py | Python | atcoderpiechart.py | Vicfred/rankingdistribution | 72374afee2b503c62921edefb438e51cff5eadcd | [
"MIT"
] | 1 | 2021-06-16T14:33:25.000Z | 2021-06-16T14:33:25.000Z | atcoderpiechart.py | Vicfred/rankingdistribution | 72374afee2b503c62921edefb438e51cff5eadcd | [
"MIT"
] | null | null | null | atcoderpiechart.py | Vicfred/rankingdistribution | 72374afee2b503c62921edefb438e51cff5eadcd | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
# Pie chart, where the slices will be ordered and plotted counter-clockwise.
# Each slice is an Atcoder rating colour band; `sizes` holds the number of
# users in each band, computed from the (inclusive) rank ranges on the site.
# NOTE: the original script defined `labels`/`colors`/`sizes` three times in a
# row (8-band, then 7-band, then a commented 6-band variant); only the last
# live definitions were ever used, so the dead assignments were removed.
labels = 'red', 'orange', 'yellow', 'blue', 'cyan', 'green', 'brown'
colors = ('#ff0000', '#ffb331', '#fef53a', '#2667f2', '#3afeee', '#00c911', '#5e4838')
sizes = [147 - 1 + 1, 379 - 148 + 1, 1340 - 382 + 1, 3295 - 1352 + 1,
         7221 - 3301 + 1, 14671 - 7254 + 1, 25565 - 14702 + 1]

patches, texts = plt.pie(sizes, colors=colors,
                         wedgeprops={'linewidth': 1, 'edgecolor': 'white'})

total = sum(sizes)
# Running totals, used for the "at or above this band" percentage column.
cumulative = sizes.copy()
for i in range(1, len(sizes)):
    cumulative[i] += cumulative[i - 1]

# Legend entries: per-band percentage followed by cumulative percentage.
labels = [f'{s/total*100:0.2f}% {c/total*100:0.2f}%' for s, c in zip(sizes, cumulative)]

plt.legend(bbox_to_anchor=(0.95, 1), loc='upper left', labels=labels)
plt.title('Atcoder ranking distribution', bbox={'facecolor': '0.8', 'pad': 5})
plt.axis('equal')
plt.tight_layout()
plt.savefig('atcoderpie.png')
7cb20842311931067e1aa48e1f3d23e6fb540075 | 9,133 | py | Python | plugins/modules/oci_apm_control_plane_apm_domain_facts.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_apm_control_plane_apm_domain_facts.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_apm_control_plane_apm_domain_facts.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_apm_control_plane_apm_domain_facts
short_description: Fetches details about one or multiple ApmDomain resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple ApmDomain resources in Oracle Cloud Infrastructure
- Lists all APM Domains for the specified tenant compartment.
- If I(apm_domain_id) is specified, the details of a single ApmDomain will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
apm_domain_id:
description:
- OCID of the APM Domain
- Required to get a specific apm_domain.
type: str
aliases: ["id"]
compartment_id:
description:
- The ID of the compartment in which to list resources.
- Required to list multiple apm_domains.
type: str
display_name:
description:
- A filter to return only resources that match the entire display name given.
type: str
aliases: ["name"]
lifecycle_state:
description:
- A filter to return only resources that match the given life-cycle state.
type: str
choices:
- "CREATING"
- "UPDATING"
- "ACTIVE"
- "DELETING"
- "FAILED"
sort_order:
description:
- The sort order to use, either 'asc' or 'desc'.
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is
ascending. If no value is specified timeCreated is default.
type: str
choices:
- "timeCreated"
- "displayName"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List apm_domains
oci_apm_control_plane_apm_domain_facts:
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
- name: Get a specific apm_domain
oci_apm_control_plane_apm_domain_facts:
apm_domain_id: "ocid1.apmdomain.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
apm_domains:
description:
- List of ApmDomain resources
returned: on success
type: complex
contains:
data_upload_endpoint:
description:
- Where APM Agents upload their observations and metrics.
returned: on success
type: str
sample: data_upload_endpoint_example
id:
description:
- Unique identifier that is immutable on creation.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- APM Domain display name, can be updated.
returned: on success
type: str
sample: display_name_example
description:
description:
- Description of the APM Domain.
returned: on success
type: str
sample: description_example
compartment_id:
description:
- The OCID of the compartment corresponding to the APM Domain.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The current lifecycle state of the APM Domain.
returned: on success
type: str
sample: CREATING
is_free_tier:
description:
- Indicates if this is an Always Free resource.
returned: on success
type: bool
sample: true
time_created:
description:
- The time the the APM Domain was created. An RFC3339 formatted datetime string
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The time the APM Domain was updated. An RFC3339 formatted datetime string
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
sample: [{
"data_upload_endpoint": "data_upload_endpoint_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"description": "description_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "CREATING",
"is_free_tier": true,
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.apm_control_plane import ApmDomainClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ApmDomainFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get, list"""

    # NOTE: this file is marked "GENERATED FILE - DO NOT EDIT" (see header);
    # only comments are added here.

    def get_required_params_for_get(self):
        # A single APM domain is addressed by its OCID alone.
        return [
            "apm_domain_id",
        ]

    def get_required_params_for_list(self):
        # Listing is always scoped to one compartment.
        return [
            "compartment_id",
        ]

    def get_resource(self):
        # call_with_backoff retries the OCI SDK call on transient failures.
        return oci_common_utils.call_with_backoff(
            self.client.get_apm_domain,
            apm_domain_id=self.module.params.get("apm_domain_id"),
        )

    def list_resources(self):
        # Optional filter/sort parameters: forwarded only when the user set them.
        optional_list_method_params = [
            "display_name",
            "lifecycle_state",
            "sort_order",
            "sort_by",
        ]
        optional_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_list_method_params
            if self.module.params.get(param) is not None
        )
        # list_all_resources transparently follows the API's pagination.
        return oci_common_utils.list_all_resources(
            self.client.list_apm_domains,
            compartment_id=self.module.params.get("compartment_id"),
            **optional_kwargs
        )
ApmDomainFactsHelperCustom = get_custom_class("ApmDomainFactsHelperCustom")
class ResourceFactsHelper(ApmDomainFactsHelperCustom, ApmDomainFactsHelperGen):
    """Facts helper; custom overrides take precedence over the generated base via the MRO."""
    pass
def main():
    """Entry point: build the Ansible module spec and return APM domain facts."""
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            apm_domain_id=dict(aliases=["id"], type="str"),
            compartment_id=dict(type="str"),
            display_name=dict(aliases=["name"], type="str"),
            lifecycle_state=dict(
                type="str",
                choices=["CREATING", "UPDATING", "ACTIVE", "DELETING", "FAILED"],
            ),
            sort_order=dict(type="str", choices=["ASC", "DESC"]),
            sort_by=dict(type="str", choices=["timeCreated", "displayName"]),
        )
    )
    module = AnsibleModule(argument_spec=module_args)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="apm_domain",
        service_client_class=ApmDomainClient,
        namespace="apm_control_plane",
    )
    result = []
    # The helper decides (from the supplied params) whether this is a single
    # "get" by OCID or a compartment-wide "list".
    if resource_facts_helper.is_get():
        result = [resource_facts_helper.get()]
    elif resource_facts_helper.is_list():
        result = resource_facts_helper.list()
    else:
        resource_facts_helper.fail()
    module.exit_json(apm_domains=result)
if __name__ == "__main__":
main()
| 32.852518 | 150 | 0.616446 |
5cdc9998a36152c54ccf13a070ec30d19c480ffc | 27,119 | py | Python | zoom_meeting.py | mrcromulent/ZoomBot | 7b613fab954e54989e5bca79aae229c5c5f6872d | [
"MIT"
] | 3 | 2020-07-26T13:53:28.000Z | 2021-02-07T23:32:58.000Z | zoom_meeting.py | mrcromulent/ZoomBot | 7b613fab954e54989e5bca79aae229c5c5f6872d | [
"MIT"
] | 1 | 2020-09-26T02:55:01.000Z | 2020-09-27T02:15:08.000Z | zoom_meeting.py | mrcromulent/ZoomBot | 7b613fab954e54989e5bca79aae229c5c5f6872d | [
"MIT"
] | 3 | 2020-07-22T04:16:06.000Z | 2021-02-07T23:33:00.000Z | import re
import time
import pickle as pk
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from helper_functions import ParticipantNotFoundException, RoomIndexNotFoundException
from urllib3.exceptions import MaxRetryError
class ZoomMeeting(object):
"""
Container to hold all the main operations of handling a zoom meeting. self.d is the chromedriver and holds all the
DOM information. This is what you need to manipulate to access the webpage
user_locs is a dictionary that stores the last known breakout room location of users, indexed by user name
"""
move_phrase = "AssignMeTo: "
broadcast_phrase = "Broadcast: "
command_history = []
user_locs = dict()
broadcast_history = []
n_most_recent = [[], []]
very_long_wait = 20 # seconds
long_wait = 4 # seconds
short_wait = 0 # seconds
ZOOM_SIGNIN_PATH = "https://zoom.us/signin"
ZOOM_START_PATH = "https://zoom.us/start/webmeeting"
ZOOM_PROFILE_PATH = "https://zoom.us/profile"
ZOOM_MEETINGS_PATH = "https://zoom.us/meeting"
def __init__(self, meeting_params):
self.d = None
self.room_names = meeting_params["room_names"]
self.SESSION_PATH = meeting_params["SESSION_PATH"]
self.CHROME_PATH = meeting_params["CHROME_PATH"]
self.username = meeting_params["username"]
self.password = meeting_params["password"]
self.meeting_docs = meeting_params["meeting_docs"]
def set_driver_from_file(self):
    """
    Restarts ZoomBot using session information stored at SESSION_PATH. Sets self.d as driver from file
    """
    with open(self.SESSION_PATH, "rb") as handle:
        session_info = pk.load(handle)
    # Re-attach to the still-running Chrome instance: point a Remote driver
    # at the saved executor URL, then swap in the saved session id.
    driver = webdriver.Remote(command_executor=session_info["url"], desired_capabilities={})
    driver.close()  # this prevents the dummy browser
    driver.session_id = session_info["session_id"]
    self.d = driver
    self.set_global_driver_settings()

def set_global_driver_settings(self):
    """
    Sets the driver implicit wait time and maximises the chrome window
    """
    self.d.implicitly_wait(self.long_wait)
    self.d.maximize_window()
def set_new_driver(self):
    """
    Initialises a new driver and saves this information to SESSION_PATH in case a restart is required
    """
    # Make a driver and login
    driver = webdriver.Chrome(self.CHROME_PATH)
    # Save session info so set_driver_from_file() can re-attach later.
    session_info = {"url": driver.command_executor._url,
                    "session_id": driver.session_id}
    with open(self.SESSION_PATH, "wb") as handle:
        pk.dump(session_info, handle)
    self.d = driver
    self.set_global_driver_settings()

def logged_in(self):
    """
    Returns True if the user is logged in
    """
    self.d.get(self.ZOOM_PROFILE_PATH)
    # If Zoom redirects to the sign-in form a "password" field is present;
    # its absence means the profile page loaded, i.e. we are logged in.
    not_logged_in, _ = self.check_if_exists(By.NAME, "password", self.very_long_wait)
    if not_logged_in:
        return False
    return True
def login(self):
    """
    Enters the user's login credentials on the login page, then blocks until
    the profile page loads (giving the operator time to solve CAPTCHAs).
    """
    uname = self.d.find_element_by_name("email")
    uname.clear()
    uname.send_keys(self.username)
    pword = self.d.find_element_by_name("password")
    pword.clear()
    pword.send_keys(self.password)
    # RETURN submits the form.
    pword.send_keys(Keys.RETURN)
    print("Handle any CAPTCHAS and popups that appear. You have 10 minutes")
    # 600 s manual grace period for CAPTCHA / 2FA handling.
    WebDriverWait(self.d, 600).until(ec.title_is("My Profile - Zoom"), "Waiting for profile page to load")
def dismiss_audio(self):
    """Close the "Connect to audio" dialog that appears when a call starts."""
    print("Dismissing audio")
    # The dialog's close button has no stable id, so locate it structurally.
    self.click_if_exists(By.XPATH, '//div[@data-focus-lock-disabled="false"]/div/div/button')

def open_chat(self):
    """Open the in-meeting chat pane."""
    print("Opening chat")
    # Close first (if open) so the subsequent "open" locator always resolves.
    self.click_if_exists(By.XPATH, '//button[@aria-label="close the chat pane"]')
    self.d.find_element_by_xpath('//button[@aria-label="open the chat pane"]').click()

def send_message_to_chat(self, message):
    """
    Sends the string message to chat.

    :param message: text typed into the chat box; RETURN sends it
    :return: None
    """
    self.d.find_element_by_class_name("chat-box__chat-textarea").send_keys(message)
    self.d.find_element_by_class_name("chat-box__chat-textarea").send_keys(Keys.RETURN)

def get_n_most_recent_chat_messages(self, n):
    """
    Retrieves the n most recent messages from the chat.

    :param n: number of messages to read from the end of the chat log
    :return: (authors, messages) parallel lists, oldest first
    """
    chat_items = self.d.find_elements_by_class_name("chat-item__chat-info")[-n:]
    authors = []
    messages = []
    for item in chat_items:
        # Author name sits in the first span of the header row.
        authors.append(item.find_element_by_xpath(".//div[1]/span[1]").get_attribute("innerText").strip())
        messages.append(item.find_element_by_xpath(".//pre[1]").get_attribute("innerText"))
    return authors, messages

def open_participants_pane(self):
    """
    Opens the participants pane (close-then-open toggle, like open_chat).

    :return: None
    """
    print("Opening participants pane")
    self.click_if_exists(By.XPATH, '//button[starts-with(@aria-label, "close the manage participants list pane")]')
    self.d.find_element_by_xpath(
        '//button[starts-with(@aria-label, "open the manage participants list pane")]').click()
def open_breakout_room_menu(self):
    """
    Opens the breakout rooms menu, trying the toolbar button first and
    falling back to the "More" overflow menu.

    :return: None
    """
    # The per-room button group only exists while the menu is open.
    already_open, _ = self.check_if_exists(By.CLASS_NAME, "bo-room-item-container__btn-group")
    if not already_open:
        open_button_visible, button = self.check_if_exists(By.XPATH, '//button[@aria-label="Breakout Rooms"]')
        if open_button_visible:
            button.click()
        else:
            # Narrow window: the button is hidden under the "More" menu.
            self.d.find_element_by_id("moreButton").click()
            self.d.find_element_by_xpath('//a[@aria-label="Breakout Rooms"]').click()

def set_up_breakout_rooms(self):
    """
    Creates len(self.room_names) manually-assigned breakout rooms and
    renames each one to the corresponding entry of self.room_names.

    :return: None
    """
    print("Setting up breakout rooms")
    # Set up n rooms with manual arrangement
    self.open_breakout_room_menu()
    # The number input only exists before rooms have been created.
    rooms_not_started, _ = self.check_if_exists(By.CLASS_NAME, 'zmu-number-input', self.long_wait)
    if rooms_not_started:
        self.d.find_element_by_class_name('zmu-number-input').send_keys(Keys.BACKSPACE)
        self.d.find_element_by_class_name('zmu-number-input').send_keys(str(len(self.room_names)))
        self.d.find_element_by_xpath('//div[@aria-label="Manually"]').click()
        self.d.find_element_by_class_name("bo-createwindow-content__actions").\
            find_element_by_xpath('.//button[2]').click()
    # Rename rooms according to room_names
    bo_room_list_container = self.d.find_element_by_class_name("bo-room-list-container")
    for i, name in enumerate(self.room_names):
        # XPath list indices are 1-based, hence i + 1.
        bo_room = bo_room_list_container.find_element_by_xpath(f".//ul/li[{i + 1}]")
        content = bo_room.find_element_by_xpath(".//div/div/div")
        # Mouse over correct room and click rename
        ActionChains(self.d).move_to_element(content).click().perform()
        ActionChains(self.d).move_to_element(content.find_element_by_xpath(".//button[1]")).click().perform()
        # Type in new name and confirm
        self.d.find_element_by_class_name('confirm-tip__tip').find_element_by_xpath(".//input").send_keys(name)
        self.d.find_element_by_class_name('confirm-tip__footer').find_element_by_xpath('.//button[1]').click()
@property
def d(self):
    """
    Property method for chromedriver.

    :return: the selenium webdriver instance (None before add_driver runs)
    """
    return self._d

@d.setter
def d(self, value):
    # No validation: None is allowed to mean "no driver attached yet".
    self._d = value
def check_if_exists(self, by_tag, link_tag, wait_time=None):
    """
    Wait up to wait_time seconds for an element to appear in the DOM.

    :param by_tag: selenium By.* locator strategy
    :param link_tag: locator value (xpath, class name, ...)
    :param wait_time: seconds to wait; defaults to self.short_wait
    :return: (True, element) when found, (False, None) on timeout
    """
    timeout = self.short_wait if wait_time is None else wait_time
    locator = (by_tag, link_tag)
    try:
        WebDriverWait(self.d, timeout).until(ec.presence_of_element_located(locator), "")
    except TimeoutException:
        return False, None
    return True, self.d.find_element(by_tag, link_tag)
def click_if_exists(self, by_tag, link_tag, wait_time=None):
"""
Clicks a particular element if it exists in the DOM
:param by_tag:
:param link_tag:
:param wait_time:
:return:
"""
exists, elem = self.check_if_exists(by_tag, link_tag, wait_time)
if exists and elem.is_displayed():
elem.click()
def join_from_browser(self):
    """
    Clicks the "join from browser" link when starting a meeting, dismissing
    the "end existing meeting" prompt both before and after (it can appear
    at either point).

    :return: None
    """
    print("Joining from browser")
    self.click_if_exists(By.ID, "btn_end_meeting", self.long_wait)
    self.click_if_exists(By.PARTIAL_LINK_TEXT, "join from your browser", self.long_wait)
    self.click_if_exists(By.ID, "btn_end_meeting", self.long_wait)
def move_is_valid(self, target_user, target_room):
    """
    Determines whether a move is valid. Validity is defined as:
    - the target_user's name is not truncated
    - the target_room is a valid name and
    - the target_user is not in the target_room already

    Returns True/False for the cases above. NOTE(review): the
    "[CLASS NAME]" branch and the final else fall through and return None
    (falsy) -- presumably callers treat that like False; confirm.
    """
    # Zoom truncates long display names with "...", making them unmatchable.
    if target_user.endswith("..."):
        self.send_message_to_chat(f"{target_user} - Name too long. Please shorten it.")
        return False
    if target_room in self.room_names:
        if target_user not in self.room_participants(target_room):
            return True
        else:
            self.send_message_to_chat(f"{target_user} already in {target_room}")
            return False
    elif target_room.startswith("[CLASS NAME]"):
        # Placeholder branch -- looks like an unfilled template; no action.
        pass
    else:
        self.send_message_to_chat(f"{target_user} - {target_room} is not a valid name. Check spelling.")
def start_new_call(self):
    """
    Starts a new Zoom Meeting if an existing one cannot be resumed.

    :return: None
    """
    self.d.get(self.ZOOM_START_PATH)
    self.set_up_call()

def disable_screen_sharing(self):
    """
    Restricts screen sharing to the host only (limits data transfer).

    :return: None
    """
    self.d.find_element_by_id("sharePermissionMenu").click()
    # Third item of the share-permission dropdown: "Advanced Sharing Options".
    adv_sharing_opts = self.d.find_element_by_xpath('//ul[@aria-labelledby="sharePermissionMenu"]/li[3]/a')
    adv_sharing_opts.click()
    # First radio in the "who can share" group: "Only Host".
    only_me_button = self.d.find_element_by_xpath('//div[@aria-labelledby="radio_group_ability"]/div/div')
    only_me_button.click()
    close_button = self.d.find_element_by_class_name('zm-modal-footer-default-actions').\
        find_element_by_xpath('.//button')
    close_button.click()

def set_up_call(self):
    """
    Sets up call for new or resumed calls, providing time for the user to
    dismiss popups, then performs setup tasks: open chat/participants,
    create breakout rooms, post the meeting docs.

    :return: None
    """
    self.join_from_browser()
    print(f"Waiting {self.very_long_wait} seconds for page...")
    time.sleep(self.very_long_wait)
    self.dismiss_audio()
    # NOTE(review): disable_video_receiving is not defined in this chunk of
    # the file -- presumably implemented further down; verify it exists.
    self.disable_video_receiving()
    self.disable_screen_sharing()
    self.open_chat()
    self.open_participants_pane()
    self.set_up_breakout_rooms()
    self.send_message_to_chat(self.meeting_docs)
    # Lower the implicit wait time
    self.d.implicitly_wait(self.short_wait)
def add_driver(self, existing_meeting_id):
    """
    Tries to re-attach to the Chrome instance saved at SESSION_PATH; falls
    back to launching a fresh browser.

    :param existing_meeting_id: scheduled-meeting id to resume, or None
    :return: True only when a saved session was restored AND no scheduled
        meeting id was requested (i.e. the old ad-hoc call can be resumed)
    """
    try:
        self.set_driver_from_file()
        if existing_meeting_id is not None:
            return False
        return True
    # MaxRetryError: saved executor URL no longer reachable;
    # FileNotFoundError: no saved session file yet.
    except (MaxRetryError, FileNotFoundError) as e:
        print(str(e))
        self.set_new_driver()
        return False

def start_scheduled_call(self, existing_meeting_id):
    """
    Checks the meetings tab for a scheduled meeting matching
    existing_meeting_id and starts it.

    :param existing_meeting_id: the numeric meeting id as shown on zoom.us
    :return: None (also on the not-found path, after printing a warning)
    """
    existing_meeting_id = existing_meeting_id.strip()
    print(f"Searching for existing meeting: {existing_meeting_id}")
    self.d.get(self.ZOOM_MEETINGS_PATH)
    meetings = self.d.find_element_by_class_name("mtg-list-content")
    meetings_list = meetings.find_elements_by_class_name("clearfix")
    for meeting in meetings_list:
        meeting_id = meeting.find_element_by_class_name("meetingId").get_attribute("innerText").strip()
        if existing_meeting_id == meeting_id:
            meeting.find_element_by_xpath('.//a[@ui-cmd="Start"]').click()
            self.set_up_call()
            return None
    print(f"Couldn't find meeting matching ID: {existing_meeting_id}")
def resume_call(self):
"""
Determines whether setup of a Zoom meeting was complete by checking the window title. If incomplete, a new call
is started
:return:
"""
if "Zoom Meeting" or "Polit University Online" in self.d.title:
print("Setting up call")
self.set_up_call()
else:
print("Starting new call")
self.start_new_call()
def new_messages(self, aut_mess):
"""
Returns true if [authors, messages] is different than self.n_most_recent
"""
first_set = set(map(tuple, self.n_most_recent))
secnd_set = set(map(tuple, aut_mess))
return bool(first_set ^ secnd_set)
def broadcast_message(self, message):
    """
    Uses Zoom's broadcast feature to send the string message to all
    breakout rooms. No-op when rooms have not been opened yet.

    :param message: text to broadcast
    :return: None
    """
    if self.breakout_rooms_started():
        self.open_breakout_room_menu()
        bc_button = self.d.find_element_by_class_name("bo-room-in-progress-footer__actions").\
            find_element_by_xpath(".//button")
        bc_button.click()
        textarea = self.d.find_element_by_class_name("bo-room-broadcast-paper__textarea")
        textarea.send_keys(message)
        send_button = self.d.find_element_by_class_name("bo-room-broadcast-paper__footer").\
            find_element_by_xpath(".//button")
        send_button.click()
        # Add to history to avoid rebroadcast
        self.broadcast_history.append(message)
def extract_from_message(self, message, keyword):
"""
Catches messages after a command phrase (Move Phrase or Broadcast Phrase) and zeros out any other command
phrases in the message
"""
regexp = r"(?<=" + keyword + ").+$"
match = re.findall(regexp, message, re.MULTILINE)
message = match[-1]
clean_msg = message.replace(self.move_phrase, "").replace(self.broadcast_phrase, "").strip()
return clean_msg
def move_user_to_room(self, target_user, target_room):
    """
    Attempts to move target_user to target_room. This procedure is different depending on whether or not breakout
    rooms are currently "started".

    This function searches the last known location of a user first to save time. If they are not found, it cycles
    through all the breakout rooms (including the pseudo-room "Unassigned") until the user is found.

    :param target_user: display name of the participant to move
    :param target_room: destination room name (must be in self.room_names)
    :return: None; failures are reported to chat instead of raised
    """
    bo_room_list_container = self.d.find_element_by_class_name("bo-room-list-container")
    # If rooms have not yet been opened
    if not self.breakout_rooms_started():
        # click assign
        bo_room_list_container.find_element_by_xpath(
            '//div[starts-with(@aria-label, "' + target_room + '")]/div[2]/button').click()
        assign_list = self.d.find_element_by_class_name("bo-room-assign-list-scrollbar")
        assignees = []
        avail_assignees = self.d.find_elements_by_class_name("zmu-data-selector-item")
        for assignee in avail_assignees:
            assignees.append(assignee.find_element_by_xpath(".//span/span[2]/span").get_attribute("innerText"))
        # Click the row matching the target (XPath indices are 1-based).
        target_idx = assignees.index(target_user)
        assign_list.find_element_by_xpath(f".//div/div/div[{target_idx+1}]").click()
        self.start_breakout_rooms()
        self.user_locs[target_user] = target_room
    # If rooms have already been opened
    else:
        try:
            # Check the cached location first; fall back to a full scan.
            lk_room_name = self.last_known_location(target_user)
            lk_room_participants = self.room_participants(lk_room_name)
            if target_user not in lk_room_participants:
                lk_room_name = self.search_rooms_for_user(target_user)
            lk_room_idx = self.room_idx(lk_room_name, unassigned_incl=True)
            curr_room = bo_room_list_container.find_element_by_xpath(f".//ul/li[{lk_room_idx}]")
            attendees = curr_room.find_elements_by_class_name("bo-room-item-attendee")
            attendee = attendees[self.attendee_idx(target_user, lk_room_name, start_at_zero=True)]
            self.assign_attendee_to_room(attendee, target_room, lk_room_name)
            self.user_locs[target_user] = target_room
        # DOM may have shifted under us if the user moved mid-operation.
        except (NoSuchElementException, ParticipantNotFoundException) as e:
            msg = f"Tried to move {target_user} to {target_room}. An error occurred. Did they move?"
            self.send_message_to_chat(msg)
            print(str(e))
def search_rooms_for_user(self, target_user):
"""
Cycles through rooms to locate target_user. The room where they are located is returned or
ParticipantNotFoundException is raised
"""
if target_user in self.room_participants("Unassigned"):
return "Unassigned"
for test_room in self.room_names:
room_part = self.room_participants(test_room)
if target_user in room_part:
return test_room
raise ParticipantNotFoundException(f"Target user: {target_user} not found")
def room_participants(self, target_room):
    """
    Returns a list of participant display names for target_room.

    :param target_room: a configured room name or "Unassigned"
    :return: list of names; empty for an invalid room or when "Unassigned"
        is requested but no unassigned pool is shown
    """
    if target_room == "Unassigned" and not self.unassigned_room_open():
        return []
    if self.room_name_valid(target_room):
        xpath = '//div[starts-with(@aria-label, "' + target_room + '")]'
        room_banner = self.d.find_element_by_xpath(xpath)
    else:
        return []
    # Expand the room's attendee list if it is currently collapsed.
    if not (room_banner.get_attribute("aria-expanded") == 'true'):
        room_banner.find_element_by_xpath('.//parent::div').click()
    bo_room = room_banner.find_element_by_xpath('.//parent::div//parent::li')
    attendees = bo_room.find_elements_by_class_name("bo-room-item-attendee")
    participants = []
    for attendee in attendees:
        raw_name = attendee.find_element_by_xpath('.//span[starts-with(@class, "bo-room-item-attendee__name")]')
        participants.append(raw_name.get_attribute('innerText'))
    return participants
def breakout_rooms_started(self):
    """
    Returns True if breakout rooms have been opened.

    :return: bool
    """
    self.open_breakout_room_menu()
    # The "not started" footer only exists before rooms are opened.
    exists, _ = self.check_if_exists(By.CLASS_NAME, "bo-room-not-started-footer__btn-wrapper")
    return not exists

def start_breakout_rooms(self):
    """
    Clicks the "Open All Rooms" button in the breakout-room dialog footer.

    :return: None
    """
    self.d.find_element_by_class_name("bo-room-not-started-footer__actions")\
        .find_element_by_xpath(".//div[4]/button[1]").click()

def ask_for_help_window_open(self):
    """
    Returns True if the "Ask Host For Help" dialog is open.

    :return: bool
    """
    exists, _ = self.check_if_exists(By.XPATH, '//div[contains(@aria-label, "asked for help.")]')
    return exists
def close_ask_for_help(self):
    """
    Closes the "Ask Host for Help" dialog and relays who asked for help to
    the chat so the request is not lost.

    :return: None
    """
    mod_wind = self.d.find_element_by_xpath('//div[contains(@aria-label, "asked for help.")]')
    # Capture the "<name> asked for help" text before closing the dialog.
    help_text = mod_wind.find_element_by_class_name('content').get_attribute("innerText")
    mod_wind.find_element_by_xpath('.//button[@aria-label="close modal"]').click()
    self.send_message_to_chat(help_text)
def room_idx(self, target_room, start_at_zero=False, unassigned_incl=False, skip=None):
"""
Returns the index of target_room based on its name
:param target_room: room name
:param start_at_zero: indicates whether the list uses Python indexing or DOM indexing
:param unassigned_incl: indicates whether "Unassigned" appears on the list
:param skip: Any elements that will be excluded from the list
:return: Integer index
"""
uro = self.unassigned_room_open()
offset = 0
if target_room in self.room_names:
base_idx = self.room_names.index(target_room)
if unassigned_incl and uro:
offset += 1
elif target_room == "Unassigned" and uro:
base_idx = 0
elif target_room == "Unassigned" and not uro:
return None
else:
msg = f"target room: {target_room}, start at zero: {start_at_zero}, " \
f"unassigned included: {unassigned_incl}, skip: {skip}"
raise RoomIndexNotFoundException(msg)
if not start_at_zero:
offset += 1
if skip is not None:
if "Unassigned" in skip:
skip.remove("Unassigned")
for room in skip:
if self.room_names.index(room) < base_idx:
offset -= 1
return base_idx + offset
def last_known_location(self, target_user):
    """
    Returns the last known location of a user based on the dictionary
    self.user_locs, falling back to "Unassigned" for unknown users.
    :param target_user: user name to look up
    :return: room name string
    """
    return self.user_locs.get(target_user, "Unassigned")
def attendee_idx(self, target_user, room, start_at_zero=False):
    """
    Returns the index of a particular target user based on their name.
    :param target_user: user name
    :param room: room that user is currently in
    :param start_at_zero: indicates whether Python indexing (True) or
        1-based DOM indexing (False) is used
    :return: integer index
    """
    roster = self.room_participants(room)
    position = roster.index(target_user)
    return position if start_at_zero else position + 1
def unassigned_room_open(self):
    """
    Returns True if breakout rooms are open and there are unassigned users.

    The unassigned pool is rendered as the first list item, so the first
    item's title differing from the first known room name indicates it.
    :return: bool
    """
    first_title = self.d.find_element_by_class_name(
        "bo-room-item-container__title").get_attribute("innerText")
    return first_title != self.room_names[0]
def room_name_valid(self, room_name):
    """
    Returns True if the room name is "Unassigned" or in the name list.
    :param room_name: candidate room name
    :return: bool
    """
    if room_name == "Unassigned":
        return True
    return room_name in self.room_names
def assign_attendee_to_room(self, attendee, target_room, lk_room_name):
    """
    Clicks the Assign button next to a user's name to move them to target_room.

    :param attendee: element for the attendee's row (passed to
        ActionChains.move_to_element — assumed to be a selenium WebElement)
    :param target_room: name of the destination room
    :param lk_room_name: the attendee's current room; excluded from the
        move-to list, so it is passed to room_idx via ``skip``
    :return: None
    """
    # Click Assign To
    # Hover over the row first: the tools (including "Assign To") only
    # become clickable on hover.
    ActionChains(self.d).move_to_element(attendee).perform()
    assign_button = self.d.find_element_by_class_name("bo-room-item-attendee__tools").\
        find_element_by_xpath(".//button")
    ActionChains(self.d).move_to_element(assign_button).click().perform()
    # Pick the destination from the pop-up list; indices are zero-based and
    # the current room is not listed, hence skip=[lk_room_name].
    assign_box = self.d.find_element_by_class_name("bo-room-item-attendee__moveto-list-scrollbar")
    options = assign_box.find_elements_by_class_name("zmu-data-selector-item")
    options[self.room_idx(target_room, start_at_zero=True, unassigned_incl=False, skip=[lk_room_name])].click()
def trim_messages(self, messages, authors, num):
    """Trims the most recent messages in the chat using the internal memory
    (self.n_most_recent), to prevent re-execution of commands which have
    already been performed.

    The issue here is that Zoom groups together your most recent messages
    into one message: if you sent a message 10 minutes ago and then a new
    one, both are returned as your newest message, provided no one else
    wrote in the interim. The goal is to keep only the new content.

    :param messages: full list of message texts, oldest first
    :param authors: author of each message, parallel to ``messages``
    :param num: how many of the most recent messages to consider
    :return: a copy of ``messages`` with previously-seen text stripped from
        the trailing ``num`` entries
    """
    most_recent_messages = messages[-num:]
    most_recent_authors = authors[-num:]
    most_recent_messages_mem = self.n_most_recent[1][-num:]
    most_recent_authors_mem = self.n_most_recent[0][-num:]
    msg_copy = messages[:]
    # BUG FIX: the original wrote trimmed text to msg_copy[i], i.e. the
    # *front* of the list, while the items being trimmed are the LAST
    # `num` messages. Offset writes to the tail of the copy.
    start = len(messages) - len(most_recent_messages)
    # BUG FIX: bound the loop by the memory length; the original raised
    # IndexError when the memory held fewer than `num` entries.
    limit = min(len(most_recent_messages),
                len(most_recent_messages_mem),
                len(most_recent_authors_mem))
    for i in range(limit):
        if most_recent_authors[i] == most_recent_authors_mem[i]:
            msg_copy[start + i] = most_recent_messages[i].replace(
                most_recent_messages_mem[i], "")
    return msg_copy
def disable_video_receiving(self):
    """
    Disables video receiving via the "More" menu.
    :return: None
    """
    print("Disabling video receiving")
    self.d.find_element_by_id("moreButton").click()
    # Best effort: click_if_exists tolerates the menu item being absent
    # (presumably when receiving is already disabled — verify in client).
    self.click_if_exists(By.XPATH, '//a[@aria-label="Disable video receiving"]')
| 37.098495 | 120 | 0.631587 |
6b36d42e26452d30d8d8922ba5b0b162349c593c | 13,789 | py | Python | tests/test_database_cache_clearer.py | enterstudio/the-blue-alliance | b53f752fe1f059b4b6f91c841e1865a6c6b81268 | [
"MIT"
] | null | null | null | tests/test_database_cache_clearer.py | enterstudio/the-blue-alliance | b53f752fe1f059b4b6f91c841e1865a6c6b81268 | [
"MIT"
] | null | null | null | tests/test_database_cache_clearer.py | enterstudio/the-blue-alliance | b53f752fe1f059b4b6f91c841e1865a6c6b81268 | [
"MIT"
] | null | null | null | import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from database import get_affected_queries
from database.award_query import EventAwardsQuery, TeamAwardsQuery, TeamYearAwardsQuery, TeamEventAwardsQuery
from database.district_query import DistrictsInYearQuery, DistrictHistoryQuery
from database.event_query import EventQuery, EventListQuery, DistrictEventsQuery, TeamEventsQuery, TeamYearEventsQuery
from database.event_details_query import EventDetailsQuery
from database.match_query import MatchQuery, EventMatchesQuery, TeamEventMatchesQuery, TeamYearMatchesQuery
from database.media_query import TeamSocialMediaQuery, TeamYearMediaQuery, EventTeamsMediasQuery, EventTeamsPreferredMediasQuery
from database.robot_query import TeamRobotsQuery
from database.team_query import TeamQuery, TeamListQuery, TeamListYearQuery, DistrictTeamsQuery, EventTeamsQuery, TeamParticipationQuery, TeamDistrictsQuery
from consts.district_type import DistrictType
from models.district import District
from models.district_team import DistrictTeam
from models.event import Event
from models.event_details import EventDetails
from models.event_team import EventTeam
from models.match import Match
from models.team import Team
class TestDatabaseCacheClearer(unittest2.TestCase):
    """Checks that get_affected_queries maps changed model references to the
    cache keys of every stored query whose cached result is invalidated."""

    def setUp(self):
        """Stand up App Engine service stubs and seed fixture entities."""
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests

        self.testbed.init_taskqueue_stub(root_path=".")

        # EventTeam fixtures: frc254 at 2015casj; frc604 at 2015cama/2010cama.
        self.eventteam_2015casj_frc254 = EventTeam(
            id='2015casj_frc254',
            event=ndb.Key(Event, '2015casj'),
            team=ndb.Key(Team, 'frc254'),
            year=2015,
        )
        self.eventteam_2015cama_frc604 = EventTeam(
            id='2015cama_frc604',
            event=ndb.Key(Event, '2015cama'),
            team=ndb.Key(Team, 'frc604'),
            year=2015,
        )
        self.eventteam_2010cama_frc604 = EventTeam(
            id='2010cama_frc604',
            event=ndb.Key(Event, '2010cama'),
            team=ndb.Key(Team, 'frc604'),
            year=2010,
        )
        self.eventteam_2015casj_frc254.put()
        self.eventteam_2015cama_frc604.put()
        self.eventteam_2010cama_frc604.put()

        # DistrictTeam fixtures: one team per district in 2015.
        self.districtteam_2015fim_frc254 = DistrictTeam(
            id='2015fim_frc254',
            district=DistrictType.MICHIGAN,
            team=ndb.Key(Team, 'frc254'),
            year=2015,
        )
        self.districtteam_2015mar_frc604 = DistrictTeam(
            id='2015mar_frc604',
            district=DistrictType.MID_ATLANTIC,
            team=ndb.Key(Team, 'frc604'),
            year=2015,
        )
        self.districtteam_2015fim_frc254.put()
        self.districtteam_2015mar_frc604.put()

        # District fixtures used by test_district_updated.
        self.district_2015ne = District(
            id='2015ne',
            year=2015,
            abbreviation='ne',
        )
        self.district_2016chs = District(
            id='2016chs',
            year=2016,
            abbreviation='chs',
        )
        self.district_2015ne.put()
        self.district_2016chs.put()

    def tearDown(self):
        self.testbed.deactivate()

    def test_award_updated(self):
        """Award changes invalidate event/team/team-year/team-event award queries."""
        affected_refs = {
            'event': {ndb.Key(Event, '2015casj'), ndb.Key(Event, '2015cama')},
            'team_list': {ndb.Key(Team, 'frc254'), ndb.Key(Team, 'frc604')},
            'year': {2014, 2015}
        }
        cache_keys = [q.cache_key for q in get_affected_queries.award_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 12)
        self.assertTrue(EventAwardsQuery('2015casj').cache_key in cache_keys)
        self.assertTrue(EventAwardsQuery('2015cama').cache_key in cache_keys)
        self.assertTrue(TeamAwardsQuery('frc254').cache_key in cache_keys)
        self.assertTrue(TeamAwardsQuery('frc604').cache_key in cache_keys)
        self.assertTrue(TeamYearAwardsQuery('frc254', 2014).cache_key in cache_keys)
        self.assertTrue(TeamYearAwardsQuery('frc254', 2015).cache_key in cache_keys)
        self.assertTrue(TeamYearAwardsQuery('frc604', 2014).cache_key in cache_keys)
        self.assertTrue(TeamYearAwardsQuery('frc604', 2015).cache_key in cache_keys)
        self.assertTrue(TeamEventAwardsQuery('frc254', '2015casj').cache_key in cache_keys)
        self.assertTrue(TeamEventAwardsQuery('frc254', '2015cama').cache_key in cache_keys)
        self.assertTrue(TeamEventAwardsQuery('frc604', '2015casj').cache_key in cache_keys)
        self.assertTrue(TeamEventAwardsQuery('frc604', '2015cama').cache_key in cache_keys)

    def test_event_updated(self):
        """Event changes invalidate event, year-list, district and team event queries."""
        affected_refs = {
            'key': {ndb.Key(Event, '2015casj'), ndb.Key(Event, '2015cama')},
            'year': {2014, 2015},
            'event_district_key': {'2015fim', '2014mar'}
        }
        cache_keys = [q.cache_key for q in get_affected_queries.event_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 10)
        self.assertTrue(EventQuery('2015casj').cache_key in cache_keys)
        self.assertTrue(EventQuery('2015cama').cache_key in cache_keys)
        self.assertTrue(EventListQuery(2014).cache_key in cache_keys)
        self.assertTrue(EventListQuery(2015).cache_key in cache_keys)
        self.assertTrue(DistrictEventsQuery('2015fim').cache_key in cache_keys)
        self.assertTrue(DistrictEventsQuery('2014mar').cache_key in cache_keys)
        # Teams come from the EventTeam fixtures attached to these events.
        self.assertTrue(TeamEventsQuery('frc254').cache_key in cache_keys)
        self.assertTrue(TeamEventsQuery('frc604').cache_key in cache_keys)
        self.assertTrue(TeamYearEventsQuery('frc254', 2015).cache_key in cache_keys)
        self.assertTrue(TeamYearEventsQuery('frc604', 2015).cache_key in cache_keys)

    def test_event_details_updated(self):
        """EventDetails changes invalidate only the matching detail queries."""
        affected_refs = {
            'key': {ndb.Key(EventDetails, '2015casj'), ndb.Key(EventDetails, '2015cama')},
        }
        cache_keys = [q.cache_key for q in get_affected_queries.event_details_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 2)
        self.assertTrue(EventDetailsQuery('2015casj').cache_key in cache_keys)
        self.assertTrue(EventDetailsQuery('2015cama').cache_key in cache_keys)

    def test_match_updated(self):
        """Match changes invalidate match, event-match and team-match queries."""
        affected_refs = {
            'key': {ndb.Key(Match, '2015casj_qm1'), ndb.Key(Match, '2015casj_qm2')},
            'event': {ndb.Key(Event, '2015casj'), ndb.Key(Event, '2015cama')},
            'team_keys': {ndb.Key(Team, 'frc254'), ndb.Key(Team, 'frc604')},
            'year': {2014, 2015},
        }
        cache_keys = [q.cache_key for q in get_affected_queries.match_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 12)
        self.assertTrue(MatchQuery('2015casj_qm1').cache_key in cache_keys)
        self.assertTrue(MatchQuery('2015casj_qm2').cache_key in cache_keys)
        self.assertTrue(EventMatchesQuery('2015casj').cache_key in cache_keys)
        self.assertTrue(EventMatchesQuery('2015cama').cache_key in cache_keys)
        self.assertTrue(TeamEventMatchesQuery('frc254', '2015casj').cache_key in cache_keys)
        self.assertTrue(TeamEventMatchesQuery('frc254', '2015cama').cache_key in cache_keys)
        self.assertTrue(TeamEventMatchesQuery('frc604', '2015casj').cache_key in cache_keys)
        self.assertTrue(TeamEventMatchesQuery('frc604', '2015cama').cache_key in cache_keys)
        self.assertTrue(TeamYearMatchesQuery('frc254', 2014).cache_key in cache_keys)
        self.assertTrue(TeamYearMatchesQuery('frc254', 2015).cache_key in cache_keys)
        self.assertTrue(TeamYearMatchesQuery('frc604', 2014).cache_key in cache_keys)
        self.assertTrue(TeamYearMatchesQuery('frc604', 2015).cache_key in cache_keys)

    def test_media_updated(self):
        """Media changes invalidate team media/social and event team-media queries."""
        affected_refs = {
            'references': {ndb.Key(Team, 'frc254'), ndb.Key(Team, 'frc604')},
            'year': {2014, 2015},
        }
        cache_keys = [q.cache_key for q in get_affected_queries.media_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 10)
        self.assertTrue(TeamYearMediaQuery('frc254', 2014).cache_key in cache_keys)
        self.assertTrue(TeamYearMediaQuery('frc254', 2015).cache_key in cache_keys)
        self.assertTrue(TeamSocialMediaQuery('frc254').cache_key in cache_keys)
        self.assertTrue(TeamYearMediaQuery('frc604', 2014).cache_key in cache_keys)
        self.assertTrue(TeamYearMediaQuery('frc604', 2015).cache_key in cache_keys)
        self.assertTrue(TeamSocialMediaQuery('frc604').cache_key in cache_keys)
        self.assertTrue(EventTeamsMediasQuery('2015cama').cache_key in cache_keys)
        self.assertTrue(EventTeamsMediasQuery('2015casj').cache_key in cache_keys)
        self.assertTrue(EventTeamsPreferredMediasQuery('2015cama').cache_key in cache_keys)
        self.assertTrue(EventTeamsPreferredMediasQuery('2015casj').cache_key in cache_keys)

    def test_robot_updated(self):
        """Robot changes invalidate the owning teams' robot queries."""
        affected_refs = {
            'team': {ndb.Key(Team, 'frc254'), ndb.Key(Team, 'frc604')},
        }
        cache_keys = [q.cache_key for q in get_affected_queries.robot_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 2)
        self.assertTrue(TeamRobotsQuery('frc254').cache_key in cache_keys)
        self.assertTrue(TeamRobotsQuery('frc604').cache_key in cache_keys)

    def test_team_updated(self):
        """Team changes invalidate team, paged team-list, district and event team queries."""
        affected_refs = {
            'key': {ndb.Key(Team, 'frc254'), ndb.Key(Team, 'frc604')},
        }
        cache_keys = [q.cache_key for q in get_affected_queries.team_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 12)
        self.assertTrue(TeamQuery('frc254').cache_key in cache_keys)
        self.assertTrue(TeamQuery('frc604').cache_key in cache_keys)
        # Page 0 covers frc254; page 1 covers frc604 (500 teams per page).
        self.assertTrue(TeamListQuery(0).cache_key in cache_keys)
        self.assertTrue(TeamListQuery(1).cache_key in cache_keys)
        self.assertTrue(TeamListYearQuery(2015, 0).cache_key in cache_keys)
        self.assertTrue(TeamListYearQuery(2015, 1).cache_key in cache_keys)
        self.assertTrue(TeamListYearQuery(2010, 1).cache_key in cache_keys)
        self.assertTrue(DistrictTeamsQuery('2015fim').cache_key in cache_keys)
        self.assertTrue(DistrictTeamsQuery('2015mar').cache_key in cache_keys)
        self.assertTrue(EventTeamsQuery('2015casj').cache_key in cache_keys)
        self.assertTrue(EventTeamsQuery('2015cama').cache_key in cache_keys)
        self.assertTrue(EventTeamsQuery('2010cama').cache_key in cache_keys)

    def test_eventteam_updated(self):
        """EventTeam changes invalidate team-event, participation, list and media queries."""
        affected_refs = {
            'event': {ndb.Key(Event, '2015casj'), ndb.Key(Event, '2015cama')},
            'team': {ndb.Key(Team, 'frc254'), ndb.Key(Team, 'frc604')},
            'year': {2014, 2015}
        }
        cache_keys = [q.cache_key for q in get_affected_queries.eventteam_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 18)
        self.assertTrue(TeamEventsQuery('frc254').cache_key in cache_keys)
        self.assertTrue(TeamEventsQuery('frc604').cache_key in cache_keys)
        self.assertTrue(TeamParticipationQuery('frc254').cache_key in cache_keys)
        self.assertTrue(TeamParticipationQuery('frc604').cache_key in cache_keys)
        self.assertTrue(TeamYearEventsQuery('frc254', 2014).cache_key in cache_keys)
        self.assertTrue(TeamYearEventsQuery('frc254', 2015).cache_key in cache_keys)
        self.assertTrue(TeamYearEventsQuery('frc604', 2014).cache_key in cache_keys)
        self.assertTrue(TeamYearEventsQuery('frc604', 2015).cache_key in cache_keys)
        self.assertTrue(TeamListYearQuery(2014, 0).cache_key in cache_keys)
        self.assertTrue(TeamListYearQuery(2014, 1).cache_key in cache_keys)
        self.assertTrue(TeamListYearQuery(2015, 0).cache_key in cache_keys)
        self.assertTrue(TeamListYearQuery(2015, 1).cache_key in cache_keys)
        self.assertTrue(EventTeamsQuery('2015casj').cache_key in cache_keys)
        self.assertTrue(EventTeamsQuery('2015cama').cache_key in cache_keys)
        self.assertTrue(EventTeamsMediasQuery('2015cama').cache_key in cache_keys)
        self.assertTrue(EventTeamsMediasQuery('2015casj').cache_key in cache_keys)
        self.assertTrue(EventTeamsPreferredMediasQuery('2015cama').cache_key in cache_keys)
        self.assertTrue(EventTeamsPreferredMediasQuery('2015casj').cache_key in cache_keys)

    def test_districtteam_updated(self):
        """DistrictTeam changes invalidate district-team and team-district queries."""
        affected_refs = {
            'district_key': {ndb.Key(District, '2015fim'), ndb.Key(District, '2015mar')},
            'team': {ndb.Key(Team, 'frc254'), ndb.Key(Team, 'frc604')}
        }
        cache_keys = [q.cache_key for q in get_affected_queries.districtteam_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 4)
        self.assertTrue(DistrictTeamsQuery('2015fim').cache_key in cache_keys)
        self.assertTrue(DistrictTeamsQuery('2015mar').cache_key in cache_keys)
        self.assertTrue(TeamDistrictsQuery('frc254').cache_key in cache_keys)
        self.assertTrue(TeamDistrictsQuery('frc604').cache_key in cache_keys)

    def test_district_updated(self):
        """District changes invalidate year-list and per-abbreviation history queries."""
        affected_refs = {
            'year': {2015, 2016},
            'abbreviation': {'ne', 'chs'}
        }
        cache_keys = [q.cache_key for q in get_affected_queries.district_updated(affected_refs)]
        self.assertEqual(len(cache_keys), 4)
        self.assertTrue(DistrictsInYearQuery(2015).cache_key in cache_keys)
        self.assertTrue(DistrictsInYearQuery(2016).cache_key in cache_keys)
        self.assertTrue(DistrictHistoryQuery('ne').cache_key in cache_keys)
        self.assertTrue(DistrictHistoryQuery('chs').cache_key in cache_keys)
| 50.694853 | 156 | 0.702299 |
28a1b7c810f623ff978873a3aa91a8f66c3267dd | 5,791 | py | Python | lib/galaxy/datatypes/triples.py | uio-bmi/galaxy-graph-peak-caller | 0e0e8e9bd6d461a4e25b49cea2e6753043f747e0 | [
"CC-BY-3.0"
] | 2 | 2017-10-23T14:44:12.000Z | 2018-01-14T10:37:28.000Z | lib/galaxy/datatypes/triples.py | uio-bmi/galaxy-graph-peak-caller | 0e0e8e9bd6d461a4e25b49cea2e6753043f747e0 | [
"CC-BY-3.0"
] | 30 | 2016-10-20T15:35:12.000Z | 2018-10-02T15:59:54.000Z | lib/galaxy/datatypes/triples.py | uio-bmi/galaxy-graph-peak-caller | 0e0e8e9bd6d461a4e25b49cea2e6753043f747e0 | [
"CC-BY-3.0"
] | 4 | 2017-06-12T09:54:31.000Z | 2019-03-15T12:02:39.000Z | """
Triple format classes
"""
import logging
import re
from . import (
binary,
data,
text,
xml
)
log = logging.getLogger(__name__)
class Triples(data.Data):
    """
    The abstract base class for the file format that can contain triples
    """
    edam_data = "data_0582"
    edam_format = "format_2376"
    file_ext = "triples"

    def sniff(self, filename):
        """
        Returns false and the user must manually set.
        """
        return False

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if dataset.dataset.purged:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
        else:
            dataset.peek = data.get_file_peek(dataset.file_name)
            dataset.blurb = 'Triple data'
class NTriples(data.Text, Triples):
    """
    The N-Triples triple data format
    """
    edam_format = "format_3256"
    file_ext = "nt"

    def sniff(self, filename):
        # A data line looks like:
        # <http://example.org/dir/relfile> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://example.org/type> .
        triple_pattern = re.compile(r'<[^>]*>\s<[^>]*>\s<[^>]*>\s\.')
        with open(filename, "r") as f:
            first_line = f.readline(1024)
        return bool(triple_pattern.search(first_line))

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if dataset.dataset.purged:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
        else:
            dataset.peek = data.get_file_peek(dataset.file_name)
            dataset.blurb = 'N-Triples triple data'
class N3(data.Text, Triples):
    """
    The N3 triple data format
    """
    edam_format = "format_3257"
    file_ext = "n3"

    def sniff(self, filename):
        """
        Returns false and the user must manually set.
        """
        # NOTE(review): sniffing is deliberately disabled here (unlike the
        # sibling NTriples/Turtle types) — presumably because N3 has no
        # cheap, unambiguous signature; confirm before enabling.
        return False

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if not dataset.dataset.purged:
            dataset.peek = data.get_file_peek(dataset.file_name)
            dataset.blurb = 'Notation-3 Triple data'
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
class Turtle(data.Text, Triples):
    """
    The Turtle triple data format
    """
    edam_format = "format_3255"
    file_ext = "ttl"

    def sniff(self, filename):
        # Turtle files conventionally open with a directive, e.g.
        # @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
        # or a @base declaration.
        with open(filename, "r") as f:
            line = f.readline(1024)
        if re.compile(r'@prefix\s+[^:]*:\s+<[^>]*>\s\.').search(line):
            return True
        return bool(re.compile(r'@base\s+<[^>]*>\s\.').search(line))

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if dataset.dataset.purged:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
        else:
            dataset.peek = data.get_file_peek(dataset.file_name)
            dataset.blurb = 'Turtle triple data'
# TODO: we might want to look at rdflib or a similar, larger lib/egg
class Rdf(xml.GenericXml, Triples):
    """
    Resource Description Framework format (http://www.w3.org/RDF/).
    """
    edam_format = "format_3261"
    file_ext = "rdf"

    def sniff(self, filename):
        with open(filename, "r") as f:
            firstlines = "".join(f.readlines(5000))
            # <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" ...
            match = re.compile(r'xmlns:([^=]*)="http://www.w3.org/1999/02/22-rdf-syntax-ns#"').search(firstlines)
            # BUG FIX: this previously read `if not match and ...`, which
            # returned False for every real RDF file and raised
            # AttributeError (None.group) for files without the xmlns
            # declaration. Require a match before using its group.
            if match and (match.group(1) + ":RDF") in firstlines:
                return True
        return False

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if not dataset.dataset.purged:
            dataset.peek = data.get_file_peek(dataset.file_name)
            dataset.blurb = 'RDF/XML triple data'
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
class Jsonld(text.Json, Triples):
    """
    The JSON-LD data format
    """
    # format not defined in edam so we use the json format number
    edam_format = "format_3464"
    file_ext = "jsonld"

    def sniff(self, filename):
        # _looks_like_json is inherited from text.Json — presumably a cheap
        # structural check; JSON-LD is then identified by its reserved
        # "@id" / "@context" keywords near the top of the file.
        if self._looks_like_json(filename):
            with open(filename, "r") as f:
                firstlines = "".join(f.readlines(5000))
                if "\"@id\"" in firstlines or "\"@context\"" in firstlines:
                    return True
        return False

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if not dataset.dataset.purged:
            dataset.peek = data.get_file_peek(dataset.file_name)
            dataset.blurb = 'JSON-LD triple data'
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
class HDT(binary.Binary, Triples):
    """
    The HDT triple data format
    """
    edam_format = "format_2376"
    file_ext = "hdt"

    def sniff(self, filename):
        with open(filename, "rb") as f:
            # BUG FIX: the file is opened in binary mode, so read() yields
            # bytes; comparing against the str "$HDT" is always False on
            # Python 3. Compare against the bytes magic number instead.
            if f.read(4) == b"$HDT":
                return True
        return False

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek and blurb text"""
        if not dataset.dataset.purged:
            dataset.peek = data.get_file_peek(dataset.file_name)
            dataset.blurb = 'HDT triple data'
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
| 30.640212 | 124 | 0.577448 |
8c9db84a5aed42e867e35335d1a9a03937ce2247 | 19,795 | py | Python | google/ads/googleads/v9/services/services/accessible_bidding_strategy_service/client.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/services/services/accessible_bidding_strategy_service/client.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/services/services/accessible_bidding_strategy_service/client.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import accessible_bidding_strategy
from google.ads.googleads.v9.services.types import (
accessible_bidding_strategy_service,
)
from .transports.base import (
AccessibleBiddingStrategyServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import AccessibleBiddingStrategyServiceGrpcTransport
class AccessibleBiddingStrategyServiceClientMeta(type):
    """Metaclass for the AccessibleBiddingStrategyService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Label -> transport class registry; "grpc" is the only (and therefore
    # default) transport registered for this service.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[AccessibleBiddingStrategyServiceTransport]]
    _transport_registry["grpc"] = AccessibleBiddingStrategyServiceGrpcTransport

    def get_transport_class(
        cls, label: Optional[str] = None,
    ) -> Type[AccessibleBiddingStrategyServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class AccessibleBiddingStrategyServiceClient(
metaclass=AccessibleBiddingStrategyServiceClientMeta
):
"""Service to read accessible bidding strategies."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
    """Convert api endpoint to mTLS endpoint.

    Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
    "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

    Args:
        api_endpoint (Optional[str]): the api endpoint to convert.
    Returns:
        str: converted mTLS api endpoint.
    """
    # None / empty endpoint: nothing to convert.
    if not api_endpoint:
        return api_endpoint

    # Decompose the host into name, optional ".mtls", optional ".sandbox"
    # and optional ".googleapis.com" segments.
    mtls_endpoint_re = re.compile(
        r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
    )

    m = mtls_endpoint_re.match(api_endpoint)
    name, mtls, sandbox, googledomain = m.groups()
    # Already an mTLS host, or not a *.googleapis.com host: leave unchanged.
    if mtls or not googledomain:
        return api_endpoint

    if sandbox:
        return api_endpoint.replace(
            "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
        )

    return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
# Default (non-mTLS) API hostname for this client.
DEFAULT_ENDPOINT = "googleads.googleapis.com"
# mTLS variant derived from DEFAULT_ENDPOINT via _get_default_mtls_endpoint
# (i.e. "googleads.mtls.googleapis.com"); __func__ unwraps the staticmethod
# so it can be called during class body execution.
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
    DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
    """Creates an instance of this client using the provided credentials info.

    Args:
        info (dict): The service account private key info.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        AccessibleBiddingStrategyServiceClient: The constructed client.
    """
    kwargs["credentials"] = service_account.Credentials.from_service_account_info(
        info
    )
    return cls(*args, **kwargs)

@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        AccessibleBiddingStrategyServiceClient: The constructed client.
    """
    kwargs["credentials"] = service_account.Credentials.from_service_account_file(
        filename
    )
    return cls(*args, **kwargs)

# Conventional GAPIC alias for the file-based constructor.
from_service_account_json = from_service_account_file
@property
def transport(self) -> AccessibleBiddingStrategyServiceTransport:
    """Return the transport used by the client instance.

    Returns:
        AccessibleBiddingStrategyServiceTransport: The transport used by the client instance.
    """
    return self._transport

def __enter__(self):
    # Context-manager support: the client itself is the managed value.
    return self

def __exit__(self, type, value, traceback):
    """Releases underlying transport's resources.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    self.transport.close()
@staticmethod
def accessible_bidding_strategy_path(
customer_id: str, bidding_strategy_id: str,
) -> str:
"""Return a fully-qualified accessible_bidding_strategy string."""
return "customers/{customer_id}/accessibleBiddingStrategies/{bidding_strategy_id}".format(
customer_id=customer_id, bidding_strategy_id=bidding_strategy_id,
)
@staticmethod
def parse_accessible_bidding_strategy_path(path: str) -> Dict[str, str]:
"""Parse a accessible_bidding_strategy path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/accessibleBiddingStrategies/(?P<bidding_strategy_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
    self,
    *,
    credentials: Optional[ga_credentials.Credentials] = None,
    transport: Union[
        str, AccessibleBiddingStrategyServiceTransport, None
    ] = None,
    client_options: Optional[client_options_lib.ClientOptions] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiate the accessible bidding strategy service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, ~.AccessibleBiddingStrategyServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (google.api_core.client_options.ClientOptions): Custom options for the
            client. It won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # Accept a plain dict in place of a ClientOptions instance.
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()
    # Create SSL credentials for mutual TLS if needed.
    # Reject anything other than the two documented values up front so a
    # typo doesn't silently disable (or enable) mTLS.
    if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
        "true",
        "false",
    ):
        raise ValueError(
            "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
        )
    use_client_cert = (
        os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
    )
    ssl_credentials = None
    is_mtls = False
    if use_client_cert:
        if client_options.client_cert_source:
            # Imported lazily so gRPC is only required when a client
            # certificate is actually provided.
            import grpc  # type: ignore

            cert, key = client_options.client_cert_source()
            ssl_credentials = grpc.ssl_channel_credentials(
                certificate_chain=cert, private_key=key
            )
            is_mtls = True
        else:
            # Fall back to the application-default client certificate.
            creds = SslCredentials()
            is_mtls = creds.is_mtls
            ssl_credentials = creds.ssl_credentials if is_mtls else None
    # Figure out which api endpoint to use.
    if client_options.api_endpoint is not None:
        # An explicitly configured endpoint always wins.
        api_endpoint = client_options.api_endpoint
    else:
        use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_env == "never":
            api_endpoint = self.DEFAULT_ENDPOINT
        elif use_mtls_env == "always":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT
        elif use_mtls_env == "auto":
            # "auto": use the mTLS endpoint only when a client cert is in play.
            api_endpoint = (
                self.DEFAULT_MTLS_ENDPOINT
                if is_mtls
                else self.DEFAULT_ENDPOINT
            )
        else:
            raise MutualTLSChannelError(
                "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
            )
    # Save or instantiate the transport.
    # Ordinarily, we provide the transport, but allowing a custom transport
    # instance provides an extensibility point for unusual situations.
    if isinstance(transport, AccessibleBiddingStrategyServiceTransport):
        # transport is a AccessibleBiddingStrategyServiceTransport instance.
        if credentials:
            raise ValueError(
                "When providing a transport instance, "
                "provide its credentials directly."
            )
        self._transport = transport
    elif isinstance(transport, str):
        # A transport name was given: look up its class and construct it.
        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials, host=self.DEFAULT_ENDPOINT
        )
    else:
        # Default: a gRPC transport against the endpoint chosen above.
        self._transport = AccessibleBiddingStrategyServiceGrpcTransport(
            credentials=credentials,
            host=api_endpoint,
            ssl_channel_credentials=ssl_credentials,
            client_info=client_info,
        )
def get_accessible_bidding_strategy(
    self,
    request: Union[
        accessible_bidding_strategy_service.GetAccessibleBiddingStrategyRequest,
        dict,
    ] = None,
    *,
    resource_name: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> accessible_bidding_strategy.AccessibleBiddingStrategy:
    r"""Returns the requested accessible bidding strategy in
    full detail.

    Args:
        request (Union[google.ads.googleads.v9.services.types.GetAccessibleBiddingStrategyRequest, dict]):
            The request object. Request message for
            [AccessibleBiddingStrategyService.GetAccessibleBiddingStrategy][google.ads.googleads.v9.services.AccessibleBiddingStrategyService.GetAccessibleBiddingStrategy].
        resource_name (:class:`str`):
            Required. The resource name of the
            accessible bidding strategy to fetch.
            This corresponds to the ``resource_name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.ads.googleads.v9.resources.types.AccessibleBiddingStrategy:
            Represents a view of
            BiddingStrategies owned by and shared
            with the customer. In contrast to
            BiddingStrategy, this resource includes
            strategies owned by managers of the
            customer and shared with this customer -
            in addition to strategies owned by this
            customer. This resource does not provide
            metrics and only exposes a limited
            subset of the BiddingStrategy
            attributes.
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    if request is not None and any([resource_name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Minor optimization to avoid making a copy if the user passes
    # in a accessible_bidding_strategy_service.GetAccessibleBiddingStrategyRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(
        request,
        accessible_bidding_strategy_service.GetAccessibleBiddingStrategyRequest,
    ):
        request = accessible_bidding_strategy_service.GetAccessibleBiddingStrategyRequest(
            request
        )
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if resource_name is not None:
            request.resource_name = resource_name
    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[
        self._transport.get_accessible_bidding_strategy
    ]
    # Certain fields should be provided within the metadata header;
    # add these here.  The routing header lets the backend route on
    # the resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("resource_name", request.resource_name),)
        ),
    )
    # Send the request.
    response = rpc(
        request, retry=retry, timeout=timeout, metadata=metadata,
    )
    # Done; return the response.
    return response
__all__ = ("AccessibleBiddingStrategyServiceClient",)
| 41.239583 | 176 | 0.64294 |
c7846bbe4e9c7ba2f971b4e222ef907d7f55b1c4 | 22,547 | py | Python | utils/lit/lit/llvm/config.py | kkeita/llvm_autofdo | 9ecd435b55a3e58e1e3f71d478ff02c57840a900 | [
"Apache-2.0"
] | 34 | 2019-05-29T03:15:48.000Z | 2022-03-24T03:14:58.000Z | utils/lit/lit/llvm/config.py | kkeita/llvm_autofdo | 9ecd435b55a3e58e1e3f71d478ff02c57840a900 | [
"Apache-2.0"
] | 4 | 2017-12-13T18:19:11.000Z | 2018-11-17T04:37:14.000Z | utils/lit/lit/llvm/config.py | kkeita/llvm_autofdo | 9ecd435b55a3e58e1e3f71d478ff02c57840a900 | [
"Apache-2.0"
] | 9 | 2019-06-11T06:15:44.000Z | 2022-03-07T16:34:50.000Z | import os
import platform
import re
import subprocess
import sys
import lit.util
from lit.llvm.subst import FindTool
from lit.llvm.subst import ToolSubst
def binary_feature(on, feature, off_prefix):
    """Return *feature* when *on* is truthy, otherwise the prefixed
    negative form (e.g. 'asan' vs 'not_asan')."""
    if on:
        return feature
    return off_prefix + feature
class LLVMConfig(object):
    """Shared helper that fills in a lit test-suite config with platform
    features, sanitizer features, environment tweaks, and tool
    substitutions common to LLVM projects."""

    def __init__(self, lit_config, config):
        """Derive available features and environment settings from
        *lit_config* (the global lit configuration) and *config*
        (this suite's config object)."""
        self.lit_config = lit_config
        self.config = config
        features = config.available_features
        self.use_lit_shell = False
        # Tweak PATH for Win32 to decide to use bash.exe or not.
        if sys.platform == 'win32':
            # For tests that require Windows to run.
            features.add('system-windows')
            # Seek sane tools in directories and set to $PATH.
            path = self.lit_config.getToolsPath(config.lit_tools_dir,
                                                config.environment['PATH'],
                                                ['cmp.exe', 'grep.exe', 'sed.exe'])
            if path is not None:
                self.with_environment('PATH', path, append_path=True)
            # Many tools behave strangely if these environment variables aren't set.
            self.with_system_environment(['SystemDrive', 'SystemRoot', 'TEMP', 'TMP'])
            self.use_lit_shell = True
        # Choose between lit's internal shell pipeline runner and a real shell. If
        # LIT_USE_INTERNAL_SHELL is in the environment, we use that as an override.
        lit_shell_env = os.environ.get('LIT_USE_INTERNAL_SHELL')
        if lit_shell_env:
            self.use_lit_shell = lit.util.pythonize_bool(lit_shell_env)
        if not self.use_lit_shell:
            features.add('shell')
        # Running on Darwin OS
        if platform.system() == 'Darwin':
            # FIXME: lld uses the first, other projects use the second.
            # We should standardize on the former.
            features.add('system-linker-mach-o')
            features.add('system-darwin')
        elif platform.system() == 'Windows':
            # For tests that require Windows to run.
            features.add('system-windows')
        elif platform.system() == "Linux":
            features.add('system-linux')
        elif platform.system() in ['FreeBSD']:
            # NOTE: 'config.available_features' is the same set bound to
            # 'features' above.
            config.available_features.add('system-freebsd')
        elif platform.system() == "NetBSD":
            features.add('system-netbsd')
        # Native compilation: host arch == default triple arch
        # Both of these values should probably be in every site config (e.g. as
        # part of the standard header. But currently they aren't)
        host_triple = getattr(config, 'host_triple', None)
        target_triple = getattr(config, 'target_triple', None)
        if host_triple and host_triple == target_triple:
            features.add('native')
        # Sanitizers.
        sanitizers = getattr(config, 'llvm_use_sanitizer', '')
        sanitizers = frozenset(x.lower() for x in sanitizers.split(';'))
        features.add(binary_feature('address' in sanitizers, 'asan', 'not_'))
        features.add(binary_feature('memory' in sanitizers, 'msan', 'not_'))
        features.add(binary_feature(
            'undefined' in sanitizers, 'ubsan', 'not_'))
        have_zlib = getattr(config, 'have_zlib', None)
        features.add(binary_feature(have_zlib, 'zlib', 'no'))
        # Check if we should run long running tests.
        long_tests = lit_config.params.get('run_long_tests', None)
        if lit.util.pythonize_bool(long_tests):
            features.add('long_tests')
        if target_triple:
            # Leak checking on x86-64 macOS requires a clang with LSan support.
            if re.match(r'^x86_64.*-apple', target_triple):
                host_cxx = getattr(config, 'host_cxx', None)
                if 'address' in sanitizers and self.get_clang_has_lsan(host_cxx, target_triple):
                    self.with_environment(
                        'ASAN_OPTIONS', 'detect_leaks=1', append_path=True)
            if re.match(r'^x86_64.*-linux', target_triple):
                features.add('x86_64-linux')
            if re.match(r'.*-windows-msvc$', target_triple):
                features.add('target-windows')
        use_gmalloc = lit_config.params.get('use_gmalloc', None)
        if lit.util.pythonize_bool(use_gmalloc):
            # Allow use of an explicit path for gmalloc library.
            # Will default to '/usr/lib/libgmalloc.dylib' if not set.
            gmalloc_path_str = lit_config.params.get('gmalloc_path',
                                                     '/usr/lib/libgmalloc.dylib')
            if gmalloc_path_str is not None:
                self.with_environment(
                    'DYLD_INSERT_LIBRARIES', gmalloc_path_str)
def with_environment(self, variable, value, append_path=False):
    """Set *variable* in the test environment.

    When *append_path* is True, *value* is a path (or list of paths) to
    be prepended to the variable's existing path list, de-duplicated;
    otherwise the variable is simply overwritten with *value*.
    """
    if append_path:
        # For paths, we should be able to take a list of them and process all
        # of them.
        paths_to_add = value
        if lit.util.is_string(paths_to_add):
            paths_to_add = [paths_to_add]

        def norm(x):
            # Normalize case and separators so duplicate detection works
            # on case-insensitive filesystems.
            return os.path.normcase(os.path.normpath(x))

        current_paths = self.config.environment.get(variable, None)
        if current_paths:
            current_paths = current_paths.split(os.path.pathsep)
            paths = [norm(p) for p in current_paths]
        else:
            paths = []
        # If we are passed a list [a b c], then iterating this list forwards
        # and adding each to the beginning would result in b c a. So we
        # need to iterate in reverse to end up with the original ordering.
        for p in reversed(paths_to_add):
            # Move it to the front if it already exists, otherwise insert it at the
            # beginning.
            p = norm(p)
            try:
                paths.remove(p)
            except ValueError:
                pass
            paths = [p] + paths
        value = os.pathsep.join(paths)
    self.config.environment[variable] = value
def with_system_environment(self, variables, append_path=False):
    """Copy the named variables from os.environ into the test environment.

    *variables* may be a single name or a list of names; variables that
    are unset (or empty) in the host environment are skipped.
    """
    if lit.util.is_string(variables):
        variables = [variables]
    for var_name in variables:
        inherited = os.environ.get(var_name)
        if inherited:
            self.with_environment(var_name, inherited, append_path)
def clear_environment(self, variables):
    """Remove each named variable from the test environment, if present."""
    env = self.config.environment
    for var_name in variables:
        env.pop(var_name, None)
def get_process_output(self, command):
    """Run *command* and return its decoded (stdout, stderr) pair.

    Aborts the test run via lit_config.fatal when the process cannot be
    launched at all.
    """
    try:
        proc = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=self.config.environment,
        )
        raw_out, raw_err = proc.communicate()
        return (lit.util.to_string(raw_out), lit.util.to_string(raw_err))
    except OSError:
        self.lit_config.fatal('Could not run process %s' % command)
def feature_config(self, features):
    """Add available features based on llvm-config output.

    *features* is a sequence of (llvm-config-argument, patterns) pairs.
    Each ``patterns`` entry is either a callable mapping the matching
    output line to an iterable of feature names, or a dict mapping a
    regex pattern to the feature to add when the pattern matches.
    """
    # Ask llvm-config about the specified feature.
    arguments = [x for (x, _) in features]
    config_path = os.path.join(self.config.llvm_tools_dir, 'llvm-config')
    output, _ = self.get_process_output([config_path] + arguments)
    lines = output.split('\n')
    # llvm-config prints one line per argument, in order, so zip the
    # output lines back up with the requested features.
    for (feature_line, (_, patterns)) in zip(lines, features):
        # We should have either a callable or a dictionary. If it's a
        # dictionary, grep each key against the output and use the value if
        # it matches. If it's a callable, it does the entire translation.
        if callable(patterns):
            features_to_add = patterns(feature_line)
            self.config.available_features.update(features_to_add)
        else:
            for (re_pattern, feature) in patterns.items():
                if re.search(re_pattern, feature_line):
                    self.config.available_features.add(feature)
# Note that when substituting %clang_cc1 also fill in the include directory of
# the builtin headers. Those are part of even a freestanding environment, but
# Clang relies on the driver to locate them.
def get_clang_builtin_include_dir(self, clang):
    """Return the builtin-header include directory reported by *clang*."""
    # FIXME: Rather than just getting the version, we should have clang print
    # out its resource dir here in an easy to scrape form.
    clang_dir, _ = self.get_process_output(
        [clang, '-print-file-name=include'])
    if not clang_dir:
        self.lit_config.fatal(
            "Couldn't find the include dir for Clang ('%s')" % clang)
    clang_dir = clang_dir.strip()
    if sys.platform in ['win32'] and not self.use_lit_shell:
        # Don't pass dosish path separator to msys bash.exe.
        clang_dir = clang_dir.replace('\\', '/')
    # Ensure the result is an ascii string, across Python2.5+ - Python3.
    return clang_dir
def get_clang_has_lsan(self, clang, triple):
    """Return True if *clang* supports LeakSanitizer when targeting *triple*.

    On Linux triples LSan is assumed available. On x86-64 macOS it requires
    a non-Apple clang of major version >= 5. Logs a warning and returns
    False when *clang* is unset or does not appear to be clang at all.
    """
    if not clang:
        self.lit_config.warning(
            'config.host_cxx is unset but test suite is configured to use sanitizers.')
        return False

    clang_binary = clang.split()[0]
    version_string, _ = self.get_process_output(
        [clang_binary, '--version'])
    # Idiomatic membership test (was: "not 'clang' in version_string").
    if 'clang' not in version_string:
        self.lit_config.warning(
            "compiler '%s' does not appear to be clang, " % clang_binary +
            'but test suite is configured to use sanitizers.')
        return False

    if re.match(r'.*-linux', triple):
        return True

    if re.match(r'^x86_64.*-apple', triple):
        # BUGFIX: the separator before the patch component was an unescaped
        # '.' (matched any character); also guard against an unparseable
        # version string instead of crashing on a None match.
        version_match = re.search(
            r'version ([0-9]+)\.([0-9]+)\.([0-9]+)', version_string)
        if version_match is None:
            return False
        if 'Apple LLVM' in version_string:
            # Apple LLVM doesn't yet support LSan
            return False
        # Open-source clang supports LSan on macOS from version 5 onwards.
        return int(version_match.group(1)) >= 5

    return False
def make_itanium_abi_triple(self, triple):
    """Return *triple* adjusted so it targets the Itanium C++ ABI."""
    parsed = re.match(r'(\w+)-(\w+)-(\w+)', triple)
    if not parsed:
        self.lit_config.fatal(
            "Could not turn '%s' into Itanium ABI triple" % triple)
    if parsed.group(3).lower() != 'windows':
        # All non-windows triples use the Itanium ABI.
        return triple
    # Windows targets need an explicit -gnu environment suffix.
    return '{}-{}-{}-gnu'.format(
        parsed.group(1), parsed.group(2), parsed.group(3))
def make_msabi_triple(self, triple):
    """Return *triple* adjusted so it targets the Microsoft C++ ABI.

    x86-family triples keep their ISA and vendor but get a
    windows-msvc OS/environment; non-x86 triples fall back to
    'i686-pc-windows-msvc'.
    """
    m = re.match(r'(\w+)-(\w+)-(\w+)', triple)
    if not m:
        self.lit_config.fatal(
            "Could not turn '%s' into MS ABI triple" % triple)
    isa = m.group(1).lower()
    vendor = m.group(2).lower()
    # Renamed from 'os': the original shadowed the 'os' module.
    os_name = m.group(3).lower()
    if os_name == 'windows' and re.match(r'.*-msvc$', triple):
        # If the OS is windows and environment is msvc, we're done.
        return triple
    if isa.startswith('x86') or isa == 'amd64' or re.match(r'i\d86', isa):
        # For x86 ISAs, adjust the OS.
        return isa + '-' + vendor + '-windows-msvc'
    # -msvc is not supported for non-x86 targets; use a default.
    return 'i686-pc-windows-msvc'
def add_tool_substitutions(self, tools, search_dirs=None):
    """Resolve *tools* (names or ToolSubst objects) and register their lit
    substitutions.

    Returns True when every non-ignored tool was resolved. On the first
    required-but-unresolvable tool it returns False without registering
    any substitution at all.
    """
    if not search_dirs:
        search_dirs = [self.config.llvm_tools_dir]
    if lit.util.is_string(search_dirs):
        search_dirs = [search_dirs]
    # Normalize plain strings into ToolSubst objects.
    tools = [x if isinstance(x, ToolSubst) else ToolSubst(x)
             for x in tools]
    search_dirs = os.pathsep.join(search_dirs)
    substitutions = []
    for tool in tools:
        match = tool.resolve(self, search_dirs)
        # Either no match occurred, or there was an unresolved match that
        # is ignored.
        if not match:
            continue
        subst_key, tool_pipe, command = match
        # An unresolved match occurred that can't be ignored. Fail without
        # adding any of the previously-discovered substitutions.
        if not command:
            return False
        substitutions.append((subst_key, tool_pipe + command))
    self.config.substitutions.extend(substitutions)
    return True
def use_default_substitutions(self):
    """Register the substitutions every LLVM test suite relies on:
    FileCheck, count, not, and %python."""
    tool_patterns = [
        ToolSubst('FileCheck', unresolved='fatal'),
        # Handle these specially as they are strings searched for during testing.
        ToolSubst(r'\| \bcount\b', command=FindTool(
            'count'), verbatim=True, unresolved='fatal'),
        ToolSubst(r'\| \bnot\b', command=FindTool('not'), verbatim=True, unresolved='fatal')]
    # %python expands to the interpreter running lit itself.
    self.config.substitutions.append(('%python', '"%s"' % (sys.executable)))
    self.add_tool_substitutions(
        tool_patterns, [self.config.llvm_tools_dir])
def use_llvm_tool(self, name, search_env=None, required=False, quiet=False):
    """Find the executable program 'name', optionally using the specified
    environment variable as an override before searching the
    configuration's PATH.

    Returns the tool path, or None when the tool was not found and not
    required; a required-but-missing tool is a fatal error.
    """
    # If the override is specified in the environment, use it without
    # validation.
    if search_env:
        tool = self.config.environment.get(search_env)
        if tool:
            return tool
    # Otherwise look in the path.
    tool = lit.util.which(name, self.config.environment['PATH'])
    if required and not tool:
        message = "couldn't find '{}' program".format(name)
        if search_env:
            message = message + \
                ', try setting {} in your environment'.format(search_env)
        self.lit_config.fatal(message)
    if tool:
        tool = os.path.normpath(tool)
        if not self.lit_config.quiet and not quiet:
            self.lit_config.note('using {}: {}'.format(name, tool))
    return tool
def use_clang(self, additional_tool_dirs=[], additional_flags=[], required=True):
    """Configure the test suite to be able to invoke clang.

    Sets up some environment variables important to clang, locates a
    just-built or installed clang, and add a set of standard
    substitutions useful to any test suite that makes use of clang.
    """
    # NOTE(review): the mutable default arguments are shared across calls;
    # this is safe only because neither list is mutated below.
    # Clear some environment variables that might affect Clang.
    #
    # This first set of vars are read by Clang, but shouldn't affect tests
    # that aren't specifically looking for these features, or are required
    # simply to run the tests at all.
    #
    # FIXME: Should we have a tool that enforces this?
    # safe_env_vars = ('TMPDIR', 'TEMP', 'TMP', 'USERPROFILE', 'PWD',
    #                  'MACOSX_DEPLOYMENT_TARGET', 'IPHONEOS_DEPLOYMENT_TARGET',
    #                  'VCINSTALLDIR', 'VC100COMNTOOLS', 'VC90COMNTOOLS',
    #                  'VC80COMNTOOLS')
    possibly_dangerous_env_vars = ['COMPILER_PATH', 'RC_DEBUG_OPTIONS',
                                   'CINDEXTEST_PREAMBLE_FILE', 'LIBRARY_PATH',
                                   'CPATH', 'C_INCLUDE_PATH', 'CPLUS_INCLUDE_PATH',
                                   'OBJC_INCLUDE_PATH', 'OBJCPLUS_INCLUDE_PATH',
                                   'LIBCLANG_TIMING', 'LIBCLANG_OBJTRACKING',
                                   'LIBCLANG_LOGGING', 'LIBCLANG_BGPRIO_INDEX',
                                   'LIBCLANG_BGPRIO_EDIT', 'LIBCLANG_NOTHREADS',
                                   'LIBCLANG_RESOURCE_USAGE',
                                   'LIBCLANG_CODE_COMPLETION_LOGGING']
    # Clang/Win32 may refer to %INCLUDE%. vsvarsall.bat sets it.
    if platform.system() != 'Windows':
        possibly_dangerous_env_vars.append('INCLUDE')
    self.clear_environment(possibly_dangerous_env_vars)
    # Tweak the PATH to include the tools dir and the scripts dir.
    # Put Clang first to avoid LLVM from overriding out-of-tree clang builds.
    exe_dir_props = [self.config.name.lower() + '_tools_dir', 'clang_tools_dir', 'llvm_tools_dir']
    paths = [getattr(self.config, pp) for pp in exe_dir_props
             if getattr(self.config, pp, None)]
    paths = additional_tool_dirs + paths
    self.with_environment('PATH', paths, append_path=True)
    lib_dir_props = [self.config.name.lower() + '_libs_dir', 'clang_libs_dir', 'llvm_shlib_dir', 'llvm_libs_dir']
    paths = [getattr(self.config, pp) for pp in lib_dir_props
             if getattr(self.config, pp, None)]
    self.with_environment('LD_LIBRARY_PATH', paths, append_path=True)
    # Discover the clang binary to use (overridable via $CLANG).
    self.config.clang = self.use_llvm_tool(
        'clang', search_env='CLANG', required=required)
    shl = getattr(self.config, 'llvm_shlib_dir', None)
    pext = getattr(self.config, 'llvm_plugin_ext', None)
    if shl:
        self.config.substitutions.append(('%llvmshlibdir', shl))
    if pext:
        self.config.substitutions.append(('%pluginext', pext))
    builtin_include_dir = self.get_clang_builtin_include_dir(self.config.clang)
    tool_substitutions = [
        ToolSubst('%clang', command=self.config.clang, extra_args=additional_flags),
        ToolSubst('%clang_analyze_cc1', command='%clang_cc1', extra_args=['-analyze', '%analyze']+additional_flags),
        ToolSubst('%clang_cc1', command=self.config.clang, extra_args=['-cc1', '-internal-isystem', builtin_include_dir, '-nostdsysteminc']+additional_flags),
        ToolSubst('%clang_cpp', command=self.config.clang, extra_args=['--driver-mode=cpp']+additional_flags),
        ToolSubst('%clang_cl', command=self.config.clang, extra_args=['--driver-mode=cl']+additional_flags),
        ToolSubst('%clangxx', command=self.config.clang, extra_args=['--driver-mode=g++']+additional_flags),
    ]
    self.add_tool_substitutions(tool_substitutions)
    self.config.substitutions.append(('%itanium_abi_triple',
                                      self.make_itanium_abi_triple(self.config.target_triple)))
    self.config.substitutions.append(('%ms_abi_triple',
                                      self.make_msabi_triple(self.config.target_triple)))
    self.config.substitutions.append(
        ('%resource_dir', builtin_include_dir))
    # The host triple might not be set, at least if we're compiling clang from
    # an already installed llvm.
    if self.config.host_triple and self.config.host_triple != '@LLVM_HOST_TRIPLE@':
        self.config.substitutions.append(('%target_itanium_abi_host_triple',
                                          '--target=%s' % self.make_itanium_abi_triple(self.config.host_triple)))
    else:
        self.config.substitutions.append(
            ('%target_itanium_abi_host_triple', ''))
    # Trap uses of raw tool names in tests; these substitutions rewrite
    # them into a loud error message.
    # FIXME: Find nicer way to prohibit this.
    self.config.substitutions.append(
        (' clang ', """\"*** Do not use 'clang' in tests, use '%clang'. ***\""""))
    self.config.substitutions.append(
        (' clang\+\+ ', """\"*** Do not use 'clang++' in tests, use '%clangxx'. ***\""""))
    self.config.substitutions.append(
        (' clang-cc ',
         """\"*** Do not use 'clang-cc' in tests, use '%clang_cc1'. ***\""""))
    self.config.substitutions.append(
        (' clang-cl ',
         """\"*** Do not use 'clang-cl' in tests, use '%clang_cl'. ***\""""))
    self.config.substitutions.append(
        (' clang -cc1 -analyze ',
         """\"*** Do not use 'clang -cc1 -analyze' in tests, use '%clang_analyze_cc1'. ***\""""))
    self.config.substitutions.append(
        (' clang -cc1 ',
         """\"*** Do not use 'clang -cc1' in tests, use '%clang_cc1'. ***\""""))
    self.config.substitutions.append(
        (' %clang-cc1 ',
         """\"*** invalid substitution, use '%clang_cc1'. ***\""""))
    self.config.substitutions.append(
        (' %clang-cpp ',
         """\"*** invalid substitution, use '%clang_cpp'. ***\""""))
    self.config.substitutions.append(
        (' %clang-cl ',
         """\"*** invalid substitution, use '%clang_cl'. ***\""""))
def use_lld(self, additional_tool_dirs=[], required=True):
    """Configure the test suite to be able to invoke lld.

    Sets up some environment variables important to lld, locates a
    just-built or installed lld, and add a set of standard
    substitutions useful to any test suite that makes use of lld.
    """
    # Tweak the PATH to include the tools dir and the scripts dir.
    exe_dir_props = [self.config.name.lower() + '_tools_dir', 'lld_tools_dir', 'llvm_tools_dir']
    paths = [getattr(self.config, pp) for pp in exe_dir_props
             if getattr(self.config, pp, None)]
    paths = additional_tool_dirs + paths
    self.with_environment('PATH', paths, append_path=True)
    lib_dir_props = [self.config.name.lower() + '_libs_dir', 'lld_libs_dir', 'llvm_libs_dir']
    paths = [getattr(self.config, pp) for pp in lib_dir_props
             if getattr(self.config, pp, None)]
    self.with_environment('LD_LIBRARY_PATH', paths, append_path=True)
    # Discover each of the lld flavors to use.
    ld_lld = self.use_llvm_tool('ld.lld', required=required)
    lld_link = self.use_llvm_tool('lld-link', required=required)
    ld64_lld = self.use_llvm_tool('ld64.lld', required=required)
    wasm_ld = self.use_llvm_tool('wasm-ld', required=required)
    was_found = ld_lld and lld_link and ld64_lld and wasm_ld
    tool_substitutions = []
    if ld_lld:
        tool_substitutions.append(ToolSubst('ld.lld', command=ld_lld))
    if lld_link:
        tool_substitutions.append(ToolSubst('lld-link', command=lld_link))
    if ld64_lld:
        tool_substitutions.append(ToolSubst('ld64.lld', command=ld64_lld))
    if wasm_ld:
        tool_substitutions.append(ToolSubst('wasm-ld', command=wasm_ld))
    self.add_tool_substitutions(tool_substitutions)
    # Truthy only when every flavor was found.
    return was_found
| 45.457661 | 162 | 0.594802 |
f4835b298124d0d4a4049bada49833bdfcbf8e49 | 5,374 | py | Python | project_V1/core/test/test_view.py | arnabtarwani/UCD-MSc-Project | 3ab9a8fc0a092a8dd03099d1d5fa55eb127ef3dc | [
"MIT"
] | 1 | 2019-09-13T16:09:40.000Z | 2019-09-13T16:09:40.000Z | project_V1/core/test/test_view.py | arnabtarwani/Buddyup-ucd-msc-project | 3ab9a8fc0a092a8dd03099d1d5fa55eb127ef3dc | [
"MIT"
] | 9 | 2020-03-24T17:39:10.000Z | 2022-02-10T10:09:09.000Z | project_V1/core/test/test_view.py | arnabtarwani/UCD-MSc-Project | 3ab9a8fc0a092a8dd03099d1d5fa55eb127ef3dc | [
"MIT"
] | null | null | null | from django.test import TestCase,Client
from django.urls import reverse
from core.models import following,tweets_data,notification_data
from login.models import User_detail,Registration
class TestCoreViews(TestCase):
    """Integration tests for the core app's views: twitter linking,
    follower management, trends, and friend addition.

    Each test drives a view through Django's test ``Client``; a logged-in
    user is simulated by writing ``username`` into the session. Uses
    ``assertEqual`` throughout (``assertEquals`` is a deprecated alias
    that was removed in Python 3.12).
    """

    def test_twitter_check_3(self):
        # Without a session, posting to twitter_check redirects (302).
        client = Client()
        response = client.post(reverse("twitter_check"))
        self.assertEqual(response.status_code, 302)

    def test_followers_check1(self):
        # Anonymous GET on the follower page redirects.
        client = Client()
        response = client.get(reverse("follower"))
        self.assertEqual(response.status_code, 302)

    def test_follower_check2(self):
        # Granting follow access renders the followers page.
        client = Client()
        session = client.session
        session['username'] = "Akashsri"
        session.save()
        response = client.post(reverse("follower"), {
            "friend": "buddyupucd",
            "status": "Grant",
        })
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "followers.html")

    def test_follower_check3(self):
        # Revoking follow access also renders the followers page.
        client = Client()
        session = client.session
        session['username'] = "Akashsri"
        session.save()
        response = client.post(reverse("follower"), {
            "friend": "buddyupucd",
            "status": "Revoke",
        })
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "followers.html")

    def test_trend_check1(self):
        # Anonymous GET on the trend page redirects.
        client = Client()
        response = client.get(reverse("trend"))
        self.assertEqual(response.status_code, 302)

    def test_trend_check2(self):
        # With an active followed friend, the trend page renders.
        following.objects.create(
            user_id="Akashsri",
            twitter_handle="buddyupucd",
            friend_Email="abc@gmail.com",
            url=",",
            isActive=1,
        )
        client = Client()
        session = client.session
        session['username'] = "Akashsri"
        session.save()
        response = client.post(reverse("trend"), {
            "friend": "buddyupucd",
        })
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "trend.html")

    def test_trend_check3(self):
        # Posting without a 'friend' field redirects.
        client = Client()
        session = client.session
        session['username'] = "Akashsri"
        session.save()
        response = client.post(reverse("trend"))
        self.assertEqual(response.status_code, 302)

    def test_addfriend(self):
        # Adding an already-followed friend renders the dashboard.
        client = Client()
        session = client.session
        session['username'] = "Akashsrivastava6"
        session.save()
        following.objects.create(
            user_id=session['username'],
            twitter_handle="buddyupucd",
            friend_Email="Akashsrivastava6@gmail.com",
            url="",
            isActive=1,
        )
        response = client.post(reverse("addfriend"), {
            "twitter_handle": "buddyupucd",
            "email": "Akashsrivastava6@gmail.com",
        })
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "dashboard.html")

    def test_addfriend_without_session(self):
        # Adding a friend while logged out redirects to login.
        client = Client()
        response = client.post(reverse("addfriend"), {
            "twitter_handle": "buddyupucd",
            "email": "Akashsrivastava6@gmail.com",
        })
        self.assertEqual(response.status_code, 302)

    def test_twitter_check(self):
        # A valid submission for an existing user renders the dashboard.
        User_detail.objects.create(
            username="Akashsrivastava6",
        )
        client = Client()
        session = client.session
        session['username'] = "Akashsrivastava6"
        session.save()
        response = client.post(reverse("twitter_check"), {
            "username": "Akashsrivastava6@gmail.com",
            "password": "Akashsri",
            "fname": "Akash",
            "lname": "Srivastava",
            "dob": "1992-03-03",
        })
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "dashboard.html")

    def test_twitter_check_1(self):
        # Same as test_twitter_check, but with an existing 'following' row
        # pointing at the user's twitter handle.
        User_detail.objects.create(
            username="Akashsrivastava6",
        )
        following.objects.create(
            user_id="Akashsrivastava",
            twitter_handle="Akashsrivastava6",
            friend_Email="abs@gmail.com",
            url="",
            isActive=1,
        )
        client = Client()
        session = client.session
        session['username'] = "Akashsrivastava6"
        session.save()
        response = client.post(reverse("twitter_check"), {
            "username": "Akashsrivastava6@gmail.com",
            "password": "Akashsri",
            "fname": "Akash",
            "lname": "Srivastava",
            "dob": "1992-03-03",
        })
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "dashboard.html")
| 28.737968 | 63 | 0.593599 |
7f19a8c27d2304f5964cf15aa930b9f08c7c016c | 23,170 | py | Python | sonorant/languagemodel.py | colinpollock/sonorous | a6a2792273f6962f7cc6f2814a0859373e945d78 | [
"MIT"
] | 2 | 2020-03-25T00:55:27.000Z | 2021-02-21T12:43:05.000Z | sonorant/languagemodel.py | colinpollock/sonorous | a6a2792273f6962f7cc6f2814a0859373e945d78 | [
"MIT"
] | 9 | 2020-03-02T22:24:17.000Z | 2020-03-06T01:09:03.000Z | sonorant/languagemodel.py | colinpollock/sonorous | a6a2792273f6962f7cc6f2814a0859373e945d78 | [
"MIT"
] | 1 | 2020-03-06T01:44:41.000Z | 2020-03-06T01:44:41.000Z | """A language model implemented in Torch.
The main class of interest is `LanguageModel`. It can be trained on a sequence of texts, where each
text is a tuple of tokens. This means that tokenization needs to happen before interacting with
LanguageModel. Below is a short walkthrough of everything you'd need from this class.
# Import these three classes.
>>> from sonorant.languagemodel import LanguageModel, ModelParams, Vocabulary
# Define the train and dev texts. Build a `Vocabulary` from the texts, which handles the mapping
# of tokens like "a" to integer indices.
>>> train_texts = [("a", "cat", "ate"), ("some", "dogs", "slept")]
>>> dev_texts = [("some", "cat", "ate"), ("dogs", "slept")]
>>> vocab = Vocabulary.from_texts(train_texts + dev_texts)
>>> len(vocab)
Out[1]: 9
>>> vocab['a']
Out[2]: 3
# Define ModelParams, which encapsulate the hyperparameters for a model. This is a useful
# abstraction that allows parameters to be passed around as a group rather than one by one and aids
# serialization.
>>> model_params = ModelParams(
rnn_type="gru",
embedding_dimension=50,
hidden_dimension=30,
num_layers=1,
max_epochs=2,
early_stopping_rounds=5,
)
# A model is defined by a vocabulary, model parameters, and the name of the device on which it'll
# run. Any Torch devices will work, but you probably want "cuda" if you're running on a GPU and
# "cpu" otherwise.
>>> model = LanguageModel(vocab, model_params, device_name="cpu")
# To train a model pass a sequence of train texts and dev texts to the `fit` function. At the end
# of every epoch the model prints out the loss for the dev set. Note that `max_epochs` and a few
# other parameters set in `model_params` can be overriddedn by passing them to `fit`.
>>> train_errors, dev_errors = model.fit(train_texts, dev_texts, max_epochs=10)
# Now I'll run through some basic operations over a trained model.
# You can calculate the perplexity of any text. Perplexity is basically the length normalized,
# inverse probability.
# length normalized.
>>> model.perplexity_of_text(dev_texts[0])
Out[3]: 14.466998156671036
# You can pass in a tuple of tokens and the model will return a probability distribution over the
# vocabulary for its predictions of which token will come next.
>>> model.next_probabilities(("a", "cat"))
Out[4]:
{'<PAD>': 0.0008051212062127888,
'<START>': 0.004382132086902857,
'<END>': 0.010958804748952389,
'a': 0.00777098536491394,
'cat': 0.005946762394160032,
'ate': 0.944864809513092,
'some': 0.00814706552773714,
'dogs': 0.0017555770464241505,
'slept': 0.015368768945336342}
# You can generate novel texts.
>>> model.generate(max_length=1000)
Out[5]: ('dogs', 'cat')
# You can save the model to disk and then load it.
>>> with open('model.pt', 'wb') as fh:
model.save(fh)
>>> with open('model.pt', 'rb') as fh:
the_same_model = LanguageModel.load(fh, device_name='cpu')
"""
import math
import sys
from typing import Dict, List, NamedTuple, Optional, Sequence, Tuple
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
from sonorant.utils import (
get_rnn_model_by_name,
get_torch_device_by_name,
has_decreased,
count_origins,
perplexity,
)
class ModelParams(NamedTuple):
    """Holder of hyperparameters for a model.

    - rnn_type: a string indicating the type of RNN ('rnn', 'lstm', 'gru').
    - embedding_dimension: the length of each token's embedding vector.
    - hidden_dimension: the size of the RNN/LSTM/GRU's hidden layer.
    - num_layers: number of layers in the RNN. Defaults to 1.
    - max_epochs: the maximum number of epochs to train for. Note that this is an
      argument to the model rather than the `fit` method so that it's easier to
      group all the hyperparameters in one place.
    - early_stopping_rounds: The model will train until the train score stops
      improving. Train error needs to decrease at least every
      early_stopping_rounds to continue training.
    - learning_rate: defaults to 1e-3
    - dropout: defaults to 0
    - l2_strength: L2 regularization strength. Default of 0 is no regularization.
    - batch_size: defaults to 1024
    """

    rnn_type: str
    embedding_dimension: int
    hidden_dimension: int
    num_layers: int
    max_epochs: int
    early_stopping_rounds: int
    learning_rate: float = 1e-3
    dropout: float = 0
    l2_strength: float = 0
    batch_size: int = 1024
class LanguageModel(nn.Module):
    """A trainable model built on top of PyTorch.

    Architecturally: an embedding layer feeds an RNN/LSTM/GRU whose outputs
    are projected back onto the vocabulary by a linear decoder.
    """

    def __init__(
        self, vocab: "Vocabulary", model_params: ModelParams, device_name: Optional[str]
    ):
        """Build the layers described by `model_params` and move them to the device.

        Args:
        - vocab: the Vocabulary over tokens; its size fixes the embedding and
          decoder output dimensions.
        - model_params: hyperparameters for the model.
        - device_name: name of the torch device to run on, e.g. 'cpu' or 'cuda'.
        """
        super(LanguageModel, self).__init__()
        self.vocab = vocab
        self.model_params = model_params
        self.device = get_torch_device_by_name(device_name)
        # Layers
        self._encoder = nn.Embedding(len(self.vocab), model_params.embedding_dimension)
        rnn_model = get_rnn_model_by_name(model_params.rnn_type)
        self._rnn = rnn_model(
            input_size=model_params.embedding_dimension,
            hidden_size=model_params.hidden_dimension,
            num_layers=model_params.num_layers,
            batch_first=True,
        )
        self._decoder = nn.Linear(model_params.hidden_dimension, len(self.vocab))
        self._dropout = nn.Dropout(model_params.dropout)
        self._l2_strength = model_params.l2_strength
        self.to(self.device)

    def forward(self, inputs, hidden_state=None):
        """Run a batch of encoded texts through the network.

        Args:
        - inputs: LongTensor of token indices; batch_first, so shape is
          (batch, sequence length).
        - hidden_state: optional RNN hidden state to continue from.

        Returns: (logits over the vocabulary at each position, new hidden state).
        """
        inputs = inputs.to(self.device)
        embedded = self._encoder(inputs)
        rnn_output, new_hidden_state = self._rnn(embedded, hidden_state)
        rnn_output = self._dropout(rnn_output)
        return self._decoder(rnn_output), new_hidden_state

    def fit(
        self,
        train_texts: Sequence[Tuple[str]],
        dev_texts: Sequence[Tuple[str]] = None,
        learning_rate: float = None,
        max_epochs: int = None,
        early_stopping_rounds: int = None,
        batch_size: int = None,
        print_every: int = 1,
    ) -> Tuple[List[float], List[float]]:
        """Fit the model to the training data.

        Args:
        - train_texts: list of lists of tokens. If the vocabulary is words then it would look like
          this: [["a", "cat"], ["the", "dog", ...]].
        - dev_texts: same format as `train_texts`, but used for printing out dev set errors during
          testing.
        - learning_rate: learning rate. Defaults to self.model_params.learning_rate if None.
        - max_epochs: the maximum number of epochs to train for. Defaults to
          self.model_params.max_epochs.
        - early_stopping_rounds: training stops early when the *train* loss has not
          decreased within this many epochs. Defaults to
          self.model_params.early_stopping_rounds.
        - batch_size: batch size for both train and assess. Defaults to
          self.model_params.batch_size.
        - print_every: how often to print a status line showing metrics. Defaults to printing every
          epoch.

        Returns a pair (train_losses, dev_losses), which are the losses at each epoch.
        """
        # Set None parameters passed in to their default values in `self`.
        learning_rate = (
            learning_rate
            if learning_rate is not None
            else self.model_params.learning_rate
        )
        max_epochs = (
            max_epochs if max_epochs is not None else self.model_params.max_epochs
        )
        early_stopping_rounds = (
            early_stopping_rounds
            if early_stopping_rounds is not None
            else self.model_params.early_stopping_rounds
        )
        batch_size = (
            batch_size if batch_size is not None else self.model_params.batch_size
        )
        self.to(self.device)
        # L2 regularization is applied via the optimizer's weight decay.
        optimizer = Adam(
            self.parameters(),
            lr=learning_rate,
            weight_decay=self.model_params.l2_strength,
        )
        criterion = nn.CrossEntropyLoss()
        train_loader = build_data_loader(train_texts, self.vocab, batch_size)
        dev_loader = (
            build_data_loader(dev_texts, self.vocab, batch_size)
            if dev_texts is not None
            else None
        )
        train_losses: List[float] = []
        dev_losses = []
        for epoch in range(1, max_epochs + 1):
            # Early stopping is keyed off the *train* losses (not dev losses).
            if not has_decreased(train_losses, early_stopping_rounds):
                train_loss, dev_loss = self._eval_and_print(
                    epoch,
                    train_texts,
                    train_loader,
                    dev_texts,
                    dev_loader,
                    print_status=True,
                )
                print(
                    f"Early stopping because of no decrease in {early_stopping_rounds} epochs.",
                    file=sys.stderr,
                )
                break
            self.train()
            # NOTE(review): train_epoch_loss is accumulated but never read.
            train_epoch_loss = 0.0
            for batch_num, (inputs, targets) in enumerate(train_loader, start=1):
                inputs = inputs.to(self.device)
                targets = targets.to(self.device)
                outputs, _ = self(inputs, hidden_state=None)
                # CrossEntropyLoss expects (batch, classes, sequence), hence the permute.
                loss = criterion(outputs.permute(0, 2, 1), targets)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                train_epoch_loss += loss.item()
                # Carriage-return status line that overwrites itself each batch.
                print(
                    "Epoch {}; Batch {} of {}; loss: {:.4f}{}".format(
                        epoch, batch_num, len(train_loader), loss.item(), " " * 100
                    ),
                    end="\r",
                )
            print_status = epoch % print_every == 0 or epoch == max_epochs
            train_loss, dev_loss = self._eval_and_print(
                epoch, train_texts, train_loader, dev_texts, dev_loader, print_status
            )
            train_losses.append(train_loss)
            dev_losses.append(dev_loss)
        return train_losses, dev_losses

    def _eval_and_print(
        self,
        epoch: int,
        train_texts: Sequence[Tuple[str, ...]],
        train_loader: DataLoader,
        dev_texts: Optional[Sequence[Tuple[str, ...]]] = None,
        dev_loader: Optional[DataLoader] = None,
        print_status: Optional[bool] = True,
    ) -> Tuple[float, float]:
        """Method to print out training progress, useful after an epoch has completed.

        It returns the loss against the training and dev sets, and if `print_status` is True also
        prints out those losses and shows some examples of what the model generates.

        Note that if `dev_texts` or `dev_loader` is None then the second return value will be 0.
        """
        train_loss = self.evaluate(train_loader)
        status = f"Epoch {epoch}: train loss: {train_loss:.4f}"
        if dev_loader is not None:
            dev_loss = self.evaluate(dev_loader)
            status += f"\tdev loss: {dev_loss:.4f}"
        else:
            dev_loss = 0.0
        if print_status:
            print(status)
            # Sample from the model and report how many generated texts are
            # copies of train/dev texts versus novel.
            generated_texts = [self.generate(1000) for _ in range(100)]
            (
                percent_train_origin,
                percent_dev_origin,
                percent_novel_origin,
            ) = count_origins(
                generated_texts, train_texts, dev_texts if dev_texts is not None else []
            )
            print(
                f"\tGenerated: in train: {percent_train_origin}%, assess: {percent_dev_origin}%, "
                f"novel: {percent_novel_origin}%"
            )
            for text in generated_texts[:5]:
                print("\t", " ".join(text))
        return train_loss, dev_loss

    def evaluate(self, loader: DataLoader) -> float:
        """Compute the average entropy per symbol on the input loader.

        Loss for every single predicted token is summed and that result is
        divided by the total number of tokens seen. Note that the base for
        entropy is e rather than 2.

        Per-symbol is useful since the return value for different size
        loaders or different batch sizes are the same.
        """
        self.eval()
        # reduction="sum" so we can normalize by the token count ourselves.
        criterion = nn.CrossEntropyLoss(reduction="sum")
        loss = 0.0
        total_tokens = 0
        for inputs, targets in loader:
            # NOTE(review): numel() counts PAD positions too — confirm intended.
            total_tokens += inputs.numel()
            inputs = inputs.to(self.device)
            targets = targets.to(self.device)
            with torch.no_grad():
                outputs, _ = self(inputs, hidden_state=None)
                loss += criterion(outputs.permute(0, 2, 1), targets).item()
        return loss / total_tokens

    def generate(
        self, max_length: int = 100, temperature: float = 1
    ) -> Tuple[str, ...]:
        """Generate a new text.

        Args:
        - max_length: the maximum number of tokens to generate. Defaults to 100.
        - temperature: higher will increase diversity, lower will more often select the top
          probability tokens. Defaults to 1, which has no effect.

        Returns: a tuple of tokens.
        """
        self.eval()
        generated = []
        token_idx = self.vocab.START_IDX
        hidden_state = None
        for _ in range(max_length):
            input_ = torch.LongTensor([token_idx]).unsqueeze(0)
            output, hidden_state = self(input_, hidden_state)
            # Temperature-scaled sampling from the softmax distribution.
            probabilities = F.softmax(output.squeeze().div(temperature), dim=0)
            token_idx = int(torch.multinomial(probabilities, 1).item())
            token = self.vocab.token_from_idx(token_idx)
            # Stop as soon as a dummy (PAD/START/END) token is sampled.
            if token in self.vocab.DUMMY_TOKENS:
                break
            generated.append(token)
        return tuple(generated)

    def next_probabilities(self, text: Tuple[str]) -> Dict[str, float]:
        """Return the probability of each token coming next.

        Args:
        - text: a sequence of tokens.

        Returns: a dict mapping each token in the vocabulary to a probability.
        """
        # Dropping the final token, which is the END token.
        encoded = self.vocab.encode_text(text)[:-1]
        input_ = torch.LongTensor(encoded).unsqueeze(0)
        output, _ = self(input_)
        probabilities = F.softmax(output, dim=-1)
        # The distribution at the last position is the next-token prediction.
        next_token_probabilities = probabilities[0, -1, :]
        return {
            self.vocab.token_from_idx(idx): probability.item()
            for idx, probability in enumerate(next_token_probabilities)
        }

    def conditional_probabilities_of_text(
        self, text: Tuple[str, ...]
    ) -> Tuple[float, ...]:
        """Returns the probability of each token in `text`.

        If `text` has two tokens (t1 and t2) then the returned tuple will be of length 3:
        1. P(t1|START)
        2. P(t2|START t1)
        3. P(END|START t1 t2)
        """
        encoded_text = self.vocab.encode_text(text)
        output, _ = self(torch.LongTensor(encoded_text).unsqueeze(0))
        output = F.softmax(output, dim=-1).squeeze()
        # At each step a distribution over all tokens is output. This represents
        # the model's predictions for what the next token will be. We pull out
        # the probability for whatever the next token actually is, and end up
        # with the probabilities for each of the actual tokens. Through the
        # chain rule we can get the overall probability for the full text.
        probabilities = []
        for step, next_token_idx in enumerate(encoded_text[1:]):
            probabilities.append(output[step, next_token_idx].item())
        return tuple(probabilities)

    def probability_of_text(self, text: Tuple[str, ...]) -> float:
        """Calculate the probability of the given text.

        Args:
        - text: a sequence of tokens.

        Returns: the probability.
        """
        # Sum log-probabilities rather than multiplying raw probabilities to
        # avoid floating point underflow on long texts.
        total_logprob = 0.0
        for probability in self.conditional_probabilities_of_text(text):
            total_logprob += math.log(probability)
        return math.exp(total_logprob)

    def perplexity_of_text(self, text: Tuple[str, ...]) -> float:
        """Calculate the perplexity of the given text.

        Note that this calls `probability_of_text`. That then calls
        `conditional_probabilities_of_text`, which is fairly expensive.
        """
        probability = self.probability_of_text(text)
        return perplexity(probability, len(text))

    def embedding_for(self, token: str):
        """Return the embedding for the specified token.

        Args:
        - token: a string present in `self.vocab`.

        Returns: a 1x`embedding_dimension` NumPy array.
        """
        self.eval()
        with torch.no_grad():
            token_idx = self.vocab[token]
            return (
                self._encoder(torch.LongTensor([token_idx]).to(self.device))
                .cpu()
                .numpy()
            )

    @property
    def embeddings(self):
        """Return the embeddings as a NumPy array."""
        return self._encoder.weight.cpu().detach().numpy()

    def save(self, file_handle):
        """Save a file to disk.

        Serializes everything needed to reconstruct the model: the vocabulary
        mapping, the hyperparameters, and the learned weights.
        """
        data = {
            "token_to_idx": self.vocab.token_to_idx,
            "model_params": self.model_params._asdict(),
            "state_dict": self.state_dict(),
        }
        torch.save(data, file_handle)

    @staticmethod
    def load(file_handle, device_name: str) -> "LanguageModel":
        """Load a model from disk that has been saved using the `save` method."""
        data = torch.load(file_handle, map_location=torch.device(device_name))
        vocab = Vocabulary(data["token_to_idx"])
        model_params = ModelParams(**data["model_params"])
        state_dict = data["state_dict"]
        model = LanguageModel(vocab, model_params, device_name)
        model.load_state_dict(state_dict)
        return model
class Vocabulary:
    """A vocabulary over tokens and operations on it.

    A Vocabulary is initialized by passing in a list of texts, where a text is
    a list of tokens. It is immutable and cannot be updated after being
    initialized.

    Three reserved "dummy" tokens (PAD, START, END) always occupy indices
    0, 1, 2, so the vocabulary size is the number of distinct tokens plus 3.

    >>> vocab = Vocabulary.from_texts([('a', 'b', 'c'), ('c', 'd')])
    >>> 'a' in vocab
    True
    >>> len(vocab)
    7
    >>> vocab.token_from_idx(vocab['b'])
    'b'
    """

    PAD = "<PAD>"
    START = "<START>"
    END = "<END>"
    DUMMY_TOKENS = {PAD, START, END}
    PAD_IDX = 0
    START_IDX = 1
    END_IDX = 2

    def __init__(self, token_to_idx: Dict[str, int]):
        self.token_to_idx = token_to_idx
        self._idx_to_token = {idx: token for (token, idx) in token_to_idx.items()}
        # Note that this only works because a Vocabulary is immutable
        # and no tokens can be added outside of __init__.
        self.tokens = set(token_to_idx)
        self.indices = set(self._idx_to_token)

    @classmethod
    def from_texts(cls, texts: Sequence[Tuple[str]]) -> "Vocabulary":
        """Initialize a `Vocabulary` from a Sequence of texts."""
        return cls(cls._build_token_to_idx(texts))

    def encode_text(self, text: Tuple[str, ...], is_target: bool = False):
        """Encode a text as an array of int indices.

        Args:
        - text: a tuple of tokens.
        - is_target: bool indicating how to wrap the output in START and END
          indicators. If False then the result is [START, ...tokens..., END]. If
          True then the result is [...tokens..., END, PAD]. This is useful for
          the target since it offsets each target token by one and lines up the
          RNN's prediction for the next token with the target.

        Returns: a NumPy array of ints representing each token.
        """
        if is_target is True:
            with_boundaries = text + (self.END, self.PAD)
        else:
            with_boundaries = (self.START,) + text + (self.END,)
        return np.array([self[token] for token in with_boundaries])

    def __getitem__(self, token: str) -> int:
        idx = self.token_to_idx.get(token)
        if idx is None:
            # Note that this could also just return an UNK value, which would be
            # another dummy like PAD, but I haven't needed it yet.
            raise KeyError(f"Token '{token}' is not in the vocabulary")
        return idx

    def __contains__(self, token: str) -> bool:
        return token in self.token_to_idx

    def token_from_idx(self, idx: int) -> str:
        """Return the token with the specified index.

        Raises `KeyError` if it's missing.
        """
        # Use .get so the descriptive KeyError below is actually reachable
        # (a direct dict lookup would raise a bare KeyError first).
        token = self._idx_to_token.get(idx)
        if token is None:
            raise KeyError(f"Token index '{idx}' is not in the vocabulary")
        return token

    def __eq__(self, other):
        if not isinstance(other, Vocabulary):
            return False
        return self.token_to_idx == other.token_to_idx

    def __len__(self):
        return len(self.token_to_idx)

    @classmethod
    def _build_token_to_idx(cls, texts: Sequence[Tuple[str]]) -> Dict[str, int]:
        """Build a token-to-index dictionary.

        Args:
        - texts: a collection of texts, each of which is a list of tokens.

        Returns: a dict mapping tokens to int indices, with the reserved dummy
        tokens at their fixed indices and every other token numbered in order
        of first appearance.

        Raises ValueError if any input text contains a reserved dummy token.
        """
        tokens = {token for text in texts for token in text}
        if any(dummy in tokens for dummy in cls.DUMMY_TOKENS):
            raise ValueError("Input text contains a reserved dummy token")
        token_to_idx = {
            cls.PAD: cls.PAD_IDX,
            cls.START: cls.START_IDX,
            cls.END: cls.END_IDX,
        }
        for text in texts:
            for token in text:
                if token not in token_to_idx:
                    token_to_idx[token] = len(token_to_idx)
        return token_to_idx
def build_data_loader(
    texts: Sequence[Tuple[str]], vocab: "Vocabulary", batch_size=128
) -> DataLoader:
    """Convert a list of texts into a DataLoader of (input, target) LongTensors.

    Args:
    - texts: list of texts, each of which is a list of tokens.
    - vocab: the Vocabulary used to encode each text.
    - batch_size: the batch size for the resulting data loader.

    Returns: a DataLoader wrapping a dataset that is a pair (inputs, targets). Each
    of those in the pair is a LongTensor of dimension (num texts, max text length).

    Since we're training a language model to predict the next token, the target for
    a given input text is each of the tokens shifted one to the right.

    input:  <START> K AE T <END>
    target: K AE T <END> <PAD>
    """
    input_tensors = [
        torch.LongTensor(vocab.encode_text(text, is_target=False)) for text in texts
    ]
    target_tensors = [
        torch.LongTensor(vocab.encode_text(text, is_target=True)) for text in texts
    ]
    # Shorter texts are right-padded with PAD_IDX so every row has equal length.
    dataset = TensorDataset(
        pad_sequence(input_tensors, batch_first=True, padding_value=vocab.PAD_IDX),
        pad_sequence(target_tensors, batch_first=True, padding_value=vocab.PAD_IDX),
    )
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
| 35.646154 | 99 | 0.629823 |
d2d6be629d7d8f647fe145678f7e56a35858f13f | 29,155 | py | Python | airflow/providers/amazon/aws/hooks/s3.py | Hartorn/airflow | a79e2d4c4aa105f3fac5ae6a28e29af9cd572407 | [
"Apache-2.0"
] | 1 | 2020-07-17T20:06:33.000Z | 2020-07-17T20:06:33.000Z | airflow/providers/amazon/aws/hooks/s3.py | Hartorn/airflow | a79e2d4c4aa105f3fac5ae6a28e29af9cd572407 | [
"Apache-2.0"
] | 37 | 2020-07-21T07:50:02.000Z | 2022-03-29T22:31:28.000Z | airflow/providers/amazon/aws/hooks/s3.py | vuppalli/airflow | dfe8337ca2d3ed173d9ecc112938271519792c40 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
Interact with AWS S3, using the boto3 library.
"""
import fnmatch
import gzip as gz
import io
import re
import shutil
from functools import wraps
from inspect import signature
from tempfile import NamedTemporaryFile
from typing import Optional
from urllib.parse import urlparse
from botocore.exceptions import ClientError
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.utils.helpers import chunks
def provide_bucket_name(func):
    """
    Function decorator that provides a bucket name taken from the connection
    in case no bucket name has been passed to the function.
    """
    sig = signature(func)

    @wraps(func)
    def wrapper(*args, **kwargs):
        bound = sig.bind(*args, **kwargs)
        if 'bucket_name' in bound.arguments:
            # Caller supplied a bucket explicitly; nothing to fill in.
            return func(*bound.args, **bound.kwargs)
        hook = args[0]
        if hook.aws_conn_id:
            connection = hook.get_connection(hook.aws_conn_id)
            if connection.schema:
                # The connection's schema field doubles as the default bucket.
                bound.arguments['bucket_name'] = connection.schema
        return func(*bound.args, **bound.kwargs)

    return wrapper
def unify_bucket_name_and_key(func):
    """
    Function decorator that unifies bucket name and key taken from the key
    in case no bucket name and at least a key has been passed to the function.
    """
    sig = signature(func)

    @wraps(func)
    def wrapper(*args, **kwargs):
        bound = sig.bind(*args, **kwargs)
        # Work out which parameter carries the key; a key is mandatory.
        if 'wildcard_key' in bound.arguments:
            key_name = 'wildcard_key'
        elif 'key' in bound.arguments:
            key_name = 'key'
        else:
            raise ValueError('Missing key parameter!')
        if 'bucket_name' not in bound.arguments:
            # Split a full s3:// URL into its bucket and relative key parts.
            bound.arguments['bucket_name'], bound.arguments[key_name] = \
                S3Hook.parse_s3_url(bound.arguments[key_name])
        return func(*bound.args, **bound.kwargs)

    return wrapper
class S3Hook(AwsBaseHook):
"""
Interact with AWS S3, using the boto3 library.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
:class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs):
super().__init__(client_type='s3', *args, **kwargs)
@staticmethod
def parse_s3_url(s3url):
"""
Parses the S3 Url into a bucket name and key.
:param s3url: The S3 Url to parse.
:rtype s3url: str
:return: the parsed bucket name and key
:rtype: tuple of str
"""
parsed_url = urlparse(s3url)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket_name instead of "{s3url}"'.format(s3url=s3url))
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return bucket_name, key
@provide_bucket_name
def check_for_bucket(self, bucket_name=None):
"""
Check if bucket_name exists.
:param bucket_name: the name of the bucket
:type bucket_name: str
:return: True if it exists and False if not.
:rtype: bool
"""
try:
self.get_conn().head_bucket(Bucket=bucket_name)
return True
except ClientError as e:
self.log.error(e.response["Error"]["Message"])
return False
@provide_bucket_name
def get_bucket(self, bucket_name=None):
"""
Returns a boto3.S3.Bucket object
:param bucket_name: the name of the bucket
:type bucket_name: str
:return: the bucket object to the bucket name.
:rtype: boto3.S3.Bucket
"""
s3_resource = self.get_resource_type('s3')
return s3_resource.Bucket(bucket_name)
@provide_bucket_name
def create_bucket(self, bucket_name=None, region_name=None):
"""
Creates an Amazon S3 bucket.
:param bucket_name: The name of the bucket
:type bucket_name: str
:param region_name: The name of the aws region in which to create the bucket.
:type region_name: str
"""
if not region_name:
region_name = self.get_conn().meta.region_name
if region_name == 'us-east-1':
self.get_conn().create_bucket(Bucket=bucket_name)
else:
self.get_conn().create_bucket(Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': region_name
})
@provide_bucket_name
def check_for_prefix(self, prefix, delimiter, bucket_name=None):
"""
Checks that a prefix exists in a bucket
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
:return: False if the prefix does not exist in the bucket and True if it does.
:rtype: bool
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return False if plist is None else prefix in plist
@provide_bucket_name
def list_prefixes(self, bucket_name=None, prefix='', delimiter='',
page_size=None, max_items=None):
"""
Lists prefixes in a bucket under prefix
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
:param page_size: pagination size
:type page_size: int
:param max_items: maximum items to return
:type max_items: int
:return: a list of matched prefixes and None if there are none.
:rtype: list
"""
config = {
'PageSize': page_size,
'MaxItems': max_items,
}
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter,
PaginationConfig=config)
has_results = False
prefixes = []
for page in response:
if 'CommonPrefixes' in page:
has_results = True
for common_prefix in page['CommonPrefixes']:
prefixes.append(common_prefix['Prefix'])
if has_results:
return prefixes
return None
@provide_bucket_name
def list_keys(self, bucket_name=None, prefix='', delimiter='',
page_size=None, max_items=None):
"""
Lists keys in a bucket under prefix and not containing delimiter
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
:param page_size: pagination size
:type page_size: int
:param max_items: maximum items to return
:type max_items: int
:return: a list of matched keys and None if there are none.
:rtype: list
"""
config = {
'PageSize': page_size,
'MaxItems': max_items,
}
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter,
PaginationConfig=config)
has_results = False
keys = []
for page in response:
if 'Contents' in page:
has_results = True
for k in page['Contents']:
keys.append(k['Key'])
if has_results:
return keys
return None
@provide_bucket_name
@unify_bucket_name_and_key
def check_for_key(self, key, bucket_name=None):
"""
Checks if a key exists in a bucket
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:return: True if the key exists and False if not.
:rtype: bool
"""
try:
self.get_conn().head_object(Bucket=bucket_name, Key=key)
return True
except ClientError as e:
self.log.error(e.response["Error"]["Message"])
return False
@provide_bucket_name
@unify_bucket_name_and_key
def get_key(self, key, bucket_name=None):
"""
Returns a boto3.s3.Object
:param key: the path to the key
:type key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
:return: the key object from the bucket
:rtype: boto3.s3.Object
"""
obj = self.get_resource_type('s3').Object(bucket_name, key)
obj.load()
return obj
@provide_bucket_name
@unify_bucket_name_and_key
def read_key(self, key, bucket_name=None):
"""
Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:return: the content of the key
:rtype: boto3.s3.Object
"""
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8')
@provide_bucket_name
@unify_bucket_name_and_key
def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
"""
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
"""
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload'].decode('utf-8')
for event in response['Payload']
if 'Records' in event)
@provide_bucket_name
@unify_bucket_name_and_key
def check_for_wildcard_key(self,
wildcard_key, bucket_name=None, delimiter=''):
"""
Checks that a key matching a wildcard expression exists in a bucket
:param wildcard_key: the path to the key
:type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
:param delimiter: the delimiter marks key hierarchy
:type delimiter: str
:return: True if a key exists and False if not.
:rtype: bool
"""
return self.get_wildcard_key(wildcard_key=wildcard_key,
bucket_name=bucket_name,
delimiter=delimiter) is not None
@provide_bucket_name
@unify_bucket_name_and_key
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
"""
Returns a boto3.s3.Object object matching the wildcard expression
:param wildcard_key: the path to the key
:type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
:param delimiter: the delimiter marks key hierarchy
:type delimiter: str
:return: the key object from the bucket or None if none has been found.
:rtype: boto3.s3.Object
"""
prefix = re.split(r'[*]', wildcard_key, 1)[0]
key_list = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
if key_list:
key_matches = [k for k in key_list if fnmatch.fnmatch(k, wildcard_key)]
if key_matches:
return self.get_key(key_matches[0], bucket_name)
return None
@provide_bucket_name
@unify_bucket_name_and_key
def load_file(self,
filename,
key,
bucket_name=None,
replace=False,
encrypt=False,
gzip=False,
acl_policy=None):
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
:param gzip: If True, the file will be compressed locally
:type gzip: bool
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
:type acl_policy: str
"""
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
if gzip:
filename_gz = ''
with open(filename, 'rb') as f_in:
filename_gz = f_in.name + '.gz'
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
if acl_policy:
extra_args['ACL'] = acl_policy
client = self.get_conn()
client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args)
@provide_bucket_name
@unify_bucket_name_and_key
def load_string(self,
string_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
encoding='utf-8',
acl_policy=None):
"""
Loads a string to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param string_data: str to set as content for the key.
:type string_data: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
:param encoding: The string to byte encoding
:type encoding: str
:param acl_policy: The string to specify the canned ACL policy for the
object to be uploaded
:type acl_policy: str
"""
bytes_data = string_data.encode(encoding)
file_obj = io.BytesIO(bytes_data)
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
file_obj.close()
@provide_bucket_name
@unify_bucket_name_and_key
def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
acl_policy=None):
"""
Loads bytes to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
:param acl_policy: The string to specify the canned ACL policy for the
object to be uploaded
:type acl_policy: str
"""
file_obj = io.BytesIO(bytes_data)
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
file_obj.close()
@provide_bucket_name
@unify_bucket_name_and_key
def load_file_obj(self, file_obj, key, bucket_name=None,
                  replace=False, encrypt=False, acl_policy=None):
    """
    Loads a file object to S3

    :param file_obj: The file-like object to set as the content for the S3 key.
    :type file_obj: file-like object
    :param key: S3 key that will point to the file
    :type key: str
    :param bucket_name: Name of the bucket in which to store the file
    :type bucket_name: str
    :param replace: A flag that indicates whether to overwrite the key
        if it already exists.
    :type replace: bool
    :param encrypt: If True, S3 encrypts the file on the server,
        and the file is stored in encrypted form at rest in S3.
    :type encrypt: bool
    :param acl_policy: The string to specify the canned ACL policy for the
        object to be uploaded
    :type acl_policy: str
    """
    # Thin wrapper: all the work happens in the shared upload helper.
    self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
def _upload_file_obj(self, file_obj, key, bucket_name=None,
                     replace=False, encrypt=False, acl_policy=None):
    """Shared upload implementation behind the public ``load_*`` helpers.

    Raises ``ValueError`` when the key already exists and ``replace`` is
    False; otherwise streams ``file_obj`` to S3 with the requested
    server-side encryption and canned ACL options.
    """
    if not replace and self.check_for_key(key, bucket_name):
        raise ValueError("The key {key} already exists.".format(key=key))
    # Only include optional upload arguments when they were requested.
    extra_args = {}
    if encrypt:
        extra_args['ServerSideEncryption'] = "AES256"
    if acl_policy:
        extra_args['ACL'] = acl_policy
    self.get_conn().upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args)
def copy_object(self,
                source_bucket_key,
                dest_bucket_key,
                source_bucket_name=None,
                dest_bucket_name=None,
                source_version_id=None,
                acl_policy='private'):
    """
    Creates a copy of an object that is already stored in S3.

    Note: the S3 connection used here needs to have access to both
    source and destination bucket/key.

    :param source_bucket_key: The key of the source object.
        It can be either full s3:// style url or relative path from root level.
        When it's specified as a full s3:// url, please omit source_bucket_name.
    :type source_bucket_key: str
    :param dest_bucket_key: The key of the object to copy to.
        The convention to specify `dest_bucket_key` is the same
        as `source_bucket_key`.
    :type dest_bucket_key: str
    :param source_bucket_name: Name of the S3 bucket where the source object is in.
        It should be omitted when `source_bucket_key` is provided as a full s3:// url.
    :type source_bucket_name: str
    :param dest_bucket_name: Name of the S3 bucket to where the object is copied.
        It should be omitted when `dest_bucket_key` is provided as a full s3:// url.
    :type dest_bucket_name: str
    :param source_version_id: Version ID of the source object (OPTIONAL)
    :type source_version_id: str
    :param acl_policy: The string to specify the canned ACL policy for the
        object to be copied which is private by default.
    :type acl_policy: str
    """
    def resolve(bucket_name, bucket_key, name_arg, key_arg):
        # Resolve one (optional bucket name, key-or-url) pair into a
        # (bucket, key) tuple; rejects a full s3:// url when a bucket
        # name was also supplied. Deduplicates the previously copy-pasted
        # validation for source and destination.
        if bucket_name is None:
            return self.parse_s3_url(bucket_key)
        parsed_url = urlparse(bucket_key)
        if parsed_url.scheme != '' or parsed_url.netloc != '':
            raise AirflowException(
                'If {name_arg} is provided, {key_arg} should be relative path '
                'from root level, rather than a full s3:// url'.format(
                    name_arg=name_arg, key_arg=key_arg))
        return bucket_name, bucket_key

    dest_bucket_name, dest_bucket_key = resolve(
        dest_bucket_name, dest_bucket_key,
        'dest_bucket_name', 'dest_bucket_key')
    source_bucket_name, source_bucket_key = resolve(
        source_bucket_name, source_bucket_key,
        'source_bucket_name', 'source_bucket_key')

    copy_source = {'Bucket': source_bucket_name,
                   'Key': source_bucket_key,
                   'VersionId': source_version_id}
    response = self.get_conn().copy_object(Bucket=dest_bucket_name,
                                           Key=dest_bucket_key,
                                           CopySource=copy_source,
                                           ACL=acl_policy)
    return response
@provide_bucket_name
def delete_bucket(self, bucket_name: str, force_delete: bool = False) -> None:
    """
    To delete s3 bucket, delete all s3 bucket objects and then delete the bucket.

    :param bucket_name: Bucket name
    :type bucket_name: str
    :param force_delete: Enable this to delete bucket even if not empty
    :type force_delete: bool
    :return: None
    :rtype: None
    """
    if force_delete:
        # S3 refuses to delete a non-empty bucket, so empty it first.
        keys_to_remove = self.list_keys(bucket_name=bucket_name)
        if keys_to_remove:
            self.delete_objects(bucket=bucket_name, keys=keys_to_remove)
    self.conn.delete_bucket(Bucket=bucket_name)
def delete_objects(self, bucket, keys):
    """
    Delete keys from the bucket.

    :param bucket: Name of the bucket in which you are going to delete object(s)
    :type bucket: str
    :param keys: The key(s) to delete from S3 bucket.
        When ``keys`` is a string, it's supposed to be the key name of
        the single object to delete.
        When ``keys`` is a list, it's supposed to be the list of the
        keys to delete.
    :type keys: str or list
    :raises AirflowException: if the service reports errors for any key
    """
    key_list = [keys] if isinstance(keys, str) else keys
    client = self.get_conn()
    # We can only send a maximum of 1000 keys per request.
    # For details see:
    # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.delete_objects
    for chunk in chunks(key_list, chunk_size=1000):
        response = client.delete_objects(
            Bucket=bucket,
            Delete={"Objects": [{"Key": k} for k in chunk]}
        )
        self.log.info("Deleted: %s",
                      [entry['Key'] for entry in response.get("Deleted", [])])
        if "Errors" in response:
            errors_keys = [entry['Key'] for entry in response.get("Errors", [])]
            raise AirflowException("Errors when deleting: {}".format(errors_keys))
@provide_bucket_name
@unify_bucket_name_and_key
def download_file(
    self,
    key: str,
    bucket_name: Optional[str] = None,
    local_path: Optional[str] = None
) -> str:
    """
    Downloads a file from the S3 location to the local file system.

    :param key: The key path in S3.
    :type key: str
    :param bucket_name: The specific bucket to use.
    :type bucket_name: Optional[str]
    :param local_path: The local path to the downloaded file. If no path is provided it will use the
        system's temporary directory.
    :type local_path: Optional[str]
    :return: the file name.
    :rtype: str
    """
    self.log.info('Downloading source S3 file from Bucket %s with path %s', bucket_name, key)
    if not self.check_for_key(key, bucket_name):
        raise AirflowException(
            f'The source file in Bucket {bucket_name} with path {key} does not exist')
    source_object = self.get_key(key, bucket_name)
    # delete=False: the caller is handed the path and owns the file.
    with NamedTemporaryFile(dir=local_path, prefix='airflow_tmp_', delete=False) as tmp_file:
        source_object.download_fileobj(tmp_file)
    return tmp_file.name
def generate_presigned_url(self, client_method, params=None, expires_in=3600, http_method=None):
    """
    Generate a presigned url given a client, its method, and arguments

    :param client_method: The client method to presign for.
    :type client_method: str
    :param params: The parameters normally passed to ClientMethod.
    :type params: dict
    :param expires_in: The number of seconds the presigned url is valid for.
        By default it expires in an hour (3600 seconds).
    :type expires_in: int
    :param http_method: The http method to use on the generated url.
        By default, the http method is whatever is used in the method's model.
    :type http_method: str
    :return: The presigned url, or None if signing failed.
    :rtype: str
    """
    client = self.get_conn()
    try:
        return client.generate_presigned_url(
            ClientMethod=client_method,
            Params=params,
            ExpiresIn=expires_in,
            HttpMethod=http_method)
    except ClientError as e:
        # Log the AWS-provided message and signal failure with None
        # rather than propagating the ClientError.
        self.log.error(e.response["Error"]["Message"])
        return None
| 37.1875 | 117 | 0.597873 |
75c111c58003b486bcee95d0672c44f12a8a3879 | 22,318 | py | Python | activity/activity_PubmedArticleDeposit.py | gnott/elife-bot | 584c315d15d1289e0d2c27c28aaaae31174812e4 | [
"MIT"
] | null | null | null | activity/activity_PubmedArticleDeposit.py | gnott/elife-bot | 584c315d15d1289e0d2c27c28aaaae31174812e4 | [
"MIT"
] | null | null | null | activity/activity_PubmedArticleDeposit.py | gnott/elife-bot | 584c315d15d1289e0d2c27c28aaaae31174812e4 | [
"MIT"
] | null | null | null | import os
import boto.swf
import json
import importlib
import time
import arrow
import glob
import re
import requests
from collections import namedtuple
import activity
import boto.s3
from boto.s3.connection import S3Connection
import provider.simpleDB as dblib
import provider.article as articlelib
import provider.s3lib as s3lib
import provider.lax_provider as lax_provider
"""
PubmedArticleDeposit activity
"""
class activity_PubmedArticleDeposit(activity.activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
    """Set up the activity: SWF metadata, helper providers, S3 folder
    names, and the status flags later reported by email.

    :param settings: deployment settings module for the bot
    :param logger: logger object
    :param conn: SWF connection (optional)
    :param token: SWF task token (optional)
    :param activity_task: SWF activity task data (optional)
    """
    activity.activity.__init__(self, settings, logger, conn, token, activity_task)
    self.name = "PubmedArticleDeposit"
    self.version = "1"
    # SWF activity timeout configuration (seconds)
    self.default_task_heartbeat_timeout = 30
    self.default_task_schedule_to_close_timeout = 60 * 30
    self.default_task_schedule_to_start_timeout = 30
    self.default_task_start_to_close_timeout = 60 * 15
    self.description = ("Download article XML from pubmed outbox, generate pubmed " +
                        "article XML, and deposit with pubmed.")
    # Directory where POA library is stored
    self.poa_lib_dir_name = "elife-poa-xml-generation"
    # Where we specify the library to be imported
    self.elife_poa_lib = None
    # Import the libraries we will need
    self.import_imports()
    # Create output directories
    self.create_activity_directories()
    self.date_stamp = self.set_datestamp()
    # Data provider where email body is saved
    self.db = dblib.SimpleDB(settings)
    # Instantiate a new article object to provide some helper functions
    self.article = articlelib.article(self.settings, self.get_tmp_dir())
    # Bucket for outgoing files
    self.publish_bucket = settings.poa_packaging_bucket
    self.outbox_folder = "pubmed/outbox/"
    self.published_folder = "pubmed/published/"
    # Track the success of some steps; None means the step was not attempted
    self.activity_status = None
    self.generate_status = None
    self.approve_status = None
    self.ftp_status = None
    self.outbox_status = None
    self.publish_status = None
    # Cached outbox listing, populated lazily by get_outbox_s3_key_names()
    self.outbox_s3_key_names = None
    # Track XML files selected for pubmed XML
    self.xml_file_to_doi_map = {}
    self.article_published_file_names = []
    self.article_not_published_file_names = []
def do_activity(self, data=None):
    """
    Activity, do the work

    Orchestrates the full deposit: download outbox XML from S3, generate
    the PubMed deposit XML, approve it, FTP it to the endpoint, then move
    outbox files to the published folder and queue a status email.
    Always returns True so the workflow proceeds; step outcomes are
    recorded in the *_status attributes and reported by email.
    """
    if self.logger:
        self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))
    # Download the S3 objects
    self.download_files_from_s3_outbox()
    # Generate pubmed XML
    self.generate_status = self.generate_pubmed_xml()
    # Approve files for publishing
    self.approve_status = self.approve_for_publishing()
    if self.approve_status is True:
        try:
            # Publish files
            self.ftp_files_to_endpoint(
                from_dir=self.elife_poa_lib.settings.TMP_DIR,
                file_type="/*.xml",
                sub_dir="")
            self.ftp_status = True
        except:
            # NOTE(review): bare except deliberately turns any FTP failure
            # into a soft failure recorded in ftp_status
            self.ftp_status = False
        if self.ftp_status is True:
            # Clean up outbox
            print "Moving files from outbox folder to published folder"
            self.clean_outbox()
            self.upload_pubmed_xml_to_s3()
            self.outbox_status = True
        if self.ftp_status is True:
            self.publish_status = True
        elif self.ftp_status is False:
            self.publish_status = False
    # Set the activity status of this activity based on successes
    # (publish_status of None, i.e. nothing approved, still counts as success)
    if self.publish_status is not False:
        self.activity_status = True
    else:
        self.activity_status = False
    # Send email
    # Only if there were files approved for publishing
    if len(self.article_published_file_names) > 0:
        self.add_email_to_queue()
    # Return the activity result, True or False
    result = True
    return result
def set_datestamp(self):
    """Return the current UTC date as a YYYYMMDD string.

    Uses strftime instead of manual str()/zfill() concatenation;
    the output is identical for four-digit years.
    """
    return arrow.utcnow().datetime.strftime('%Y%m%d')
def download_files_from_s3_outbox(self):
    """
    Connect to the S3 bucket, and from the outbox folder,
    download the .xml files to be processed.

    Files are saved into the POA library STAGING_TO_HW_DIR directory.
    """
    file_extensions = [".xml"]
    bucket_name = self.publish_bucket
    # Connect to S3 and bucket
    s3_conn = S3Connection(self.settings.aws_access_key_id,
                           self.settings.aws_secret_access_key)
    bucket = s3_conn.lookup(bucket_name)
    s3_key_names = s3lib.get_s3_key_names_from_bucket(
        bucket=bucket,
        prefix=self.outbox_folder,
        file_extensions=file_extensions)
    for name in s3_key_names:
        # Download objects from S3 and save to disk
        s3_key = bucket.get_key(name)
        filename = name.split("/")[-1]
        if re.search(".*\\.xml$", name):
            dirname = self.elife_poa_lib.settings.STAGING_TO_HW_DIR
            filename_plus_path = dirname + os.sep + filename
            # Context manager guarantees the file handle is closed even
            # if the S3 download raises part-way through (the original
            # leaked the handle on exception).
            with open(filename_plus_path, "wb") as f:
                s3_key.get_contents_to_file(f)
def parse_article_xml(self, article_xml_files):
    """
    Given a list of article XML files, parse them into objects
    and save the file name for later use

    :param article_xml_files: list of paths to article XML files
    :returns: list of parsed article objects; also populates
        self.xml_file_to_doi_map keyed by article DOI
    """
    # For each article XML file, parse it and save the filename for later
    articles = []
    for article_xml in article_xml_files:
        article_list = None
        article_xml_list = [article_xml]
        try:
            # Convert the XML files to article objects
            article_list = self.elife_poa_lib.parse.build_articles_from_article_xmls(
                article_xml_list)
        except:
            # NOTE(review): bare except silently skips files that fail to
            # parse; consider logging the failure for visibility
            continue
        # Set the published date on v2, v3 etc. files
        # (presumably a 'v' in the file name marks a versioned VoR file
        # -- TODO confirm the naming convention)
        if article_xml.find('v') > -1:
            article = None
            if len(article_list) > 0:
                article = article_list[0]
            # NOTE(review): article.doi is read before the None check below;
            # an empty article_list here would raise AttributeError
            pub_date_date = self.article.get_article_bucket_pub_date(article.doi, "poa")
            if article is not None and pub_date_date is not None:
                # Emmulate the eLifeDate object use in the POA generation package
                eLifeDate = namedtuple("eLifeDate", "date_type date")
                pub_date = eLifeDate("pub", pub_date_date)
                article.add_date(pub_date)
        if len(article_list) > 0:
            article = article_list[0]
            articles.append(article)
            # Add article to the DOI to file name map
            self.xml_file_to_doi_map[article.doi] = article_xml
    return articles
def get_article_version_from_lax(self, article_id):
    """
    Temporary fix to set the version of the article if available

    :returns: the highest version from lax, or the string "-1"
        when lax has no version for the article
    """
    version = lax_provider.article_highest_version(article_id, self.settings)
    return "-1" if version is None else version
def generate_pubmed_xml(self):
    """
    Using the POA generatePubMedXml module

    Parse the downloaded article XML, decide which articles are published
    (consulting lax), and build PubMed deposit XML into settings.TMP_DIR
    for the published ones. Also fills article_published_file_names and
    article_not_published_file_names for later reporting.

    :returns: True on success (or when there was nothing to generate),
        False if XML generation raised an exception
    """
    article_xml_files = glob.glob(self.elife_poa_lib.settings.STAGING_TO_HW_DIR + "/*.xml")
    articles = self.parse_article_xml(article_xml_files)
    # For each VoR article, set was_ever_poa property
    published_articles = []
    for article in articles:
        xml_file_name = self.xml_file_to_doi_map[article.doi]
        article_id = self.article.get_doi_id(article.doi)
        # Check if article was ever poa
        # Must be set to True or False to get it published
        if article.doi and article.doi == '10.7554/eLife.11190':
            # Edge case, ignore this article PoA
            article.was_ever_poa = False
        else:
            article.was_ever_poa = lax_provider.was_ever_poa(article_id, self.settings)
        # Check if each article is published
        is_published = lax_provider.published_considering_poa_status(
            article_id=article_id,
            settings=self.settings,
            is_poa=article.is_poa,
            was_ever_poa=article.was_ever_poa)
        if is_published is True:
            # Try to add the article version if in lax
            try:
                version = self.get_article_version_from_lax(article_id)
            except:
                # NOTE(review): bare except treats any lax failure as
                # "no version available"
                version = None
            if version and version > 0:
                article.version = version
            # Add published article object to be processed
            published_articles.append(article)
            # Add filename to the list of published files
            self.article_published_file_names.append(xml_file_name)
        else:
            # Add the file to the list of not published articles, may be used later
            self.article_not_published_file_names.append(xml_file_name)
    # Will write the XML to the TMP_DIR
    if len(published_articles) > 0:
        try:
            self.elife_poa_lib.generate.build_pubmed_xml_for_articles(published_articles)
        except:
            return False
    return True
def approve_for_publishing(self):
    """
    Final checks before publishing files to the endpoint

    :returns: False when the generated-XML directory is empty,
        True otherwise (a fuller file-set checker may come later)
    """
    generated_xml_files = glob.glob(self.elife_poa_lib.settings.TMP_DIR + "/*.xml")
    # An empty TMP_DIR means there is nothing to publish.
    return len(generated_xml_files) > 0
def get_filename_from_path(self, f, extension):
    """
    Get a filename minus the supplied file extension
    and without any folder or path

    :param f: file name, optionally with a leading path
    :param extension: extension string to strip, e.g. '.xml'
    :returns: bare file name with extension and path removed
    """
    # Everything before the first occurrence of the extension.
    filename = f.split(extension)[0]
    try:
        # Drop any leading directory components.
        return filename.split(os.sep)[-1]
    except:
        return filename
def ftp_files_to_endpoint(self, from_dir, file_type, sub_dir=None):
    """
    Using the POA module, FTP files to endpoint
    as specified by the file_type to use in the glob
    e.g. "/*.zip"

    :param from_dir: directory to look for files in
    :param file_type: glob suffix appended to from_dir
    :param sub_dir: optional remote sub directory for the FTP upload
    """
    matching_files = glob.glob(from_dir + file_type)
    self.elife_poa_lib.ftp.ftp_to_endpoint(matching_files, sub_dir)
def get_outbox_s3_key_names(self, force=None):
    """
    Separately get a list of S3 key names from the outbox
    for reporting purposes, excluding the outbox folder itself.
    The listing is cached on the instance unless force is truthy.
    """
    # Return cached values if available
    if self.outbox_s3_key_names and not force:
        return self.outbox_s3_key_names
    # Connect to S3 and bucket
    s3_conn = S3Connection(self.settings.aws_access_key_id,
                           self.settings.aws_secret_access_key)
    bucket = s3_conn.lookup(self.publish_bucket)
    key_names = s3lib.get_s3_key_names_from_bucket(
        bucket=bucket,
        prefix=self.outbox_folder)
    # The outbox folder may be listed as a key itself; drop it if present
    if self.outbox_folder in key_names:
        key_names.remove(self.outbox_folder)
    self.outbox_s3_key_names = key_names
    return self.outbox_s3_key_names
def get_to_folder_name(self):
    """
    From the date_stamp
    return the S3 folder name to save published files into
    """
    # published folder + YYYYMMDD date folder
    return self.published_folder + self.date_stamp + "/"
def clean_outbox(self):
    """
    Clean out the S3 outbox folder

    Moves only the files recorded in article_published_file_names from
    the outbox folder to the dated published folder. Each key is copied
    first and deleted only after the copy succeeded.
    """
    # Save the list of outbox contents to report on later (cached)
    outbox_s3_key_names = self.get_outbox_s3_key_names()
    to_folder = self.get_to_folder_name()
    # Move only the published files from the S3 outbox to the published folder
    bucket_name = self.publish_bucket
    # Connect to S3 and bucket
    s3_conn = S3Connection(self.settings.aws_access_key_id,
                           self.settings.aws_secret_access_key)
    bucket = s3_conn.lookup(bucket_name)
    # Concatenate the expected S3 outbox file names
    s3_key_names = []
    for name in self.article_published_file_names:
        filename = name.split(os.sep)[-1]
        s3_key_name = self.outbox_folder + filename
        s3_key_names.append(s3_key_name)
    for name in s3_key_names:
        # Do not delete the from_folder itself, if it is in the list
        if name != self.outbox_folder:
            filename = name.split("/")[-1]
            new_s3_key_name = to_folder + filename
            # First copy
            new_s3_key = None
            try:
                new_s3_key = bucket.copy_key(new_s3_key_name, bucket_name, name)
            except:
                # NOTE(review): a failed copy is silently skipped, which
                # leaves the original key in the outbox untouched
                pass
            # Then delete the old key if successful
            if isinstance(new_s3_key, boto.s3.key.Key):
                old_s3_key = bucket.get_key(name)
                old_s3_key.delete()
def upload_pubmed_xml_to_s3(self):
    """
    Upload a copy of the pubmed XML to S3 for reference,
    under the dated published folder in a batch/ sub folder.
    """
    generated_xml_files = glob.glob(self.elife_poa_lib.settings.TMP_DIR + "/*.xml")
    # Connect to S3 and bucket
    s3_conn = S3Connection(self.settings.aws_access_key_id,
                           self.settings.aws_secret_access_key)
    bucket = s3_conn.lookup(self.publish_bucket)
    s3_folder_name = self.published_folder + self.date_stamp + "/" + "batch/"
    for xml_file in generated_xml_files:
        s3_key = boto.s3.key.Key(bucket)
        s3_key.key = (s3_folder_name
                      + self.get_filename_from_path(xml_file, '.xml') + '.xml')
        s3_key.set_contents_from_filename(xml_file, replace=True)
def add_email_to_queue(self):
    """
    After do_activity is finished, send emails to recipients
    on the status

    :returns: True once all emails have been queued
    """
    # Establish the DB connection used by elife_add_email_to_email_queue()
    # (the returned connection object itself was never used).
    self.db.connect()
    # Note: Create a verified sender email address, only done once
    #conn.verify_email_address(self.settings.ses_sender_email)
    current_time = time.gmtime()
    body = self.get_email_body(current_time)
    subject = self.get_email_subject(current_time)
    sender_email = self.settings.ses_poa_sender_email
    # Handle either a single recipient or a list of recipients
    # (isinstance instead of the non-idiomatic type() == list check).
    if isinstance(self.settings.ses_admin_email, list):
        recipient_email_list = list(self.settings.ses_admin_email)
    else:
        recipient_email_list = [self.settings.ses_admin_email]
    for email in recipient_email_list:
        # Add the email to the email queue
        self.db.elife_add_email_to_email_queue(
            recipient_email=email,
            sender_email=sender_email,
            email_type="PubmedArticleDeposit",
            format="text",
            subject=subject,
            body=body)
    return True
def get_activity_status_text(self, activity_status):
    """
    Given the activity status boolean, return a human
    readable text version

    :param activity_status: True for success; anything else is failure
    """
    # `is True` (not truthiness) matches the original behaviour exactly.
    return "Success!" if activity_status is True else "FAILED."
def get_email_subject(self, current_time):
    """
    Assemble the email subject line: activity status, number of
    outbox files processed, timestamp and SWF domain.

    :param current_time: time.struct_time for the timestamp
    """
    datetime_string = time.strftime('%Y-%m-%d %H:%M', current_time)
    activity_status_text = self.get_activity_status_text(self.activity_status)
    # Count the files moved from the outbox, the files that were processed
    outbox_s3_key_names = self.get_outbox_s3_key_names()
    files_count = len(outbox_s3_key_names) if outbox_s3_key_names else 0
    return (self.name + " " + activity_status_text +
            " files: " + str(files_count) +
            ", " + datetime_string +
            ", eLife SWF domain: " + self.settings.domain)
def get_email_body(self, current_time):
    """
    Format the body of the email

    :param current_time: time.struct_time used for the report timestamp
    :returns: plain-text email body summarising step statuses, outbox
        contents, published/unpublished file names, and SWF details
    """
    body = ""
    date_format = '%Y-%m-%dT%H:%M:%S.000Z'
    datetime_string = time.strftime(date_format, current_time)
    activity_status_text = self.get_activity_status_text(self.activity_status)
    # Bulk of body
    body += self.name + " status:" + "\n"
    body += "\n"
    body += activity_status_text + "\n"
    body += "\n"
    body += "activity_status: " + str(self.activity_status) + "\n"
    body += "generate_status: " + str(self.generate_status) + "\n"
    body += "approve_status: " + str(self.approve_status) + "\n"
    body += "ftp_status: " + str(self.ftp_status) + "\n"
    body += "publish_status: " + str(self.publish_status) + "\n"
    body += "outbox_status: " + str(self.outbox_status) + "\n"
    body += "\n"
    body += "Outbox files: " + "\n"
    outbox_s3_key_names = self.get_outbox_s3_key_names()
    files_count = 0
    if outbox_s3_key_names:
        files_count = len(outbox_s3_key_names)
    if files_count > 0:
        for name in outbox_s3_key_names:
            body += name + "\n"
    else:
        body += "No files in outbox." + "\n"
    # Report on published files
    if len(self.article_published_file_names) > 0:
        body += "\n"
        body += "Published files included in pubmed XML: " + "\n"
        for name in self.article_published_file_names:
            body += name.split(os.sep)[-1] + "\n"
    # Report on not published files
    if len(self.article_not_published_file_names) > 0:
        body += "\n"
        body += "Files in pubmed outbox not yet published: " + "\n"
        for name in self.article_not_published_file_names:
            body += name.split(os.sep)[-1] + "\n"
    # SWF workflow details footer
    body += "\n"
    body += "-------------------------------\n"
    body += "SWF workflow details: " + "\n"
    body += "activityId: " + str(self.get_activityId()) + "\n"
    body += "As part of workflowId: " + str(self.get_workflowId()) + "\n"
    body += "As at " + datetime_string + "\n"
    body += "Domain: " + self.settings.domain + "\n"
    body += "\n"
    body += "\n\nSincerely\n\neLife bot"
    return body
def import_imports(self):
    """
    Customised importing of the external library
    to override the settings

    MUST load settings module first, override the values
    BEFORE loading anything else, or the override will not take effect
    """
    # Load the files from parent directory - hellish imports but they
    # seem to work now
    dir_name = self.poa_lib_dir_name
    # Three ordered steps: import package, override settings, import modules
    self.import_poa_lib(dir_name)
    self.override_poa_settings(dir_name)
    self.import_poa_modules(dir_name)
def import_poa_lib(self, dir_name):
    """
    POA lib import Step 1: import external library by directory name

    :param dir_name: directory name of the POA library package
    """
    self.elife_poa_lib = __import__(dir_name)
    # Reload so any previously-imported copy is reset
    self.reload_module(self.elife_poa_lib)
def override_poa_settings(self, dir_name):
    """
    POA lib import Step 2: import settings modules then override

    :param dir_name: directory name of the POA library package
    """
    # Load external library settings
    importlib.import_module(dir_name + ".settings")
    # Reload the module fresh, so original directory names are reset
    self.reload_module(self.elife_poa_lib.settings)
    settings = self.elife_poa_lib.settings
    # Override the settings: write POA output inside this activity's tmp dir
    settings.STAGING_TO_HW_DIR = self.get_tmp_dir() + os.sep + settings.STAGING_TO_HW_DIR
    settings.TMP_DIR = self.get_tmp_dir() + os.sep + settings.TMP_DIR
    # Override the FTP settings with the bot environment settings
    settings.FTP_URI = self.settings.PUBMED_FTP_URI
    settings.FTP_USERNAME = self.settings.PUBMED_FTP_USERNAME
    settings.FTP_PASSWORD = self.settings.PUBMED_FTP_PASSWORD
    settings.FTP_CWD = self.settings.PUBMED_FTP_CWD
def import_poa_modules(self, dir_name="elife-poa-xml-generation"):
    """
    POA lib import Step 3: import modules now that settings are overridden

    :param dir_name: directory name of the POA library package
    """
    # Now we can continue with imports; each module is reloaded so it
    # picks up the overridden settings values
    self.elife_poa_lib.parse = importlib.import_module(dir_name + ".parsePoaXml")
    self.reload_module(self.elife_poa_lib.parse)
    self.elife_poa_lib.generate = importlib.import_module(dir_name + ".generatePubMedXml")
    self.reload_module(self.elife_poa_lib.generate)
    self.elife_poa_lib.ftp = importlib.import_module(dir_name + ".ftp_to_highwire")
    self.reload_module(self.elife_poa_lib.ftp)
def reload_module(self, module):
    """
    Attempt to reload an imported module to reset it.
    A module that cannot be reloaded is simply left in its
    current state.
    """
    try:
        reload(module)
    except Exception:
        # Catch Exception rather than a bare except so that
        # system-exiting exceptions (KeyboardInterrupt, SystemExit)
        # still propagate.
        pass
def create_activity_directories(self):
    """
    Create the directories in the activity tmp_dir.

    Each directory gets its own try/except so that one directory
    already existing (or otherwise failing) does not prevent the
    other from being created; the original wrapped both mkdir calls
    in a single try block.
    """
    for directory in (self.elife_poa_lib.settings.STAGING_TO_HW_DIR,
                      self.elife_poa_lib.settings.TMP_DIR):
        try:
            os.mkdir(directory)
        except OSError:
            pass
| 34.547988 | 95 | 0.604803 |
1d36691ce9176f3881ac709c0adf792b41141b70 | 1,188 | py | Python | tests/unit_tests/cx_core/feature_support/cover_support_test.py | xaviml/z2m_ikea_controller | e612af5a913e8b4784dcaa23ea5319115427d083 | [
"MIT"
] | 19 | 2019-11-21T19:51:40.000Z | 2020-01-14T09:24:33.000Z | tests/unit_tests/cx_core/feature_support/cover_support_test.py | xaviml/z2m_ikea_controller | e612af5a913e8b4784dcaa23ea5319115427d083 | [
"MIT"
] | 11 | 2019-11-20T16:43:35.000Z | 2020-01-17T16:23:06.000Z | tests/unit_tests/cx_core/feature_support/cover_support_test.py | xaviml/z2m_ikea_controller | e612af5a913e8b4784dcaa23ea5319115427d083 | [
"MIT"
] | 5 | 2019-12-20T21:31:07.000Z | 2020-01-06T18:49:52.000Z | from typing import List
import pytest
from cx_core.feature_support import FeatureSupport
from cx_core.feature_support.cover import CoverSupport
from cx_core.type_controller import Entity, TypeController
@pytest.mark.parametrize(
    "number, expected_supported_features",
    [
        # 1 -> OPEN only
        (1, [CoverSupport.OPEN]),
        # 15 -> OPEN | CLOSE | SET_COVER_POSITION | STOP
        (
            15,
            [
                CoverSupport.OPEN,
                CoverSupport.CLOSE,
                CoverSupport.SET_COVER_POSITION,
                CoverSupport.STOP,
            ],
        ),
        # 149 -> a mix of tilt and position flags
        (
            149,
            [
                CoverSupport.SET_TILT_POSITION,
                CoverSupport.OPEN_TILT,
                CoverSupport.SET_COVER_POSITION,
                CoverSupport.OPEN,
            ],
        ),
    ],
)
async def test_is_supported(
    fake_type_controller: TypeController[Entity],
    number: int,
    expected_supported_features: List[int],
) -> None:
    """Every feature encoded in the supported-features bitmask is
    reported as supported by FeatureSupport.is_supported()."""
    feature_support = FeatureSupport(fake_type_controller)
    # Inject the raw bitmask directly rather than going through the entity.
    feature_support._supported_features = number
    for expected_supported_feature in expected_supported_features:
        assert await feature_support.is_supported(expected_supported_feature)
| 28.285714 | 77 | 0.633838 |
adbb3ed1566d69bf488ff890a247eea6b4cfb7a1 | 6,976 | py | Python | tests/client_tests/test_tw_preprocessor.py | unfoldingWord-dev/door43-job-handler | d26424810706875dd6eef33288900ea486de2e03 | [
"MIT"
] | null | null | null | tests/client_tests/test_tw_preprocessor.py | unfoldingWord-dev/door43-job-handler | d26424810706875dd6eef33288900ea486de2e03 | [
"MIT"
] | 66 | 2018-10-16T19:14:36.000Z | 2020-10-23T03:17:48.000Z | tests/client_tests/test_tw_preprocessor.py | unfoldingWord-dev/door43-job-handler | d26424810706875dd6eef33288900ea486de2e03 | [
"MIT"
] | null | null | null | import os
import tempfile
import unittest
import shutil
import markdown2
from bs4 import BeautifulSoup
from resource_container.ResourceContainer import RC
from preprocessors.preprocessors import do_preprocess, TwPreprocessor
from general_tools.file_utils import unzip, read_file
class TestTwPreprocessor(unittest.TestCase):
resources_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'resources')
def setUp(self):
"""Runs before each test."""
self.out_dir = ''
self.temp_dir = ""
def tearDown(self):
"""Runs after each test."""
# delete temp files
if os.path.isdir(self.out_dir):
shutil.rmtree(self.out_dir, ignore_errors=True)
if os.path.isdir(self.temp_dir):
shutil.rmtree(self.temp_dir, ignore_errors=True)
# def test_tw_preprocessor(self):
# # given
# repo_name = 'en_tw'
# file_name = os.path.join('raw_sources', repo_name + '.zip')
# rc, repo_dir, self.temp_dir = self.extractFiles(file_name, repo_name)
# repo_dir = os.path.join(repo_dir)
# self.out_dir = tempfile.mkdtemp(prefix='output_')
# # when
# results = do_preprocess('Translation_Words', 'dummyOwner', 'dummyURL', rc, repo_dir, self.out_dir)
# # then
# self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'index.json')))
# self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'kt.md')))
# self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'names.md')))
# self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'other.md')))
# kt = read_file(os.path.join(self.out_dir, 'kt.md'))
# names = read_file(os.path.join(self.out_dir, 'names.md'))
# other = read_file(os.path.join(self.out_dir, 'other.md'))
# soup = BeautifulSoup(markdown2.markdown(kt, extras=['markdown-in-html', 'tables']), 'html.parser')
# self.assertEqual(soup.h1.text, 'Key Terms')
# self.assertEqual(soup.h2.text, 'abomination, abominations, abominable')
# self.assertIsNotNone(soup.find('a', {'id': 'adoption'}))
# self.assertEqual(len(soup.find_all('li')), 4009)
# # Test links have been converted
# # self.assertIsNotNone(soup.find("a", {"href": "#accuracy-check"}))
# # self.assertIsNotNone(soup.find("a", {"href": "03-translate.html#figs-explicit"}))
# # make sure no old links exist
# self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'manifest.yaml')))
# self.assertTrue('(rc:' not in kt)
# self.assertTrue('(rc:' not in names)
# self.assertTrue('(rc:' not in other)
# self.assertTrue('../' not in kt)
# self.assertTrue('../' not in names)
# self.assertTrue('../' not in other)
def test_fix_links(self):
# given
# rc = RC(os.path.join(self.resources_dir, 'manifests', 'tw')) # RJH: this folder doesn't exist
# NOTE: This causes RC to find details for language 'tw' ('Twi')
rc = RC()
repo_owner, repo_name = 'dummyOwner', 'Door43'
current_category = 'names'
current_term = 'sheep'
tw = TwPreprocessor('dummyURL', rc, repo_owner, tempfile.gettempdir(), tempfile.gettempdir())
tw.repo_name = repo_name
content = "This has links to the same category: (See also: [titus](../names/titus.md), [timothy](../names/timothy.md)"
expected = "This has links to the same category: (See also: [titus](INVALID names/titus), [timothy](INVALID names/timothy)"
# when
converted = tw.fix_tW_links(content, current_category, current_term, repo_owner)
# then
self.assertEqual(converted, expected)
# given
content = """This has links to other categories:
(See also:[lamb](../kt/lamb.md), [license](../other/license.md)"""
expected = """This has links to other categories:
(See also:[lamb](INVALID kt/lamb), [license](INVALID other/license)"""
# when
converted = tw.fix_tW_links(content, current_category, current_term, repo_owner)
# then
self.assertEqual(converted, expected)
# # given
# content = """This has links to the same category and others:
# (See also: [titus](../names/titus.md), [timothy](../names/timothy.md), [lamb](../kt/lamb.md),
# [license](../other/license.md)"""
# expected = """This has links to the same category and others:
# (See also: [titus](#titus), [timothy](#timothy), [lamb](kt.html#lamb),
# [license](other.html#license)"""
# # when
# converted = tw.fix_tW_links(content, current_category, current_term, repo_owner)
# # then
# self.assertEqual(converted, expected)
# # given
# content = """This link should NOT be converted: [webpage](http://example.com/somewhere/outthere) """
# expected = """This link should NOT be converted: [webpage](http://example.com/somewhere/outthere) """
# # when
# converted = tw.fix_tW_links(content, current_category, current_term, repo_owner)
# # then
# self.assertEqual(converted, expected)
# # given
# content = """This [link](rc://en/tn/help/ezr/09/01) is a rc link that should go to
# ezr/09/01.md in the en_tn repo"""
# expected = f"""This [link](https://git.door43.org/{repo_owner}/en_tn/src/branch/master/ezr/09/01.md) is a rc link that should go to
# ezr/09/01.md in the en_tn repo"""
# # when
# converted = tw.fix_tW_links(content, current_category, current_term, repo_owner)
# # then
# self.assertEqual(converted, expected)
# # given
# content = """This url should be made into a link: http://example.com/somewhere/outthere and so should www.example.com/asdf.html?id=5&view=dashboard#report."""
# expected = """This url should be made into a link: [http://example.com/somewhere/outthere](http://example.com/somewhere/outthere) and so should [www.example.com/asdf.html?id=5&view=dashboard#report](http://www.example.com/asdf.html?id=5&view=dashboard#report)."""
# # when
# converted = tw.fix_tW_links(content, current_category, current_term, repo_owner)
# # then
# self.assertEqual(converted, expected)
#
# helpers
#
# @classmethod
# def extractFiles(cls, file_name, repo_name):
# file_path = os.path.join(TestTwPreprocessor.resources_dir, file_name)
# # 1) unzip the repo files
# temp_dir = tempfile.mkdtemp(prefix='Door43_test_repo_')
# unzip(file_path, temp_dir)
# repo_dir = os.path.join(temp_dir, repo_name)
# if not os.path.isdir(repo_dir):
# repo_dir = file_path
# # 2) Get the resource container
# rc = RC(repo_dir)
# return rc, repo_dir, temp_dir
| 42.797546 | 273 | 0.623997 |
9716fe1e513dab725a3647d84f68843402e56038 | 14,961 | py | Python | tests/notifier/notifiers/inventory_summary_test.py | perambulist/forseti-security | 5b87bc536f3d33fdeaa0c2a1f20eea3f56c79060 | [
"Apache-2.0"
] | 1 | 2018-10-06T23:16:59.000Z | 2018-10-06T23:16:59.000Z | tests/notifier/notifiers/inventory_summary_test.py | perambulist/forseti-security | 5b87bc536f3d33fdeaa0c2a1f20eea3f56c79060 | [
"Apache-2.0"
] | null | null | null | tests/notifier/notifiers/inventory_summary_test.py | perambulist/forseti-security | 5b87bc536f3d33fdeaa0c2a1f20eea3f56c79060 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the GCS inventory summary upload notifier."""
import mock
import unittest
from datetime import datetime
from google.cloud.forseti.common.util import errors as util_errors
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers import base_notification
from google.cloud.forseti.notifier.notifiers import inventory_summary
from tests.unittest_utils import ForsetiTestCase
class InventorySummaryTest(ForsetiTestCase):
    """Tests for the inventory summary notifier.

    Covers filename generation, GCS upload in CSV/JSON formats,
    configuration error paths, and a full successful run().
    """

    def setUp(self):
        """Set up a fixed 'now' so generated timestamps are deterministic."""
        ForsetiTestCase.setUp(self)
        self.fake_utcnow = datetime(year=1920, month=5, day=6, hour=7, minute=8)

    def tearDown(self):
        """Tear down method."""
        ForsetiTestCase.tearDown(self)

    @mock.patch(
        'google.cloud.forseti.notifier.notifiers.inventory_summary.date_time',
        autospec=True)
    def test_get_output_filename(self, mock_date_time):
        """_get_output_filename() embeds the index id and UTC timestamp (csv)."""
        mock_date_time.get_utc_now_datetime = mock.MagicMock()
        mock_date_time.get_utc_now_datetime.return_value = self.fake_utcnow

        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {}

        expected_timestamp = self.fake_utcnow.strftime(
            string_formats.TIMESTAMP_TIMEZONE_FILES)

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        actual_filename = notifier._get_output_filename(
            string_formats.INVENTORY_SUMMARY_CSV_FMT)
        expected_filename = string_formats.INVENTORY_SUMMARY_CSV_FMT.format(
            notifier.inventory_index_id, expected_timestamp)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(expected_filename, actual_filename)

    @mock.patch(
        'google.cloud.forseti.notifier.notifiers.inventory_summary.date_time',
        autospec=True)
    def test_get_output_filename_with_json(self, mock_date_time):
        """_get_output_filename() embeds the index id and UTC timestamp (json)."""
        mock_date_time.get_utc_now_datetime = mock.MagicMock()
        mock_date_time.get_utc_now_datetime.return_value = self.fake_utcnow

        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {}

        expected_timestamp = self.fake_utcnow.strftime(
            string_formats.TIMESTAMP_TIMEZONE_FILES)

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        actual_filename = notifier._get_output_filename(
            string_formats.INVENTORY_SUMMARY_JSON_FMT)
        expected_filename = string_formats.INVENTORY_SUMMARY_JSON_FMT.format(
            notifier.inventory_index_id, expected_timestamp)
        self.assertEqual(expected_filename, actual_filename)

    @mock.patch(
        'google.cloud.forseti.common.util.file_uploader.StorageClient',
        autospec=True)
    @mock.patch('tempfile.NamedTemporaryFile')
    @mock.patch('google.cloud.forseti.common.data_access.csv_writer.os')
    def test_upload_to_gcs_with_csv(self, mock_os, mock_tempfile, mock_storage):
        """_upload_to_gcs() writes a temp csv and uploads it to the gcs path."""
        fake_tmpname = 'tmp_name'
        fake_output_name = 'abc'

        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {
            'inventory': {'gcs_summary': {'enabled': True,
                                          'data_format': 'csv',
                                          'gcs_path': 'gs://abcd'}}}

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier._get_output_filename = mock.MagicMock(
            return_value=fake_output_name)
        gcs_path = '{}/{}'.format('gs://abcd', fake_output_name)

        mock_tmp_csv = mock.MagicMock()
        mock_tempfile.return_value = mock_tmp_csv
        mock_tmp_csv.name = fake_tmpname
        mock_tmp_csv.write = mock.MagicMock()

        notifier._upload_to_gcs([{}])

        mock_tmp_csv.write.assert_called()
        mock_storage.return_value.put_text_file.assert_called_once_with(
            fake_tmpname, gcs_path)

    @mock.patch(
        'google.cloud.forseti.common.util.file_uploader.StorageClient',
        autospec=True)
    @mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
    @mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
    def test_run_with_json(self, mock_write_csv, mock_json_stringify,
                           mock_storage):
        """_upload_to_gcs() uses the json formatter when data_format is json."""
        mock_json_stringify.return_value = 'test123'
        fake_output_name = 'abc'

        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {
            'inventory': {'gcs_summary': {'enabled': True,
                                          'data_format': 'json',
                                          'gcs_path': 'gs://abcd'}}}

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier._get_output_filename = mock.MagicMock(
            return_value=fake_output_name)

        notifier._upload_to_gcs([{}])

        self.assertTrue(notifier._get_output_filename.called)
        self.assertEqual(
            string_formats.INVENTORY_SUMMARY_JSON_FMT,
            notifier._get_output_filename.call_args[0][0])
        self.assertFalse(mock_write_csv.called)
        self.assertTrue(mock_json_stringify.called)

    @mock.patch(
        'google.cloud.forseti.common.util.file_uploader.StorageClient',
        autospec=True)
    @mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
    @mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
    def test_upload_to_gcs_with_invalid_data_format(self, mock_write_csv,
                                                    mock_json_stringify,
                                                    mock_storage):
        """_upload_to_gcs() raises for an unknown data_format and writes nothing."""
        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {
            'inventory': {'gcs_summary': {'enabled': True,
                                          'data_format': 'blah',
                                          'gcs_path': 'gs://abcd'}}}

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier._get_output_filename = mock.MagicMock()

        with self.assertRaises(base_notification.InvalidDataFormatError):
            notifier._upload_to_gcs([{}])

        self.assertFalse(notifier._get_output_filename.called)
        self.assertFalse(mock_write_csv.called)
        self.assertFalse(mock_json_stringify.called)

    @mock.patch('google.cloud.forseti.common.util.file_uploader.StorageClient', autospec=True)
    @mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
    @mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
    @mock.patch('google.cloud.forseti.notifier.notifiers.inventory_summary.LOGGER', autospec=True)
    def test_gcs_path_is_not_set_in_config(self, mock_logger, mock_write_csv,
                                           mock_json_stringify, mock_storage):
        """A missing gcs_path is logged as an error instead of raising."""
        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {
            'inventory': {'gcs_summary': {'enabled': True,
                                          'data_format': 'blah'}}}

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier._get_output_filename = mock.MagicMock()

        notifier._upload_to_gcs([{}])

        self.assertTrue(mock_logger.error.called)
        self.assertEqual(
            'gcs_path not set for inventory summary notifier.',
            mock_logger.error.call_args[0][0])

    @mock.patch('google.cloud.forseti.common.util.file_uploader.StorageClient', autospec=True)
    @mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
    @mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
    @mock.patch('google.cloud.forseti.notifier.notifiers.inventory_summary.LOGGER', autospec=True)
    def test_inventory_summary_invalid_gcs_path(self, mock_logger, mock_write_csv,
                                                mock_json_stringify, mock_storage):
        """A gcs_path without the gs:// scheme is logged as an error."""
        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {
            'inventory': {'gcs_summary': {'enabled': True,
                                          'data_format': 'blah',
                                          'gcs_path': 'blah'}}}

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier._get_output_filename = mock.MagicMock()

        notifier._upload_to_gcs([{}])

        self.assertTrue(mock_logger.error.called)
        self.assertTrue('Invalid GCS path' in mock_logger.error.call_args[0][0])

    @mock.patch('google.cloud.forseti.notifier.notifiers.inventory_summary.LOGGER', autospec=True)
    def test_no_inventory_in_config(self, mock_logger):
        """run() logs and exits quietly when the notifier has no inventory config."""
        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = dict()

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier.run()

        self.assertTrue(mock_logger.info.called)
        self.assertEqual(
            'No inventory configuration for notifier.',
            mock_logger.info.call_args[0][0])

    @mock.patch('google.cloud.forseti.notifier.notifiers.inventory_summary.LOGGER', autospec=True)
    def test_no_inventory_summary_in_config(self, mock_logger):
        """run() logs an exception when the summary sub-config is missing."""
        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {
            'inventory': {'blah': 'blah blah'}}

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier.run()

        self.assertTrue(
            ('unable to get inventory summary configuration'
             in mock_logger.exception.call_args[0][0]))

    @mock.patch('google.cloud.forseti.notifier.notifiers.inventory_summary.LOGGER', autospec=True)
    def test_inventory_summary_not_enabled_in_config(self, mock_logger):
        """run() logs and exits when both summary outputs are disabled."""
        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {
            'inventory': {'gcs_summary': {'enabled': False},
                          'email_summary': {'enabled': False}}}

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier.run()

        self.assertTrue(mock_logger.info.called)
        self.assertEqual(
            'All inventory summaries are disabled.',
            mock_logger.info.call_args[0][0])

    @mock.patch('google.cloud.forseti.notifier.notifiers.inventory_summary.LOGGER', autospec=True)
    def test_inventory_summary_no_summary_data(self, mock_logger):
        """run() logs an exception when no summary data can be retrieved."""
        mock_service_config = mock.MagicMock()
        mock_service_config.get_notifier_config.return_value = {
            'inventory': {'gcs_summary': {'enabled': True,
                                          'data_format': 'csv',
                                          'gcs_path': 'gs://blah'},
                          'email_summary': {'enabled': True,
                                            'sendgrid_api_key': 'blah',
                                            'sender': 'blah',
                                            'recipient': 'blah'}}}

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier._get_summary_data = mock.MagicMock()
        notifier._get_summary_data.side_effect = util_errors.NoDataError

        notifier.run()

        self.assertTrue(mock_logger.exception.called)
        self.assertTrue('no summary data is found'
                        in mock_logger.exception.call_args[0][0])

    def test_inventory_summary_can_run_successfully(self):
        """run() uploads and emails summary rows sorted by resource type."""
        mock_inventory_index = mock.MagicMock()
        mock_inventory_index.get_summary.return_value = {
            'bucket': 2, 'object': 1, 'organization': 1, 'project': 2}

        mock_session = mock.MagicMock()
        mock_session.query.return_value.get.return_value = mock_inventory_index

        mock_service_config = mock.MagicMock()
        mock_service_config.scoped_session.return_value.__enter__.return_value = mock_session
        mock_service_config.get_notifier_config.return_value = {
            'inventory': {'gcs_summary': {'enabled': True,
                                          'data_format': 'csv',
                                          'gcs_path': 'gs://blah'},
                          'email_summary': {'enabled': True,
                                            'sendgrid_api_key': 'blah',
                                            'sender': 'blah',
                                            'recipient': 'blah'}}}

        notifier = inventory_summary.InventorySummary(mock_service_config,
                                                      'abcd')
        notifier._upload_to_gcs = mock.MagicMock()
        notifier._send_email = mock.MagicMock()

        notifier.run()

        expected_summary_data = [
            {'count': 2, 'resource_type': 'bucket'},
            {'count': 1, 'resource_type': 'object'},
            {'count': 1, 'resource_type': 'organization'},
            {'count': 2, 'resource_type': 'project'}]

        self.assertEqual(1, notifier._upload_to_gcs.call_count)
        self.assertEqual(
            expected_summary_data,
            notifier._upload_to_gcs.call_args[0][0])
        self.assertEqual(1, notifier._send_email.call_count)
        self.assertEqual(
            expected_summary_data,
            notifier._send_email.call_args[0][0])
# Allow this test module to be executed directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 44.263314 | 98 | 0.63211 |
68965ce7bd26f71e255dc78c1a8f151e11dd37c5 | 3,170 | py | Python | src/python/mlenv_cloud/core/validate.py | gogasca/mlenv | cacd66340d03c018989d5d4222cff4bc1d432336 | [
"Apache-2.0"
] | null | null | null | src/python/mlenv_cloud/core/validate.py | gogasca/mlenv | cacd66340d03c018989d5d4222cff4bc1d432336 | [
"Apache-2.0"
] | null | null | null | src/python/mlenv_cloud/core/validate.py | gogasca/mlenv | cacd66340d03c018989d5d4222cff4bc1d432336 | [
"Apache-2.0"
] | null | null | null | """Module that performs validations on the inputs to the `run` API."""
import os
from . import gcp
def validate(
    entry_point,
    requirements_txt,
    entry_point_args,
    docker_image_build_bucket,
    called_from_notebook,
    docker_parent_image=None,
):
    """Validates the inputs.

    Delegates to the file-path checks first, then to the remaining
    argument checks, so file errors are reported before other errors.

    Args:
        entry_point: Optional string. File path to the python file or
            iPython notebook that contains the TensorFlow code.
        requirements_txt: Optional string. File path to a
            requirements.txt file with additional pip dependencies, if any.
        entry_point_args: Optional list of strings. Defaults to None.
            Command line arguments to pass to the `entry_point` program.
        docker_image_build_bucket: Optional string. Cloud Storage bucket
            name.
        called_from_notebook: Boolean. True if the API is run in a
            notebook environment.
        docker_parent_image: Optional parent Docker image to use.
            Defaults to None. (Currently not validated here.)

    Raises:
        ValueError: if any of the inputs is invalid.
    """
    _validate_files(entry_point, requirements_txt)
    _validate_other_args(
        entry_point_args,
        docker_image_build_bucket,
        called_from_notebook,
    )
def _validate_files(entry_point, requirements_txt):
"""Validates all the file path params."""
cwd = os.getcwd()
if entry_point is not None and (
not os.path.isfile(os.path.join(cwd, entry_point))):
raise ValueError(
"Invalid `entry_point`. "
"Expected a relative path in the current directory tree. "
"Received: {}".format(entry_point)
)
if requirements_txt is not None and (
not os.path.isfile(os.path.join(cwd, requirements_txt))
):
raise ValueError(
"Invalid `requirements_txt`. "
"Expected a relative path in the current directory tree. "
"Received: {}".format(requirements_txt)
)
if entry_point is not None and (
not (entry_point.endswith("py") or entry_point.endswith("ipynb"))
):
raise ValueError(
"Invalid `entry_point`. "
"Expected a python file or an iPython notebook. "
"Received: {}".format(entry_point)
)
def _validate_other_args(
args, docker_image_build_bucket, called_from_notebook
):
"""Validates all non-file/distribution strategy args."""
if args is not None and not isinstance(args, list):
raise ValueError(
"Invalid `entry_point_args` input. "
"Expected None or a list. "
"Received {}.".format(str(args))
)
if called_from_notebook and docker_image_build_bucket is None:
raise ValueError(
"Invalid `docker_config.image_build_bucket` input. "
"When `run` API is used within a python notebook, "
"`docker_config.image_build_bucket` is expected to be specifed. We "
"will use the bucket name in Google Cloud Storage/Build services "
"for Docker containerization. Received {}.".format(
str(docker_image_build_bucket)
)
) | 34.835165 | 80 | 0.643218 |
10099ded7c39c3aba73fd13a209ff30e5ae05f6e | 7,200 | py | Python | kubernetes_asyncio/client/models/v1_deployment.py | opsani/kubernetes_asyncio | 55283bf6f3690e5c0a0c589cd752221511e2be51 | [
"Apache-2.0"
] | 196 | 2018-05-23T16:55:41.000Z | 2022-03-31T10:09:40.000Z | kubernetes_asyncio/client/models/v1_deployment.py | tomplus/kubernetes_asyncio | e8c8686ec11be3a5295ae9d5d8728299492a61f8 | [
"Apache-2.0"
] | 164 | 2018-05-20T20:39:03.000Z | 2022-03-29T22:57:04.000Z | kubernetes_asyncio/client/models/v1_deployment.py | opsani/kubernetes_asyncio | 55283bf6f3690e5c0a0c589cd752221511e2be51 | [
"Apache-2.0"
] | 41 | 2018-06-08T00:39:53.000Z | 2022-01-12T18:19:06.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.18.20
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1Deployment(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> OpenAPI type (string names, resolved at runtime)
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1DeploymentSpec',
        'status': 'V1DeploymentStatus'
    }

    # python attribute name -> JSON field name in the Kubernetes API
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1Deployment - a model defined in OpenAPI"""  # noqa: E501
        # fall back to a default client configuration when none is supplied
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # only assign through the property setters when a value was given,
        # so unset fields stay None
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """Gets the api_version of this V1Deployment.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1Deployment.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1Deployment.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1Deployment.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V1Deployment.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1Deployment.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1Deployment.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1Deployment.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1Deployment.  # noqa: E501


        :return: The metadata of this V1Deployment.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1Deployment.


        :param metadata: The metadata of this V1Deployment.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def spec(self):
        """Gets the spec of this V1Deployment.  # noqa: E501


        :return: The spec of this V1Deployment.  # noqa: E501
        :rtype: V1DeploymentSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1Deployment.


        :param spec: The spec of this V1Deployment.  # noqa: E501
        :type: V1DeploymentSpec
        """

        self._spec = spec

    @property
    def status(self):
        """Gets the status of this V1Deployment.  # noqa: E501


        :return: The status of this V1Deployment.  # noqa: E501
        :rtype: V1DeploymentStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this V1Deployment.


        :param status: The status of this V1Deployment.  # noqa: E501
        :type: V1DeploymentStatus
        """

        self._status = status

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # recursively convert nested models, lists and dicts of models
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1Deployment):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1Deployment):
            return True

        return self.to_dict() != other.to_dict()
e8aa4b2667f6c9564d0304724946bfde284051e5 | 29,452 | py | Python | lora/dragino/MAChandler.py | hferentschik/balena-lora-node | 666fb6448aa27e9867538112067a84b88bf105b7 | [
"MIT"
] | null | null | null | lora/dragino/MAChandler.py | hferentschik/balena-lora-node | 666fb6448aa27e9867538112067a84b88bf105b7 | [
"MIT"
] | null | null | null | lora/dragino/MAChandler.py | hferentschik/balena-lora-node | 666fb6448aa27e9867538112067a84b88bf105b7 | [
"MIT"
] | null | null | null | """
MAChandler.py
Handles MAC commands sent by the server.
responses are placed in a list of tuples
when the responses are required to add to an uplink
the responses are converted to a bytearray
The array of responses is cleared after reading
All attributes are cached when the class is deleted
and reloaded when it is instantiated.
The initial attributes are set from the config file TTN section
when the class is instantiated and overwritten by the cached values.
To start over, delete the cache file before instantiating this class.
"""
import logging
import json
import toml
from .Strings import *
import random
# MAC commands have requests and answers
# the ID of the command is the same whether it is a REQ or ANS
class MCMD:
    """LoRaWAN MAC command identifiers (CIDs).

    A request (REQ) and its answer (ANS) share the same CID; the frame
    direction determines which is meant.
    """
    LINK_CHECK_ANS=0x02
    LINK_CHECK_REQ=LINK_CHECK_ANS
    LINK_ADR_REQ=0x03
    DUTY_CYCLE_REQ=0x04
    RX_PARAM_SETUP_REQ=0x05
    DEV_STATUS_REQ= 0x06
    NEW_CHANNEL_REQ=0x07
    RX_TIMING_SETUP_REQ=0x08
    TX_PARAM_SETUP_REQ=0x09
    DL_CHANNEL_REQ=0x0A
    # 0x0B..0x0C RFU
    TIME_ANS=0x0D
    TIME_REQ=TIME_ANS
    # 0x0E..0x0F RFU
    # 0x10..0x1F reserved Class B commands
    # 0x20..0x2F reserved Class C commands
    # 0x30..0x7F RFU
    # 0x80..0xFF proprietary extensions
    """END - allows geany to collapse properly"""
# default verbosity for this module's logger (overridable per instance)
DEFAULT_LOG_LEVEL=logging.DEBUG
class MAC_commands(object):
    def __init__(self,config, logging_level=DEFAULT_LOG_LEVEL):
        """Build the MAC handler from the user config plus any cached state.

        :param config: parsed config (toml) with TTN and frequency-plan
            sections; the process exits if it is None
        :param logging_level: logging verbosity for this handler
        """
        self.logger = logging.getLogger("MAChandler")
        self.logger.setLevel(logging_level)

        if config is None:
            self.logger.error("config is None")
            exit()

        self.config=config
        self.cache={} # TTN dynamic settings

        # jump table for MAC commands taken from spec 1.0.4
        # REQ are commands from the server requesting some info/changes
        # ANS are in response to MAC commands sent to the server
        # (handler methods are defined further down this class)
        self.commands = {
            MCMD.LINK_CHECK_ANS: self.link_check_ans,
            MCMD.LINK_ADR_REQ: self.link_adr_req,
            MCMD.DUTY_CYCLE_REQ: self.duty_cycle_req,
            MCMD.RX_PARAM_SETUP_REQ: self.rx_param_setup_req,
            MCMD.DEV_STATUS_REQ: self.dev_status_req,
            MCMD.NEW_CHANNEL_REQ: self.new_channel_req,
            MCMD.RX_TIMING_SETUP_REQ: self.rx_timing_setup_req,
            MCMD.TX_PARAM_SETUP_REQ: self.tx_param_setup_req,
            MCMD.DL_CHANNEL_REQ: self.dl_channel_req,
            # 0x0B..0x0C RFU
            MCMD.TIME_ANS: self.time_ans,
            # 0x0E..0x0F RFU
            # 0x10..0x1F reserved Class B commands
            # 0x20..0x2F reserved Class C commands
            # 0x30..0x7F RFU
            # 0x80..0xFF proprietary extensions
        }

        self.frequency_plan=self.config[TTN][FREQUENCY_PLAN]

        self.lastSNR=0

        self.setCacheDefaults()

        self.lastSendSettings=(None,None,None)

        # initialise values from user config file
        # this gives the code a starting point on first run
        # NOTE(review): this empties the cache that setCacheDefaults()
        # just populated; the values below + loadCache() repopulate it,
        # but any default not in MAC_SETTINGS/KEY_SETTINGS is lost -
        # confirm this is intentional
        self.cache={}

        for k in MAC_SETTINGS:
            try:
                self.logger.debug(f"Setting self.cache[{k}] to {self.config[TTN][k]}")
                self.cache[k]=self.config[TTN][k]
            except KeyError:
                self.logger.info(f"missing TTN section key {k} ignored")

        # get TTN network keys for the configured auth mode (OTAA/ABP)
        auth_mode=self.config[TTN][AUTH_MODE]
        for k in KEY_SETTINGS:
            try:
                self.logger.debug(f"Setting self.cache[{k}] to {self.config[TTN][auth_mode][k]}")
                self.cache[k]=self.config[TTN][auth_mode][k]
            except KeyError:
                self.logger.info(f"Missing {auth_mode} TTN key {k} ignored")

        self.loadCache() # load any cached values

        # always reset these
        self.macReplies=[] # list of replies to MAC commands
        self.macCmds=None # list of MAC commands in downlink
        self.macIndex=0 # pointer to next MAC cmd in macCmds

        # these values are tracked whenever a MAC linkCheckReq command is answered
        #
        # gw_margin is the signal strength above noise floor so min value gives us best
        # indication of decoder success
        # gw_cnt is the number of gateways which received our transmission. the linkCheckReq
        # must be issued a number of times because gateways in reach may not be listening
        self.gw_margin=0 # min is calculated
        self.gw_cnt=255 # max is calculated

        self.logger.info("__init__ done")
def setLastSNR(self,SNR):
"""
used by status reply to server status req
not cached because it can vary a lot
"""
self.logger.info(f"last SNR value {SNR}")
self.lastSNR=SNR
    '''
    getters and setters for cached values
    '''
    def getLinkCheckStatus(self):
        """Return (gw_margin, gw_cnt) tracked from LinkCheck answers."""
        return (self.gw_margin,self.gw_cnt)
    def getRX1Delay(self):
        """Return the cached RX1 window delay (seconds)."""
        return self.cache[RX1_DELAY]
    def setRX1Delay(self,delay):
        """Cache the RX1 delay passed in with a JOIN_ACCEPT payload.

        :param delay: seconds
        :return Nothing: no reply expected
        """
        self.logger.info(f"set RX1 delay {delay}")
        self.cache[RX1_DELAY]=delay
        self.saveCache()
def getDevAddr(self):
try:
return self.cache[DEVADDR]
except:
return bytearray([0x00,0x00,0x00,0x00])
    def setDevAddr(self,DevAddr):
        """Cache the device address (assigned by the network on join)."""
        self.cache[DEVADDR]=DevAddr
        self.saveCache()
    def getNewSKey(self):
        """Return the cached network session key."""
        return self.cache[NEWSKEY]
    def setNewSKey(self,key):
        """Cache the network session key (derived after an OTAA join)."""
        self.cache[NEWSKEY]=key
        self.saveCache()
    def getAppSKey(self):
        """Return the cached application session key."""
        return self.cache[APPSKEY]
    def setAppSKey(self,appskey):
        """Cache the application session key (derived after an OTAA join)."""
        self.cache[APPSKEY]=appskey
        self.saveCache()
    def getAppKey(self):
        """Return the cached application (root) key from the config."""
        return self.cache[APPKEY]
    def getAppEui(self):
        """Return the cached application EUI."""
        return self.cache[APPEUI]
    def getDevEui(self):
        """Return the cached device EUI."""
        return self.cache[DEVEUI]
    def getFCntUp(self):
        """Return the cached uplink frame counter."""
        return self.cache[FCNTUP]
    def setFCntUp(self,count):
        """Set and persist the uplink frame counter."""
        self.cache[FCNTUP]=count
        self.saveCache()
def getJoinSettings(self):
"""
When joining only the first three frequencies
should be used
max duty cycle is also selected
:return (freq,sf,bw)
"""
freq=random.choice(self.channelFrequencies[0:3])
self.cache[MAX_DUTY_CYCLE]=self.getMaxDutyCycle(freq)
sf,bw=self.config[self.frequency_plan][DATA_RATES][self.cache[DATA_RATE]]
self.lastSendSettings=(freq,sf,bw)
self.logger.debug(f"using join settings freq {freq} sf {sf} bw {bw}")
return freq,sf,bw
    def getDataRate(self):
        """Return the cached data-rate index (into the plan's DATA_RATES)."""
        return self.cache[DATA_RATE]
    def getLastSendSettings(self):
        """
        :return tuple: (freq,sf,bw) - (None, None, None) before any send
        """
        return self.lastSendSettings
def getSendSettings(self):
"""
randomly choose a frequency
once joined all frequencies are available for use
Use current data rate
:return (freq,sf,bw)
"""
max_channel=self.config[self.frequency_plan][MAX_CHANNELS]
freq=random.choice(self.channelFrequencies[:max_channel])
self.cache[MAX_DUTY_CYCLE]=self.getMaxDutyCycle(freq)
sf,bw=self.config[self.frequency_plan][DATA_RATES][self.cache[DATA_RATE]]
self.lastSendSettings=(freq,sf,bw)
self.logger.debug(f"using send settings freq {freq} sf {sf} bw {bw}")
return freq,sf,bw
def getRX1Settings(self):
"""
RX1 is normally the same as last send settings unless
the RX1_DR_OFFSET is not zero
frequency is not changed
:return (freq,sf,bw)
"""
if self.cache[RX1_DR]==self.cache[DATA_RATE]:
self.logger.debug(f"using last send settings for RX1")
return self.lastSendSettings
# RX1 data rate is different but frequency is normally the same
freq=self.lastSendSettings[0] # we only want the frequency
# frequency may have been fixed by MAC command
if self.cache[RX1_FREQ_FIXED]:
freq=self.cache[RX1_FREQUENCY]
sf,bw=self.config[self.frequency_plan][DATA_RATES][self.cache[RX1_DR]]
self.logger.debug(f"rx1 settings freq {freq} sf {sf} bw {bw}")
return freq,sf,bw
def getRX2Settings(self):
"""
RX2 is a fixed frequency,sf and bw
:return (freq,sf,bw)
"""
freq=self.cache[RX2_FREQUENCY]
sf,bw=self.config[self.frequency_plan][DATA_RATES][self.cache[RX2_DR]]
self.logger.debug(f"rx2 settings freq {freq} sf {sf} bw {bw}")
return freq,sf,bw
def getMaxDutyCycle(self,freq=None):
"""
return the max duty cycle for a given frequency
"""
if freq is None:
freq,sf,bw=self.getLastSendSettings()
if freq is None:
freq=self.channelFrequencies[0] #
self.logger.error(f"Nothing has been transmitted using max duty cycle for {freq} instead")
DC_table=self.config[self.frequency_plan][DUTY_CYCLE_TABLE]
for (minFreq,maxFreq,dc) in DC_table:
#self.cache[MAX_EIRP]= eirp
if minFreq<=freq <=maxFreq:
return dc
self.logger.error(f"unable to locate max duty cycle for {freq}. Using 0.1 instead")
return 0.1
def getSfBw(self,drIndex):
"""
gets the data rate for a given data rate index
returns a tuple (sf,bw)
The set_bw() function expects a value between 0 and 9
"""
sf,bw=self.config[self.frequency_plan][DATA_RATES][drIndex]
return (sf,bw)
    def get_bw_index(self,wanted):
        """
        the set_bw() function takes an index 0-9 check the value is valid
        :param wanted: one of [7.8, 10.4, 15.6, 20.8, 31.25, 41.7, 62.5, 125.0, 250.0, 500.0] kHz
        :raises ValueError: if wanted is not in the plan's bandwidth table
        """
        # list.index() raising ValueError doubles as the validity check
        return self.config[self.frequency_plan][BANDWIDTHS].index(wanted)
    def getFrequencyPlan(self):
        """
        get the frequency plan channel frequencies
        used internally

        Populates self.channelFrequencies and self.channelDRRange from
        the config section named by self.frequency_plan; a missing or
        malformed section is logged, not raised.
        """
        self.logger.info("loading frequency plan")
        try:
            self.logger.info(f"Frequency Plan is {self.frequency_plan}")
            # every channel initially allows the full DR range 0..7
            self.channelDRRange = [(0, 7)] * self.config[self.frequency_plan][MAX_CHANNELS]
            self.channelFrequencies=self.config[self.frequency_plan][LORA_FREQS]
            self.newChannelIndex=0
            self.logger.info("Frequency Plan loaded ok")
        except Exception as e:
            # broad on purpose: any config problem should be reported,
            # leaving the previous channel settings in place
            self.logger.error(f"error loading frequency plan. Check if it exists in the config.toml. {e}")
    def setDLsettings(self,settings):
        """
        passed in with JOIN_ACCEPT payload
        :param settings: upper byte RX1 DR offset, lower RX2 DR
        :return nothing: JOIN_ACCEPT does not expect a reply
        """
        # bits 6:4 hold the RX1 data-rate offset
        rx1_dr_offset=(settings & 0x70)>>4
        # map (current uplink DR, offset) -> RX1 DR via the plan's table
        dr_table_row=self.config[self.frequency_plan][DR_OFFSET_TABLE][self.cache[DATA_RATE]]
        rx1_dr=dr_table_row[rx1_dr_offset]
        self.cache[RX1_DR]=rx1_dr
        # bits 3:0 hold the RX2 data rate directly
        self.cache[RX2_DR]=settings & 0x0F
        self.saveCache()
        self.logger.info(f"DL settings rx1_dr_offset{rx1_dr_offset} rx1_DR {rx1_dr} rx2_DR {settings & 0x0F}")
def _computeFreq(self,a):
"""
:param a: byte array of 3 octets
:return f: frequency in xxx.y mHz format
"""
freq=(a[2] << 16 ) + (a[1] << 8) + a[0] * 100
# frequency is like 868100000 but we want 868.1
return freq/1000000
def handleCFList(self,delay,cflist):
"""
upto 16 bytes
5 channel frequencies in groups of 3 bytes per frequency
plus one byte 0 passed in with JOIN_ACCEPT payload
:param cflist: 5 channel frequencies packed in 3 bytes LSB first
"""
self.logger.info("processing cfList from JOIN_ACCEPT")
if cflist[-1:]!=0:
self.logger.info("cfList type is non-zero")
ch=4;
for entry in range(5):
# get each slice
i=entry*3
self.lora_freqs[ch]=self._computeFreq(cflist[i:i+3])
ch+=1
    def setCacheDefaults(self):
        """
        default settings

        Seeds self.cache from the user config (TTN section plus the
        active frequency plan) and loads the channel table. Intentionally
        does NOT persist: loadCache() writes the file if it is missing.
        """
        self.logger.info("Setting default MAC values using user config values")
        self.cache[DATA_RATE]=self.config[TTN][DATA_RATE]
        self.cache[CHANNEL_FREQUENCIES] = self.config[self.frequency_plan][LORA_FREQS]
        self.cache[OUTPUT_POWER]=self.config[TTN][OUTPUT_POWER]
        self.cache[MAX_POWER]=self.config[TTN][MAX_POWER]
        #self.channelDRrange = [(0,7)] * self.config[self.frequency_plan][MAX_CHANNELS] # all disabled
        # extract freqs from frequency plan
        self.getFrequencyPlan()
        # the following attributes can be changed by MAC commands
        # not all are cached
        # these are listed in groups according to the MAC command
        # which changes the value
        # they are also over written from the user config and MAC cache
        # link ADR req
        #for a in [CH_MASK,CH_MASK_CTL,NB_TRANS]:
        #    self.cache[a]=0
        # Duty Cycle req - percentage airtime allowed
        # duty cycle depends on frequency but is mostly
        # 1% in EU868
        self.cache[DUTY_CYCLE]=1
        # RXParamSetup
        self.cache[RX1_DR]=self.config[TTN][RX1_DR]
        self.cache[RX2_DR]=self.config[TTN][RX2_DR]
        # TX and RX1 frequencies change, RX2 is constant
        # RX1 frequency can be set by MAC
        self.cache[RX1_FREQ_FIXED]=False
        self.cache[RX2_FREQUENCY]=self.config[TTN][RX2_FREQUENCY]
        self.cache[RX1_DELAY]=self.config[TTN][RX1_DELAY]
        self.cache[RX2_DELAY]=self.config[TTN][RX2_DELAY]
        # with OTAA some of these are set after joining
        # and cached so that a JOIN isn't needed every time
        auth_mode=self.config[TTN][AUTH_MODE]
        self.cache[APPKEY]=self.config[TTN][auth_mode][APPKEY]
        self.cache[APPEUI]=self.config[TTN][auth_mode][APPEUI]
        self.cache[DEVEUI]=self.config[TTN][auth_mode][DEVEUI]
        if self.config[TTN][AUTH_MODE]==OTAA:
            # NEWSKEY and APPSKEY are set after joining
            self.cache[DEVADDR]=bytearray([0x00,0x00,0x00,0x00])
            self.cache[APPSKEY]=bytearray()
            self.cache[NEWSKEY]=bytearray()
        else:
            # ABP settings
            self.cache[DEVADDR]=self.config[TTN][ABP][DEVADDR]
            self.cache[APPSKEY]=self.config[TTN][ABP][APPSKEY]
            self.cache[NEWSKEY]=self.config[TTN][ABP][NEWSKEY]
        # frame counts - will be reset on OTAA joining
        self.cache[FCNTUP]=self.config[TTN][FCNTUP]
        self.cache[FCNTDN]=self.config[TTN][FCNTDN]
        self.logger.info("MAC default settings finished")
        # do not call saveCache() - loadCache() will do that if
        # the cache file doesn't exist
def saveCache(self):
    """
    Persist the MAC state dict to the configured JSON cache file.

    MAC commands received from TTN alter device behaviour
    """
    try:
        self.logger.info("Saving MAC settings")
        cache_path = self.config[TTN][MAC_CACHE]
        with open(cache_path, "w") as cache_file:
            json.dump(self.cache, cache_file)
    except Exception as e:
        # a failed save is logged but never fatal
        self.logger.info(f"Saving MAC settings failed {e}.")
def incrementFcntUp(self):
    """
    increments the FcntUp and save to cache
    """
    self.cache[FCNTUP] = self.cache[FCNTUP] + 1
    self.saveCache()
def checkFcntDn(self, fcntdn):
    """
    Validate a received downlink frame counter.

    fcntdn should be incrementing; a stale or replayed value is logged
    and ignored, otherwise the new counter is cached.

    :param fcntdn: frame counter from the downlink frame
    """
    prev = self.cache[FCNTDN]
    if fcntdn <= prev:
        # bug fix: Logger.warn is a deprecated alias of Logger.warning
        self.logger.warning("received downlink FCntDn < or = previous")
        return
    self.cache[FCNTDN] = fcntdn
    self.saveCache()
def loadCache(self):
    """
    load mac parameters (if saved)

    Falls back to saving the current defaults when the cache file is
    missing or unreadable.
    """
    self.logger.info("Loading MAC settings")
    try:
        with open(self.config[TTN][MAC_CACHE], "r") as f:
            loaded = json.load(f)
        if not loaded:
            self.logger.warning("cached MAC settings is empty. Could be first run?")
            return
        self.cache = loaded
        self.logger.info("cached settings loaded ok")
    except Exception as e:
        self.logger.info(f"cached settings load failed {e}. Saving current defaults")
        self.saveCache()
def getFOpts(self):
    """
    these are the MAC replies. The spec says the server can send multiple
    commands in a packet.
    The replies are cleared when this method is called otherwise
    they would be sent to TTN with every uplink
    :param: None
    :return: (Fopts,FoptsLen)
    :rtype: tuple
    """
    pending = self.macReplies  # should this be reversed?
    count = len(pending)
    self.logger.info(f"check for FOpts to attach to uplink len={count} FOpts={pending}")
    # clear them as we don't want to send with every messages
    self.macReplies = []
    if count == 0:
        self.logger.info("no FOpts")
        return [], 0
    if 0 < count <= 16:
        return (pending, count)
    # anything over 16 bytes cannot fit in the FOpts field
    self.logger.warning(f"FOpts len={count} exceeds 16 bytes={pending}")
    return [], 0
####################################################
#
# here are the MAC command handlers
#
# taken from the V1.0.4 spec
#
####################################################
def handleCommand(self, macPayload):
    """
    these are commands originated from the server.
    They are carried either in the FOpts field or, when FPort==0, in the
    whole FRMpayload (never both). MAC commands are acknowledged by
    sending an uplink repeating the command CID.
    This method is called if a message includes a MAC payload
    :param macPayload: a MAC payload object
    """
    self.logger.debug("checking MAC payload for MAC commands")
    FCtrl = macPayload.get_fhdr().get_fctrl()
    FOptsLen = FCtrl & 0x0F
    FCnt = macPayload.get_fhdr().get_fcnt()  # frame downlink frame counter
    self.logger.debug(f"received frame FCnt={FCnt} FCntDn={self.cache[FCNTDN]}")
    self.cache[FCNTDN] = FCnt
    # bug fix: was get_fhdr().fhdr.get_fopts() - the extra .fhdr attribute
    # access is inconsistent with the get_fctrl()/get_fcnt() calls above
    FOpts = macPayload.get_fhdr().get_fopts()
    FPort = macPayload.get_fport()
    FRMpayload = macPayload.get_frm_payload()
    self.logger.debug(f"FCtrl={FCtrl} FCnt={FCnt} FOpts={FOpts} FoptsLen={FOptsLen} FPort={FPort} FRMpayload={FRMpayload}")
    # mac commands may contain several commands
    # all need replying to in the order sent
    # bug fix: was self.MACreplies - the handlers and getFOpts() use
    # self.macReplies, so the buffer was never actually reset here
    self.macReplies = []
    if FPort == 0:
        # MAC commands only and in FRMpayload
        # bug fix: the old guard returned early whenever FOptsLen==0,
        # which also skipped FRMpayload-carried commands
        FOpts = FRMpayload
    elif FOptsLen == 0:
        # no MAC commands anywhere in this frame
        return
    # process the MAC commands - there may be multiple commands
    # all may need answering
    # bug fix: the method is named processFopts (lowercase 'o');
    # self.processFOpts raised AttributeError
    self.processFopts(FOpts)
def processFopts(self, FOpts):
    """
    can be called directly if downlink message does not include a FRM payload
    :param FOpts: array of MAC commands
    """
    self.macCmds = FOpts
    self.macIndex = 0
    while self.macIndex < len(self.macCmds):
        cid = self.macCmds[self.macIndex]
        # handlers append to self.macReplies and advance self.macIndex
        self.logger.debug(f"Calling MAC cmd with CID {cid}")
        try:
            handler = self.commands[cid]
            handler()
        except KeyError:
            self.logger.error(f"invalid MAC command CID {cid}. Aborting MAC handling")
            break
        # update any changes
        self.saveCache()
def link_check_req(self):
    """
    adds a link check request to the macReplies list
    this will be sent with the next uplink
    The server will send a LINK_CHECK_ANS.
    """
    self.macReplies.append(MCMD.LINK_CHECK_REQ)
def link_check_ans(self):
    """
    The server sends this to acknowledge us sending a LinkCheckReq
    Recieved payload will be 2 bytes [Margin][GwCnt]
    GwCnt is number of gateways which received the transmission from us
    Margin is the the demod margin (db) range 0..254 (255 reserved)
    no response needed
    """
    margin = self.macCmds[self.macIndex + 1]
    gw_count = self.macCmds[self.macIndex + 2]
    # values can be retrieved with getLinkCheckStatus(); keep the worst
    # margin and the best gateway count seen so far
    self.gw_margin = min(self.gw_margin, margin)
    self.gw_cnt = max(self.gw_cnt, gw_count)
    self.logger.debug(f"link check ans margin {self.gw_margin} GwCnt {self.gw_cnt}")
    self.macIndex += 3
def link_adr_req(self):
    """
    Server is asking us to do a data rate adaption
    payload (bytes) is [DR & txPower:1][chMask:2, LSB first][redundancy:1]
    ChMask determines the channels usable for uplink access
    data_rate & power [DR: 7..4, Power: 3..0] Region Specific
    redundancy rfu:7, ChMaskCntl:6..4 , NbTrans:3..0
    return status byte: RFU:7..3, PowerAck:2, DRAck: 1, ChMaskAck:0
    """
    # bug fix: '>>' binds tighter than '&', so 'x & 0xF0 >> 4' evaluated
    # as 'x & 0x0F'; parenthesise the mask before shifting
    self.cache[RX1_DR] = (self.macCmds[self.macIndex + 1] & 0xF0) >> 4
    self.cache[OUTPUT_POWER] = self.macCmds[self.macIndex + 1] & 0x0F
    # bug fix: ChMask is a little-endian 16-bit field; the old code ANDed
    # the two bytes together instead of combining them with OR
    self.cache[CH_MASK] = self.macCmds[self.macIndex + 2] | (self.macCmds[self.macIndex + 3] << 8)
    # bug fix: ChMaskCntl occupies bits 6..4, so the mask is 0x70
    # (0x0e >> 4 evaluated to 0, zeroing the field)
    self.cache[CH_MASK_CTL] = (self.macCmds[self.macIndex + 4] & 0x70) >> 4
    self.cache[NB_TRANS] = self.macCmds[self.macIndex + 4] & 0x0F
    # bug fix: was self.MACreplies - the reply buffer is self.macReplies
    self.macReplies += [MCMD.LINK_ADR_REQ]
    self.macIndex += 5
def duty_cycle_req(self):
    """
    Change the duty cycle
    1 byte [RFU: 7..4][MaxDutyCycle: 3..0]
    value not used - we are using the duty_cycle_range in the frequency plan
    section of the config file
    """
    payload = self.macCmds[self.macIndex + 1]
    self.cache[MAX_DUTY_CYCLE] = payload & 0x0F
    self.macReplies.append(MCMD.DUTY_CYCLE_REQ)
    self.macIndex += 2
def rx_param_setup_req(self):
    """
    Setup RX2 parameters
    payload=[DLSettings:1] [Frequency:3]
    DLsettings [RFU:7,RX1DROffset:6..4,RX2DataRate:3..0]
    reply is 1 byte with bit encoding
    RFU:7..3, RX1DROffsetAck:2, RX2DataRateACK:1, ChannelACK:0
    Settings are applied only when all three fields are valid (reply==0x07).
    """
    DLSettings = self.macCmds[self.macIndex + 1]
    reply = 0x00
    # bug fix: RX1DROffset is bits 6..4 so the mask is 0x70
    # (0xE0 included the RFU bit and dropped bit 4)
    rx1_dr_offset = (DLSettings & 0x70) >> 4
    if 0 <= rx1_dr_offset <= 5:
        # bug fix: use bitwise OR - boolean 'or' left reply stuck at its
        # first truthy value. RX1DROffsetAck is bit 2 (0=not allowed, 1 ok)
        reply |= 0x04
    rx2_dr_index = DLSettings & 0x0F
    if 0 <= rx2_dr_index <= 8:
        # RX2DataRateAck is bit 1 (0=unknown data rate, 1 ok)
        reply |= 0x02
    # bug fix: the frequency field is 3 octets (slice was only 2 bytes)
    freq = self._computeFreq(self.macCmds[self.macIndex + 2:self.macIndex + 5])
    if freq in self.lora_freqs:
        # ChannelAck is bit 0 (0=unusable, 1 ok)
        reply |= 0x01
    if reply == 0x07:
        self.cache[RX1_DR] += rx1_dr_offset
        self.cache[RX2_DR] = rx2_dr_index
        self.cache[RX2_FREQUENCY] = freq
    # bug fix: report the computed status instead of a hard-coded 0x07
    self.macReplies += [MCMD.RX_PARAM_SETUP_REQ, reply]
    self.macIndex += 5
def dev_status_req(self):
    """
    Server is asking for device status
    return 2 bytes [battery][RadioStatus]
    Battery
    0 = connected to external power source
    1..254 battery level
    255 - not able to measure
    Radio Status from last dev_status_req command
    bits 5..0 SNR 6 bit signed int
    """
    # bug fix: encode the SNR as a 6-bit two's-complement value; a raw
    # negative int is not a valid uplink byte
    snr = int(self.lastSNR) & 0x3F
    self.logger.info(f"Dev Status Req - returns (0,{snr})")
    self.macReplies += [MCMD.DEV_STATUS_REQ, 0, snr]
    # the request carries no payload, only the CID
    self.macIndex += 1
def new_channel_req(self):
    """
    modify a channel
    payload [ChIndex:0][Frequency:1..3][DRRange:4]
    reply 1 byte encoded RFU:7..2, DataRateOk: 1, ChannelFreqOk 0
    """
    ChIndex = self.macCmds[self.macIndex + 1]
    newFreq = self._computeFreq(self.macCmds[self.macIndex + 2:self.macIndex + 5])
    DRRange = self.macCmds[self.macIndex + 5]  # uplink data rate range (max,min)
    maxDR = (DRRange & 0xF0) >> 4
    minDR = DRRange & 0x0F
    # TODO - check newFreq is possible first; needs regional parameters
    # bug fix: attribute is channelFrequencies - self.ChannelFrequencies
    # raised AttributeError at runtime
    minFreq = min(self.channelFrequencies)
    maxFreq = max(self.channelFrequencies)
    if not (minFreq <= newFreq <= maxFreq):
        self.logger.info(f"new freq {newFreq} not in range min {minFreq} - {maxFreq}")
        self.macReplies += [MCMD.NEW_CHANNEL_REQ, 0x02]
    else:
        self.channelFrequencies[ChIndex] = newFreq
        self.channelDRRange[ChIndex] = (minDR, maxDR)
        # bug fix: log used undefined name chIndex (NameError)
        self.logger.info(f"NewChannelReq chIndex {ChIndex} freq {newFreq} maxDR {maxDR} minDR {minDR}")
        # answer - assume all ok
        self.macReplies += [MCMD.NEW_CHANNEL_REQ, 0x03]
    self.macIndex += 6
def rx_timing_setup_req(self):
    """
    payload is 1 byte RX1 delay encoded in bits3..0
    """
    delay = self.macCmds[self.macIndex + 1] & 0x0F  # seconds
    if delay == 0:
        # zero means the default of one second
        delay = 1
    self.cache[RX1_DELAY] = delay
    self.logger.info(f"rx timing setup RX1 delay={delay}")
    self.macReplies.append(MCMD.RX_TIMING_SETUP_REQ)
    self.macIndex += 2
def tx_param_setup_req(self, mac_payload=None):
    """
    payload 1 byte
    [RFU:7..6][DownlinkDwellTime:5][UplinkDwellTime:4][maxEIRP:3..0]
    DwellTimes: 0= no limit, 1=400ms
    Currently the values are stored and acknowledged but not used

    :param mac_payload: unused. The dispatcher in processFopts calls
        handlers with no arguments, so this now defaults to None
        (the mandatory parameter made every dispatch a TypeError).
    """
    octet = self.macCmds[self.macIndex + 1]
    # bug fix: '>>' binds tighter than '&', so 'x & 0x20 >> 5' evaluated
    # as 'x & 1'; parenthesise the masks before shifting
    dldt = (octet & 0x20) >> 5
    uldt = (octet & 0x10) >> 4
    maxEirp = octet & 0x0F
    self.cache[DOWNLINK_DWELL_TIME] = dldt
    self.cache[UPLINK_DWELL_TIME] = uldt
    self.cache[MAX_EIRP] = maxEirp
    self.logger.info(f"tx param setup DL dwell {dldt} UL dwell {uldt} maxEIRP {maxEirp}")
    self.macReplies += [MCMD.TX_PARAM_SETUP_REQ]
    self.macIndex += 2
def dl_channel_req(self):
    """
    only EU863-870 & CN779-787
    payload 4 bytes
    [ChIndex:1][Freq:3]
    reply 1 byte bit encoded
    [RFU 7:2][Uplink Freq Exists 1][channel freq ok 0]
    """
    index = self.macCmds[self.macIndex + 1]
    frequency = self._computeFreq(self.macCmds[self.macIndex + 2:self.macIndex + 5])
    self.channelFrequencies[index] = frequency
    # RX1 now listens on a server-fixed frequency
    self.cache[RX1_FREQ_FIXED] = True
    self.cache[RX1_FREQUENCY] = frequency
    self.logger.info(f"DL channel req ChIndex {index} newFreq {frequency}")
    # answer - assume Uplink Frequency exists and channel freq ok
    self.macReplies += [MCMD.DL_CHANNEL_REQ, 0x03]
    self.macIndex += 5
def time_req(self):
    """
    prompt the server for a TIME_ANS
    """
    self.macReplies.append(MCMD.TIME_REQ)
def time_ans(self):
    """
    introduced in 1.0.3
    It is the time at the end of the uplink transmission requesting it.
    payload 5 bytes
    [seconds since epoch:0..3][fractional seconds:4]
    Fractional seconds are 1/256 s increments
    Received as a Class A downlink; does not require an ACK.
    """
    # bug fix: the 4-byte slice was logged as a raw byte sequence;
    # decode it LSB first like all LoRaWAN multi-byte fields
    raw = self.macCmds[self.macIndex + 1:self.macIndex + 5]
    seconds = int.from_bytes(bytes(raw), "little")
    fraction = self.macCmds[self.macIndex + 5] / 256
    self.logger.info(f"server time was {seconds}.{fraction}")
    # to use this the caller needs to track time of sending.
    # warning: we can determine the time the server received the request
    # but not the downlink latency, so there is an inherent error. If the
    # end device time is massively different it should be corrected, but
    # the Dragino HAT has a GPS and can be time synced to that instead -
    # use the server time at your peril.
    self.macIndex += 6
| 31.874459 | 127 | 0.595002 |
d3c968e8b49632a4e3441904c5c6034e7c8d8dc3 | 14,965 | py | Python | hdf5_dataio.py | HaoRiziyou/light-field-networks | 438c198dc024b1a90f5d98d5c1e6cde078de2ec0 | [
"MIT"
] | 95 | 2021-12-10T03:29:41.000Z | 2022-03-28T08:19:17.000Z | hdf5_dataio.py | HaoRiziyou/light-field-networks | 438c198dc024b1a90f5d98d5c1e6cde078de2ec0 | [
"MIT"
] | 4 | 2021-12-26T05:07:20.000Z | 2022-03-08T18:31:30.000Z | hdf5_dataio.py | HaoRiziyou/light-field-networks | 438c198dc024b1a90f5d98d5c1e6cde078de2ec0 | [
"MIT"
] | 14 | 2021-12-10T10:50:18.000Z | 2022-03-02T05:05:06.000Z | import cv2
import os
import torch
import numpy as np
from glob import glob
import data_util
import util
from collections import defaultdict
import h5py
class SceneInstanceDataset(torch.utils.data.Dataset):
    """This creates a dataset class for a single object instance (such as a single car)."""
    def __init__(self,
                 instance_idx,
                 instance_dir,
                 specific_observation_idcs=None,  # For few-shot case: Can pick specific observations only
                 img_sidelength=None,
                 num_images=None,
                 cache=None):
        """
        Args:
            instance_idx (int): index of this instance within the parent dataset
            instance_dir (str): directory holding rgb/, pose/ and optionally params/
            specific_observation_idcs (list or None): restrict to these observation indices
            img_sidelength (int): side length images and uv grids are resized to
            num_images (int or None): evenly subsample this many observations
            cache (dict or None): shared cache mapping f"{instance_idx}_{idx}" -> (rgb, pose)
        """
        self.instance_idx = instance_idx
        self.img_sidelength = img_sidelength
        self.instance_dir = instance_dir
        self.cache = cache
        self.has_depth = False
        color_dir = os.path.join(instance_dir, "rgb")
        pose_dir = os.path.join(instance_dir, "pose")
        param_dir = os.path.join(instance_dir, "params")
        if not os.path.isdir(color_dir):
            # NOTE(review): __init__ returns here with a half-initialised
            # object - consider raising instead
            print("Error! root dir %s is wrong" % instance_dir)
            return
        self.has_params = os.path.isdir(param_dir)
        self.color_paths = sorted(data_util.glob_imgs(color_dir))
        self.pose_paths = sorted(glob(os.path.join(pose_dir, "*.txt")))
        self.instance_name = os.path.basename(os.path.dirname(self.instance_dir))
        if self.has_params:
            self.param_paths = sorted(glob(os.path.join(param_dir, "*.txt")))
        else:
            self.param_paths = []
        if specific_observation_idcs is not None:
            # few-shot: keep only the requested observations
            self.color_paths = util.pick(self.color_paths, specific_observation_idcs)
            self.pose_paths = util.pick(self.pose_paths, specific_observation_idcs)
            self.param_paths = util.pick(self.param_paths, specific_observation_idcs)
        elif num_images is not None:
            # evenly subsample num_images observations
            idcs = np.linspace(0, stop=len(self.color_paths), num=num_images, endpoint=False, dtype=int)
            self.color_paths = util.pick(self.color_paths, idcs)
            self.pose_paths = util.pick(self.pose_paths, idcs)
            self.param_paths = util.pick(self.param_paths, idcs)
        # probe one image to learn the stored resolution
        dummy_img = data_util.load_rgb(self.color_paths[0])
        self.org_sidelength = dummy_img.shape[1]
        if self.org_sidelength < self.img_sidelength:
            # upsampling: build the uv grid at the target resolution and
            # scale the intrinsics to match
            uv = np.mgrid[0:self.img_sidelength, 0:self.img_sidelength].astype(np.int32).transpose(1, 2, 0)
            self.intrinsics, _, _, _ = util.parse_intrinsics(os.path.join(self.instance_dir, "intrinsics.txt"),
                                                             trgt_sidelength=self.img_sidelength)
        else:
            # downsampling: build the grid at the original resolution then
            # nearest-neighbour resize; intrinsics stay at the original scale
            uv = np.mgrid[0:self.org_sidelength, 0:self.org_sidelength].astype(np.int32).transpose(1, 2, 0)
            uv = cv2.resize(uv, (self.img_sidelength, self.img_sidelength), interpolation=cv2.INTER_NEAREST)
            self.intrinsics, _, _, _ = util.parse_intrinsics(os.path.join(self.instance_dir, "intrinsics.txt"),
                                                             trgt_sidelength=self.org_sidelength)
        # flip (row, col) to (x, y) and flatten to (sidelength**2, 2)
        uv = torch.from_numpy(np.flip(uv, axis=-1).copy()).long()
        self.uv = uv.reshape(-1, 2).float()
        self.intrinsics = torch.Tensor(self.intrinsics).float()
    def __len__(self):
        # number of usable (pose, image) pairs
        return min(len(self.pose_paths), len(self.color_paths))
    def __getitem__(self, idx):
        """Return one observation of this instance as a dict of tensors."""
        key = f'{self.instance_idx}_{idx}'
        if (self.cache is not None) and (key in self.cache):
            rgb, pose = self.cache[key]
        else:
            rgb = data_util.load_rgb(self.color_paths[idx])
            pose = data_util.load_pose(self.pose_paths[idx])
            if (self.cache is not None) and (key not in self.cache):
                self.cache[key] = rgb, pose
        rgb = cv2.resize(rgb, (self.img_sidelength, self.img_sidelength), interpolation=cv2.INTER_NEAREST)
        rgb = rgb.reshape(-1, 3)
        sample = {
            "instance_idx": torch.Tensor([self.instance_idx]).squeeze().long(),
            "rgb": torch.from_numpy(rgb).float(),
            "cam2world": torch.from_numpy(pose).float(),
            "uv": self.uv,
            "intrinsics": self.intrinsics,
            "height_width": torch.from_numpy(np.array([self.img_sidelength, self.img_sidelength])),
            "instance_name": self.instance_name
        }
        return sample
class SceneInstanceDatasetHDF5(torch.utils.data.Dataset):
    """Dataset for a single object instance backed by one group of an HDF5 file."""
    def __init__(self, instance_idx, instance_ds, img_sidelength, instance_name, specific_observation_idcs=None,
                 num_images=None, cache=None):
        """
        Args:
            instance_idx (int): index of this instance within the parent dataset
            instance_ds (h5py.Group): group containing 'rgb', 'pose' and 'intrinsics.txt'
            img_sidelength (int): side length images and uv grids are resized to
            instance_name (str): name of the instance group
            specific_observation_idcs (list or None): restrict to these observation indices
            num_images (int or None): evenly subsample this many observations
            cache (dict or None): shared cache mapping f"{instance_idx}_{idx}" -> (rgb, pose)
        """
        self.instance_idx = instance_idx
        self.img_sidelength = img_sidelength
        self.cache = cache
        self.instance_ds = instance_ds
        self.has_depth = False
        self.color_keys = sorted(list(instance_ds['rgb'].keys()))
        self.pose_keys = sorted(list(instance_ds['pose'].keys()))
        self.instance_name = instance_name
        if specific_observation_idcs is not None:
            # few-shot: keep only the requested observations
            self.color_keys = util.pick(self.color_keys, specific_observation_idcs)
            self.pose_keys = util.pick(self.pose_keys, specific_observation_idcs)
        elif num_images is not None:
            # evenly subsample num_images observations
            idcs = np.linspace(0, stop=len(self.color_keys), num=num_images, endpoint=False, dtype=int)
            self.color_keys = util.pick(self.color_keys, idcs)
            self.pose_keys = util.pick(self.pose_keys, idcs)
        # probe one image to learn the stored resolution
        dummy_img = data_util.load_rgb_hdf5(self.instance_ds, self.color_keys[0])
        self.org_sidelength = dummy_img.shape[1]
        if self.org_sidelength < self.img_sidelength:
            # upsampling: grid at target resolution, intrinsics rescaled
            uv = np.mgrid[0:self.img_sidelength, 0:self.img_sidelength].astype(np.int32).transpose(1, 2, 0)
            self.intrinsics, _, _ = util.parse_intrinsics_hdf5(instance_ds['intrinsics.txt'],
                                                               trgt_sidelength=self.img_sidelength)
        else:
            # downsampling: grid at original resolution, then NN resize
            uv = np.mgrid[0:self.org_sidelength, 0:self.org_sidelength].astype(np.int32).transpose(1, 2, 0)
            uv = cv2.resize(uv, (self.img_sidelength, self.img_sidelength), interpolation=cv2.INTER_NEAREST)
            self.intrinsics, _, _ = util.parse_intrinsics_hdf5(instance_ds['intrinsics.txt'],
                                                               trgt_sidelength=self.org_sidelength)
        # flip (row, col) to (x, y) and flatten to (sidelength**2, 2)
        uv = torch.from_numpy(np.flip(uv, axis=-1).copy()).long()
        self.uv = uv.reshape(-1, 2).float()
        self.intrinsics = torch.from_numpy(self.intrinsics).float()
    def __len__(self):
        # number of usable (pose, image) pairs
        return min(len(self.pose_keys), len(self.color_keys))
    def __getitem__(self, idx):
        """Return one observation of this instance as a dict of tensors."""
        key = f'{self.instance_idx}_{idx}'
        if (self.cache is not None) and (key in self.cache):
            rgb, pose = self.cache[key]
        else:
            rgb = data_util.load_rgb_hdf5(self.instance_ds, self.color_keys[idx])
            pose = data_util.load_pose_hdf5(self.instance_ds, self.pose_keys[idx])
            if (self.cache is not None) and (key not in self.cache):
                self.cache[key] = rgb, pose
        rgb = cv2.resize(rgb, (self.img_sidelength, self.img_sidelength), interpolation=cv2.INTER_NEAREST)
        rgb = rgb.reshape(-1, 3)
        sample = {
            "instance_idx": torch.Tensor([self.instance_idx]).squeeze().long(),
            "rgb": torch.from_numpy(rgb).float(),
            "cam2world": torch.from_numpy(pose).float(),
            "uv": self.uv,
            "intrinsics": self.intrinsics,
            "instance_name": self.instance_name
        }
        return sample
def get_num_instances(data_root):
    """Return the number of object instances in a dataset.

    :param data_root: path to an HDF5 file (one top-level group per
        instance) or a directory with one sub-directory per instance
    :return: instance count (int)
    """
    if 'hdf5' in data_root:
        # close the handle after counting the top-level groups
        with h5py.File(data_root, 'r') as file:
            return len(list(file.keys()))
    # bug fix: the original assigned len(glob(...)) to `instances` and then
    # returned len(instances), a TypeError for directory roots
    return len(glob(os.path.join(data_root, "*/")))
def get_instance_datasets(root, max_num_instances=None, specific_observation_idcs=None,
                          cache=None, sidelen=None, max_observations_per_instance=None, start_idx=0):
    """Build one SceneInstanceDataset per instance sub-directory of root.

    :param root: directory containing one sub-directory per instance
    :param max_num_instances: optional cap on the number of instances
    :param specific_observation_idcs: optional fixed observation indices
    :param cache: optional shared observation cache
    :param sidelen: image side length passed to each instance dataset
    :param max_observations_per_instance: optional per-instance observation cap
    :param start_idx: offset added to instance indices
    :return: list of SceneInstanceDataset
    """
    instance_dirs = sorted(glob(os.path.join(root, "*/")))
    assert (len(instance_dirs) != 0), f"No objects in the directory {root}"
    # idiomatic None test (was '!= None')
    if max_num_instances is not None:
        instance_dirs = instance_dirs[:max_num_instances]
    # 'directory' instead of 'dir', which shadows the builtin
    all_instances = [SceneInstanceDataset(instance_idx=idx + start_idx, instance_dir=directory,
                                          specific_observation_idcs=specific_observation_idcs, img_sidelength=sidelen,
                                          cache=cache, num_images=max_observations_per_instance)
                     for idx, directory in enumerate(instance_dirs)]
    return all_instances
def get_instance_datasets_hdf5(root, max_num_instances=None, specific_observation_idcs=None,
                               cache=None, sidelen=None, max_observations_per_instance=None,
                               start_idx=0):
    """Build one SceneInstanceDatasetHDF5 per top-level group of an HDF5 file.

    The file handle is deliberately kept open: the returned datasets read
    from it lazily.
    """
    file = h5py.File(root, 'r')
    instance_names = sorted(file.keys())
    print(f"File {root}, {len(instance_names)} instances")
    if max_num_instances is not None:
        instance_names = instance_names[:max_num_instances]
    datasets = []
    for offset, name in enumerate(instance_names):
        datasets.append(SceneInstanceDatasetHDF5(instance_idx=offset + start_idx,
                                                 instance_ds=file[name],
                                                 specific_observation_idcs=specific_observation_idcs,
                                                 img_sidelength=sidelen,
                                                 num_images=max_observations_per_instance,
                                                 cache=cache, instance_name=name))
    return datasets
class SceneClassDataset(torch.utils.data.Dataset):
    """Dataset for a class of objects, where each datapoint is a SceneInstanceDataset."""
    def __init__(self, num_context, num_trgt, data_root,
                 vary_context_number=False, query_sparsity=None,
                 img_sidelength=None, max_num_instances=None,
                 max_observations_per_instance=None, specific_observation_idcs=None,
                 test=False, test_context_idcs=None, cache=None, start_idx=0):
        """
        Args:
            num_context (int): number of context observations per datapoint
            num_trgt (int): number of query/target observations per datapoint
            data_root (str): HDF5 file (path contains 'hdf5') or directory of instances
            vary_context_number (bool): randomly mask down to 1..num_context contexts
            query_sparsity (int or None): random subset size of query pixels
            img_sidelength (int): image side length
            max_num_instances (int or None): cap on number of instances
            max_observations_per_instance (int or None): per-instance observation cap
            specific_observation_idcs (list or None): fixed observation indices
            test (bool): deterministic test-mode indexing
            test_context_idcs (list or None): context observation indices for test mode
            cache (dict or None): shared observation cache
            start_idx (int): offset added to instance indices
        """
        self.num_context = num_context
        self.num_trgt = num_trgt
        self.query_sparsity = query_sparsity
        self.img_sidelength = img_sidelength
        self.vary_context_number = vary_context_number
        self.cache = cache
        self.test = test
        self.test_context_idcs = test_context_idcs
        if 'hdf5' in data_root:
            self.all_instances = get_instance_datasets_hdf5(data_root, max_num_instances=max_num_instances,
                                                            specific_observation_idcs=specific_observation_idcs,
                                                            cache=cache, sidelen=img_sidelength,
                                                            max_observations_per_instance=max_observations_per_instance,
                                                            start_idx=start_idx)
        else:
            self.all_instances = get_instance_datasets(data_root, max_num_instances=max_num_instances,
                                                       specific_observation_idcs=specific_observation_idcs,
                                                       cache=cache, sidelen=img_sidelength,
                                                       max_observations_per_instance=max_observations_per_instance,
                                                       start_idx=start_idx)
        self.num_per_instance_observations = [len(obj) for obj in self.all_instances]
        self.num_instances = len(self.all_instances)
    # NOTE(review): the parameter name `dict` shadows the builtin
    def sparsify(self, dict, sparsity):
        """Randomly keep `sparsity` query pixels from the 'rgb'/'uv' entries."""
        if sparsity is None:
            return dict
        else:
            new_dict = {}
            # same random pixel subset for rgb and uv so they stay aligned
            rand_idcs = np.random.choice(self.img_sidelength ** 2, size=sparsity, replace=False)
            for key in ['rgb', 'uv']:
                new_dict[key] = dict[key][rand_idcs]
            for key, v in dict.items():
                if key not in ['rgb', 'uv']:
                    new_dict[key] = dict[key]
            return new_dict
    def __len__(self):
        # total observations across all instances
        return np.sum(self.num_per_instance_observations)
    def get_instance_idx(self, idx):
        """Map a flat index to (instance index, observation index).

        In test mode the mapping is deterministic; in train mode a random
        instance is drawn and the observation index is ignored.
        """
        if self.test:
            obj_idx = 0
            while idx >= 0:
                idx -= self.num_per_instance_observations[obj_idx]
                obj_idx += 1
            return obj_idx - 1, int(idx + self.num_per_instance_observations[obj_idx - 1])
        else:
            return np.random.randint(self.num_instances), 0
    def collate_fn(self, batch_list):
        """Stack a list of sample dicts into a dict of batched tensors."""
        result = defaultdict(list)
        if not batch_list:
            return result
        keys = batch_list[0].keys()
        for entry in batch_list:
            # make them all into a new dict
            for key in keys:
                result[key].append(entry[key])
        for key in keys:
            try:
                result[key] = torch.stack(result[key], dim=0)
            except:
                # NOTE(review): bare except - non-stackable entries (e.g.
                # strings) are left as lists, but real errors are hidden too
                continue
        return result
    def __getitem__(self, idx):
        """Return ({'context': ..., 'query': ...}, query) for one datapoint."""
        context = []
        trgt = []
        post_input = []
        obj_idx, det_idx = self.get_instance_idx(idx)
        if self.vary_context_number and self.num_context > 0:
            # number of contexts actually enabled via the 'mask' entry
            num_context = np.random.randint(1, self.num_context + 1)
        if not self.test:
            try:
                sample_idcs = np.random.choice(len(self.all_instances[obj_idx]), replace=False,
                                               size=self.num_context + self.num_trgt)
            except:
                # NOTE(review): bare except - falls back to sampling with
                # replacement when the instance has too few observations
                sample_idcs = np.random.choice(len(self.all_instances[obj_idx]), replace=True,
                                               size=self.num_context + self.num_trgt)
        for i in range(self.num_context):
            if self.test:
                sample = self.all_instances[obj_idx][self.test_context_idcs[i]]
            else:
                sample = self.all_instances[obj_idx][sample_idcs[i]]
            context.append(sample)
            if self.vary_context_number:
                # contexts beyond num_context stay in the batch but are
                # disabled via a zero mask
                if i < num_context:
                    context[-1]['mask'] = torch.Tensor([1.])
                else:
                    context[-1]['mask'] = torch.Tensor([0.])
            else:
                context[-1]['mask'] = torch.Tensor([1.])
        for i in range(self.num_trgt):
            if self.test:
                sample = self.all_instances[obj_idx][det_idx]
            else:
                sample = self.all_instances[obj_idx][sample_idcs[i + self.num_context]]
            # post_input.append(sample)
            # post_input[-1]['mask'] = torch.Tensor([1.])
            sub_sample = self.sparsify(sample, self.query_sparsity)
            trgt.append(sub_sample)
        # post_input = self.collate_fn(post_input)
        if self.num_context > 0:
            context = self.collate_fn(context)
        trgt = self.collate_fn(trgt)
        return {'context': context, 'query': trgt}, trgt
| 43.126801 | 120 | 0.596191 |
231f5fd62e7e3dc1e5d85cfdb3d69af1005b902a | 18,669 | py | Python | qiskit/quantum_info/operators/pauli.py | yeralin/qiskit-terra | 251930a7b5d83af121ea0f3aafb33a54a1860e14 | [
"Apache-2.0"
] | 1 | 2019-06-04T12:23:36.000Z | 2019-06-04T12:23:36.000Z | qiskit/quantum_info/operators/pauli.py | yeralin/qiskit-terra | 251930a7b5d83af121ea0f3aafb33a54a1860e14 | [
"Apache-2.0"
] | 35 | 2019-03-07T02:09:22.000Z | 2022-03-22T19:55:15.000Z | qiskit/quantum_info/operators/pauli.py | yeralin/qiskit-terra | 251930a7b5d83af121ea0f3aafb33a54a1860e14 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,assignment-from-no-return
"""
Tools for working with Pauli Operators.
A simple pauli class and some tools.
"""
import warnings
import numpy as np
from scipy import sparse
from qiskit.exceptions import QiskitError
def _make_np_bool(arr):
if not isinstance(arr, (list, np.ndarray, tuple)):
arr = [arr]
arr = np.asarray(arr).astype(np.bool)
return arr
def _count_set_bits(i):
"""
Counts the number of set bits in a uint (or a numpy array of uints).
"""
i = i - ((i >> 1) & 0x55555555)
i = (i & 0x33333333) + ((i >> 2) & 0x33333333)
return (((i + (i >> 4) & 0xF0F0F0F) * 0x1010101) & 0xffffffff) >> 24
class Pauli:
"""A simple class representing Pauli Operators.
The form is P_zx = (-i)^dot(z,x) Z^z X^x where z and x are elements of Z_2^n.
That is, there are 4^n elements (no phases in this group).
For example, for 1 qubit
P_00 = Z^0 X^0 = I
P_01 = X
P_10 = Z
P_11 = -iZX = (-i) iY = Y
The overload __mul__ does not track the sign: P1*P2 = Z^(z1+z2) X^(x1+x2) but
sgn_prod does __mul__ and track the phase: P1*P2 = (-i)^dot(z1+z2,x1+x2) Z^(z1+z2) X^(x1+x2)
where the sums are taken modulo 2.
Pauli vectors z and x are supposed to be defined as boolean numpy arrays.
Ref.
Jeroen Dehaene and Bart De Moor
Clifford group, stabilizer states, and linear and quadratic operations
over GF(2)
Phys. Rev. A 68, 042318 – Published 20 October 2003
"""
def __init__(self, z=None, x=None, label=None):
    r"""Make the Pauli object.

    Note that, for the qubit index:

    - Order of z, x vectors is q_0 ... q_{n-1},
    - Order of pauli label is q_{n-1} ... q_0

    E.g.,

    - z and x vectors: z = [z_0 ... z_{n-1}], x = [x_0 ... x_{n-1}]
    - a pauli is $P_{n-1} \otimes ... \otimes P_0$

    Args:
        z (numpy.ndarray): boolean, z vector
        x (numpy.ndarray): boolean, x vector
        label (str): pauli label
    """
    if label is None:
        # construct directly from the symplectic (z, x) pair
        self._init_from_bool(z, x)
    else:
        # a label takes precedence over any supplied z/x vectors
        parsed = Pauli.from_label(label)
        self._z = parsed.z
        self._x = parsed.x
@classmethod
def from_label(cls, label):
    r"""Take pauli string to construct pauli.

    The qubit index of pauli label is q_{n-1} ... q_0.
    E.g., a pauli is $P_{n-1} \otimes ... \otimes P_0$

    Args:
        label (str): pauli label

    Returns:
        Pauli: the constructed pauli

    Raises:
        QiskitError: invalid character in the label
    """
    # bug fix: np.bool was removed in NumPy 1.24; use the builtin bool
    z = np.zeros(len(label), dtype=bool)
    x = np.zeros(len(label), dtype=bool)
    for i, char in enumerate(label):
        if char == 'X':
            x[-i - 1] = True
        elif char == 'Z':
            z[-i - 1] = True
        elif char == 'Y':
            z[-i - 1] = True
            x[-i - 1] = True
        elif char != 'I':
            raise QiskitError("Pauli string must be only consisted of 'I', 'X', "
                              "'Y' or 'Z' but you have {}.".format(char))
    return cls(z=z, x=x)
def _init_from_bool(self, z, x):
    """Construct pauli from boolean array.

    Args:
        z (numpy.ndarray): boolean, z vector
        x (numpy.ndarray): boolean, x vector

    Returns:
        Pauli: self

    Raises:
        QiskitError: if z or x are None or the length of z and x are different.
    """
    if z is None:
        raise QiskitError("z vector must not be None.")
    if x is None:
        raise QiskitError("x vector must not be None.")
    if len(z) != len(x):
        raise QiskitError("length of z and x vectors must be "
                          "the same. (z: {} vs x: {})".format(len(z), len(x)))
    # coerce both vectors to 1-d boolean numpy arrays
    self._z = _make_np_bool(z)
    self._x = _make_np_bool(x)
    return self
def __len__(self):
    """Return number of qubits.

    The length of the symplectic z vector defines the qubit count.
    """
    return len(self._z)
def __repr__(self):
"""Return the representation of self."""
z = list(self._z)
x = list(self._x)
ret = self.__class__.__name__ + "(z={}, x={})".format(z, x)
return ret
def __str__(self):
"""Output the Pauli label."""
label = ''
for z, x in zip(self._z[::-1], self._x[::-1]):
if not z and not x:
label = ''.join([label, 'I'])
elif not z and x:
label = ''.join([label, 'X'])
elif z and not x:
label = ''.join([label, 'Z'])
else:
label = ''.join([label, 'Y'])
return label
def __eq__(self, other):
"""Return True if all Pauli terms are equal.
Args:
other (Pauli): other pauli
Returns:
bool: are self and other equal.
"""
res = False
if len(self) == len(other):
if np.all(self._z == other.z) and np.all(self._x == other.x):
res = True
return res
def __mul__(self, other):
    """Multiply two Paulis (phase is NOT tracked; see ``sgn_prod``).

    Returns:
        Pauli: the multiplied pauli.

    Raises:
        QiskitError: if the number of qubits of two paulis are different.
    """
    if len(self) != len(other):
        raise QiskitError("These Paulis cannot be multiplied - different "
                          "number of qubits. ({} vs {})".format(len(self), len(other)))
    # the product's symplectic components are the XOR of the operands'
    return Pauli(np.logical_xor(self._z, other.z),
                 np.logical_xor(self._x, other.x))
def __imul__(self, other):
"""Multiply two Paulis.
Returns:
Pauli: the multiplied pauli and save to itself, in-place computation.
Raises:
QiskitError: if the number of qubits of two paulis are different.
"""
if len(self) != len(other):
raise QiskitError("These Paulis cannot be multiplied - different "
"number of qubits. ({} vs {})".format(len(self), len(other)))
self._z = np.logical_xor(self._z, other.z)
self._x = np.logical_xor(self._x, other.x)
return self
def __hash__(self):
"""Make object is hashable, based on the pauli label to hash."""
return hash(str(self))
@property
def z(self):
    """Getter of z.

    Returns:
        numpy.ndarray: boolean z vector, ordered q_0 ... q_{n-1}.
    """
    return self._z
@property
def x(self):
    """Getter of x.

    Returns:
        numpy.ndarray: boolean x vector, ordered q_0 ... q_{n-1}.
    """
    return self._x
@staticmethod
def sgn_prod(p1, p2):
    r"""
    Multiply two Paulis and track the phase.

    $P_3 = P_1 \otimes P_2$: X*Y

    Args:
        p1 (Pauli): pauli 1
        p2 (Pauli): pauli 2

    Returns:
        Pauli: the multiplied pauli
        complex: the sign of the multiplication, 1, -1, 1j or -1j
    """
    # compute the phase first, then the (phase-less) product
    sign = Pauli._prod_phase(p1, p2)
    product = p1 * p2
    return product, sign
@property
def num_qubits(self):
    """Number of qubits.

    Equivalent to ``len(self)``.
    """
    return len(self)
@property
def numberofqubits(self):
"""Deprecated, use ``num_qubits`` instead. Number of qubits."""
warnings.warn('The Pauli.numberofqubits method is deprecated as of 0.13.0, and '
'will be removed no earlier than 3 months after that release date. '
'You should use the Pauli.num_qubits method instead.',
DeprecationWarning, stacklevel=2)
return self.num_qubits
def to_label(self):
    """Present the pauli labels in I, X, Y, Z format.

    Order is $q_{n-1} .... q_0$ (most significant qubit first).

    Returns:
        str: pauli label
    """
    return str(self)
def to_matrix(self):
r"""
Convert Pauli to a matrix representation.
Order is q_{n-1} .... q_0, i.e., $P_{n-1} \otimes ... P_0$
Returns:
numpy.array: a matrix that represents the pauli.
"""
mat = self.to_spmatrix()
return mat.toarray()
def to_spmatrix(self):
    r"""
    Convert Pauli to a sparse matrix representation (CSR format).

    Order is q_{n-1} .... q_0, i.e., $P_{n-1} \otimes ... P_0$

    Returns:
        scipy.sparse.csr_matrix: a sparse matrix with CSR format that
        represents the pauli.
    """
    _x, _z = self._x, self._z
    n = 2**len(_x)
    # pack the boolean vectors into integers: bit i corresponds to qubit i
    twos_array = 1 << np.arange(len(_x))
    xs = np.array(_x).dot(twos_array)
    zs = np.array(_z).dot(twos_array)
    # a Pauli has exactly one nonzero per row, so the CSR indptr is 0..n
    rows = np.arange(n+1, dtype=np.uint)
    # the X part permutes basis states: column index = row index XOR xs
    columns = rows ^ xs
    # overall (-i)^(x.z) phase from writing Y as (-i) Z X per qubit
    global_factor = (-1j)**np.dot(np.array(_x, dtype=np.uint), _z)
    # the Z part contributes (-1)^(parity of bits where both z and the
    # row index are set)
    data = global_factor*(-1)**np.mod(_count_set_bits(zs & rows), 2)
    return sparse.csr_matrix((data, columns, rows), shape=(n, n))
def to_operator(self):
"""Convert to Operator object."""
# Place import here to avoid cyclic import from circuit visualization
from qiskit.quantum_info.operators.operator import Operator
return Operator(self.to_matrix())
def to_instruction(self):
"""Convert to Pauli circuit instruction."""
from qiskit.circuit import QuantumCircuit, QuantumRegister
from qiskit.circuit.library.standard_gates import IGate, XGate, YGate, ZGate
gates = {'I': IGate(), 'X': XGate(), 'Y': YGate(), 'Z': ZGate()}
label = self.to_label()
num_qubits = self.num_qubits
qreg = QuantumRegister(num_qubits)
circuit = QuantumCircuit(qreg, name='Pauli:{}'.format(label))
for i, pauli in enumerate(reversed(label)):
circuit.append(gates[pauli], [qreg[i]])
return circuit.to_instruction()
def update_z(self, z, indices=None):
"""
Update partial or entire z.
Args:
z (numpy.ndarray or list): to-be-updated z
indices (numpy.ndarray or list or optional): to-be-updated qubit indices
Returns:
Pauli: self
Raises:
QiskitError: when updating whole z, the number of qubits must be the same.
"""
z = _make_np_bool(z)
if indices is None:
if len(self._z) != len(z):
raise QiskitError("During updating whole z, you can not "
"change the number of qubits.")
self._z = z
else:
if not isinstance(indices, list) and not isinstance(indices, np.ndarray):
indices = [indices]
for p, idx in enumerate(indices):
self._z[idx] = z[p]
return self
def update_x(self, x, indices=None):
"""
Update partial or entire x.
Args:
x (numpy.ndarray or list): to-be-updated x
indices (numpy.ndarray or list or optional): to-be-updated qubit indices
Returns:
Pauli: self
Raises:
QiskitError: when updating whole x, the number of qubits must be the same.
"""
x = _make_np_bool(x)
if indices is None:
if len(self._x) != len(x):
raise QiskitError("During updating whole x, you can not change "
"the number of qubits.")
self._x = x
else:
if not isinstance(indices, list) and not isinstance(indices, np.ndarray):
indices = [indices]
for p, idx in enumerate(indices):
self._x[idx] = x[p]
return self
    def insert_paulis(self, indices=None, paulis=None, pauli_labels=None):
        """
        Insert or append pauli to the targeted indices.

        If indices is None, it means append at the end.

        Args:
            indices (list[int]): the qubit indices to be inserted
            paulis (Pauli): the to-be-inserted or appended pauli
            pauli_labels (list[str]): the to-be-inserted or appended pauli label

        Note:
            the indices refers to the location of original paulis,
            e.g. if indices = [0, 2], pauli_labels = ['Z', 'I'] and original pauli = 'ZYXI'
            the pauli will be updated to ZY'I'XI'Z'
            'Z' and 'I' are inserted before the qubit at 0 and 2.

        Returns:
            Pauli: self

        Raises:
            QiskitError: provide both `paulis` and `pauli_labels` at the same time
        """
        if pauli_labels is not None:
            if paulis is not None:
                raise QiskitError("Please only provide either `paulis` or `pauli_labels`")
            if isinstance(pauli_labels, str):
                pauli_labels = list(pauli_labels)
            # since pauli label is in reversed order.
            paulis = Pauli.from_label(pauli_labels[::-1])
        if indices is None: # append
            self._z = np.concatenate((self._z, paulis.z))
            self._x = np.concatenate((self._x, paulis.x))
        else:
            if not isinstance(indices, list):
                indices = [indices]
            # np.insert places each inserted value BEFORE the given original
            # index, matching the docstring example.
            self._z = np.insert(self._z, indices, paulis.z)
            self._x = np.insert(self._x, indices, paulis.x)
        return self
def append_paulis(self, paulis=None, pauli_labels=None):
"""
Append pauli at the end.
Args:
paulis (Pauli): the to-be-inserted or appended pauli
pauli_labels (list[str]): the to-be-inserted or appended pauli label
Returns:
Pauli: self
"""
return self.insert_paulis(None, paulis=paulis, pauli_labels=pauli_labels)
def delete_qubits(self, indices):
"""
Delete pauli at the indices.
Args:
indices(list[int]): the indices of to-be-deleted paulis
Returns:
Pauli: self
"""
if not isinstance(indices, list):
indices = [indices]
self._z = np.delete(self._z, indices)
self._x = np.delete(self._x, indices)
return self
@classmethod
def random(cls, num_qubits, seed=None):
"""Return a random Pauli on number of qubits.
Args:
num_qubits (int): the number of qubits
seed (int): Optional. To set a random seed.
Returns:
Pauli: the random pauli
"""
rng = np.random.default_rng(seed)
z = rng.integers(2, size=num_qubits).astype(np.bool)
x = rng.integers(2, size=num_qubits).astype(np.bool)
return cls(z, x)
@classmethod
def pauli_single(cls, num_qubits, index, pauli_label):
"""
Generate single qubit pauli at index with pauli_label with length num_qubits.
Args:
num_qubits (int): the length of pauli
index (int): the qubit index to insert the single qubit
pauli_label (str): pauli
Returns:
Pauli: single qubit pauli
"""
tmp = Pauli.from_label(pauli_label)
z = np.zeros(num_qubits, dtype=np.bool)
x = np.zeros(num_qubits, dtype=np.bool)
z[index] = tmp.z[0]
x[index] = tmp.x[0]
return cls(z, x)
def kron(self, other):
r"""Kronecker product of two paulis.
Order is $P_2 (other) \otimes P_1 (self)$
Args:
other (Pauli): P2
Returns:
Pauli: self
"""
self.insert_paulis(indices=None, paulis=other)
return self
@staticmethod
def _prod_phase(p1, p2):
phase_changes = 0
for z1, x1, z2, x2 in zip(p1.z, p1.x, p2.z, p2.x):
if z1 and not x1: # Z
if x2:
phase_changes = phase_changes - 1 if z2 else phase_changes + 1
elif not z1 and x1: # X
if z2:
phase_changes = phase_changes + 1 if x2 else phase_changes - 1
elif z1 and x1: # Y
if not z2 and x2: # X
phase_changes -= 1
elif z2 and not x2: # Z
phase_changes += 1
phase = (1j) ** (phase_changes % 4)
return phase
def pauli_group(number_of_qubits, case='weight'):
    """Return the Pauli group with 4^n elements.

    The phases have been removed.
    case 'weight' is ordered by Pauli weights and
    case 'tensor' is ordered by I,X,Y,Z counting lowest qubit fastest.

    Args:
        number_of_qubits (int): number of qubits
        case (str): determines ordering of group elements ('weight' or 'tensor')

    Returns:
        list: list of Pauli objects

    Raises:
        QiskitError: case is not 'weight' or 'tensor'
        QiskitError: number_of_qubits is larger than 4
    """
    # Guard first so both orderings share the size check (4^5 = 1024 elements
    # was the original cutoff).
    if number_of_qubits >= 5:
        raise QiskitError("Only support number of qubits is less than 5")
    if case == 'weight':
        tmp = pauli_group(number_of_qubits, case='tensor')
        # sort on the weight of the Pauli operator
        return sorted(tmp, key=lambda x: -np.count_nonzero(
            np.array(x.to_label(), 'c') == b'I'))
    if case == 'tensor':
        temp_set = []
        # the Pauli set is in tensor order II IX IY IZ XI ...
        for k in range(4 ** number_of_qubits):
            # Fix: np.bool was removed in NumPy 1.24; builtin bool is the
            # supported dtype spelling.
            z = np.zeros(number_of_qubits, dtype=bool)
            x = np.zeros(number_of_qubits, dtype=bool)
            # base-4 digit j of k selects I/X/Y/Z for qubit j
            for j in range(number_of_qubits):
                element = (k // (4 ** j)) % 4
                if element == 1:
                    x[j] = True
                elif element == 2:
                    z[j] = True
                    x[j] = True
                elif element == 3:
                    z[j] = True
            temp_set.append(Pauli(z, x))
        return temp_set
    raise QiskitError("Only support 'weight' or 'tensor' cases "
                      "but you have {}.".format(case))
| 31.967466 | 96 | 0.543575 |
e05bd34e3280f4ccad9b50190596b94167fb87ab | 2,240 | py | Python | API License Inventory by Project.py | philipckk/Black-Duck-API | d666ca3717ce9ae8407841ce52e74ff1691a6c27 | [
"Apache-2.0"
] | null | null | null | API License Inventory by Project.py | philipckk/Black-Duck-API | d666ca3717ce9ae8407841ce52e74ff1691a6c27 | [
"Apache-2.0"
] | null | null | null | API License Inventory by Project.py | philipckk/Black-Duck-API | d666ca3717ce9ae8407841ce52e74ff1691a6c27 | [
"Apache-2.0"
] | 1 | 2022-02-23T14:47:50.000Z | 2022-02-23T14:47:50.000Z | from blackduck import Client
import logging
import re
import os
from collections import Counter
import csv
# Console logging with module/line prefixes for each progress message.
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s] {%(module)s:%(lineno)d} %(levelname)s - %(message)s"
)
# Please modify the token and base_url
# SECURITY(review): verify=False disables TLS certificate checking; only
# acceptable against a trusted local/test Black Duck instance.
bd = Client(
    token=os.environ.get('blackduck_token'),
    base_url="https://localhost",
    verify=False  # TLS certificate verification
)
# Accumulates (display_name, license_id) tuples across all scanned projects.
lic_list = []
# Get license and ID
def extract_details(lic_item):
    # Pull the trailing license ID out of the item's license URL and pair it
    # with the human-readable display name.
    display_name = lic_item['licenseDisplay']
    id_matches = re.findall(r"licenses\/(.*$)", lic_item['license'])
    return (display_name, id_matches[0])
# Query projects
# Walk every project/version/component, collecting one (name, id) tuple per
# license (expanding dual/multi-license entries).
for project in bd.get_resource(name='projects'):
    prj_name = project.get('name')
    # Please modify project names
    if prj_name in ["chanp_JavaSecCode","chanp_insecure_bank"]:
        print("Processing Project:",prj_name)
        for version in bd.get_resource('versions', project):
            print("Processing Version:",version['versionName'])
            for component in bd.get_resource('components', version):
                for license in component['licenses']:
                    # If there is dual or multi licenses
                    if len(license['licenses'])>0:
                        for multi_lic in license['licenses']:
                            lic_list.append(extract_details(multi_lic))
                    # Otherwise
                    else:
                        lic_list.append(extract_details(license))
# Count occurrences of each distinct license tuple.
lic_cnt = Counter(lic_list)
results = {}
print("Outputing",len(lic_cnt),"licenses")
# Get license terms
# For each distinct license, fetch its terms and flatten them into a single
# "name=responsibility ..." string keyed with the display name.
for (key, value) in lic_cnt.items():
    licenseTerms = bd.get_json(bd.base_url+'/api/licenses/'+key[1]+'/license-terms')['items']
    LT = []
    for licenseTerm in licenseTerms:
        LT.append(licenseTerm['name']+"="+licenseTerm['responsibility'])
    results[(key[0],' '.join([str(elem) for elem in LT]))] = value
# Output as CSV
# Rows are sorted by descending occurrence count.
with open('license_inventory.csv','w', newline='') as csvfile:
    fieldnames=['License','License Terms','count']
    writer=csv.writer(csvfile)
    writer.writerow(fieldnames)
    for key, value in sorted(results.items(), key=lambda x: x[1], reverse=True):
        writer.writerow(list(key) + [value])
print("All done")
| 33.939394 | 93 | 0.6375 |
170e99a5d448b688e45e486b6b9ea88a518dc30b | 6,334 | py | Python | sdk/python/pulumi_azure_native/apimanagement/v20180601preview/get_diagnostic.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20180601preview/get_diagnostic.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20180601preview/get_diagnostic.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDiagnosticResult',
'AwaitableGetDiagnosticResult',
'get_diagnostic',
]
@pulumi.output_type
class GetDiagnosticResult:
    """
    Diagnostic details.
    """
    # NOTE: generated by the Pulumi SDK generator -- limit hand edits to
    # comments. Each constructor argument is validated and stored through
    # pulumi.set, and surfaced again via the @pulumi.getter properties below.
    def __init__(__self__, always_log=None, backend=None, enable_http_correlation_headers=None, frontend=None, id=None, logger_id=None, name=None, sampling=None, type=None):
        if always_log and not isinstance(always_log, str):
            raise TypeError("Expected argument 'always_log' to be a str")
        pulumi.set(__self__, "always_log", always_log)
        if backend and not isinstance(backend, dict):
            raise TypeError("Expected argument 'backend' to be a dict")
        pulumi.set(__self__, "backend", backend)
        if enable_http_correlation_headers and not isinstance(enable_http_correlation_headers, bool):
            raise TypeError("Expected argument 'enable_http_correlation_headers' to be a bool")
        pulumi.set(__self__, "enable_http_correlation_headers", enable_http_correlation_headers)
        if frontend and not isinstance(frontend, dict):
            raise TypeError("Expected argument 'frontend' to be a dict")
        pulumi.set(__self__, "frontend", frontend)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if logger_id and not isinstance(logger_id, str):
            raise TypeError("Expected argument 'logger_id' to be a str")
        pulumi.set(__self__, "logger_id", logger_id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if sampling and not isinstance(sampling, dict):
            raise TypeError("Expected argument 'sampling' to be a dict")
        pulumi.set(__self__, "sampling", sampling)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="alwaysLog")
    def always_log(self) -> Optional[str]:
        """
        Specifies for what type of messages sampling settings should not apply.
        """
        return pulumi.get(self, "always_log")
    @property
    @pulumi.getter
    def backend(self) -> Optional['outputs.PipelineDiagnosticSettingsResponse']:
        """
        Diagnostic settings for incoming/outgoing HTTP messages to the Backend
        """
        return pulumi.get(self, "backend")
    @property
    @pulumi.getter(name="enableHttpCorrelationHeaders")
    def enable_http_correlation_headers(self) -> Optional[bool]:
        """
        Whether to process Correlation Headers coming to Api Management Service. Only applicable to Application Insights diagnostics. Default is true.
        """
        return pulumi.get(self, "enable_http_correlation_headers")
    @property
    @pulumi.getter
    def frontend(self) -> Optional['outputs.PipelineDiagnosticSettingsResponse']:
        """
        Diagnostic settings for incoming/outgoing HTTP messages to the Gateway.
        """
        return pulumi.get(self, "frontend")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="loggerId")
    def logger_id(self) -> str:
        """
        Resource Id of a target logger.
        """
        return pulumi.get(self, "logger_id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def sampling(self) -> Optional['outputs.SamplingSettingsResponse']:
        """
        Sampling settings for Diagnostic.
        """
        return pulumi.get(self, "sampling")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetDiagnosticResult(GetDiagnosticResult):
    # Makes the result awaitable: the dead `if False: yield` turns __await__
    # into a generator, and awaiting immediately yields a plain
    # GetDiagnosticResult copy of this object's fields.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetDiagnosticResult(
            always_log=self.always_log,
            backend=self.backend,
            enable_http_correlation_headers=self.enable_http_correlation_headers,
            frontend=self.frontend,
            id=self.id,
            logger_id=self.logger_id,
            name=self.name,
            sampling=self.sampling,
            type=self.type)
def get_diagnostic(diagnostic_id: Optional[str] = None,
                   resource_group_name: Optional[str] = None,
                   service_name: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDiagnosticResult:
    """
    Diagnostic details.

    :param str diagnostic_id: Diagnostic identifier. Must be unique in the current API Management service instance.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    # Marshal the arguments into the invoke payload expected by the engine.
    __args__ = dict()
    __args__['diagnosticId'] = diagnostic_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function and rewrap the result as an awaitable.
    __ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20180601preview:getDiagnostic', __args__, opts=opts, typ=GetDiagnosticResult).value

    return AwaitableGetDiagnosticResult(
        always_log=__ret__.always_log,
        backend=__ret__.backend,
        enable_http_correlation_headers=__ret__.enable_http_correlation_headers,
        frontend=__ret__.frontend,
        id=__ret__.id,
        logger_id=__ret__.logger_id,
        name=__ret__.name,
        sampling=__ret__.sampling,
        type=__ret__.type)
| 36.402299 | 173 | 0.657089 |
9de0d72f441b1ba7956109aeb88704ba8062f4b8 | 2,511 | py | Python | ravendb/tests/raven_commands_tests/test_get_by_prefix.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 8 | 2016-10-08T17:45:44.000Z | 2018-05-29T12:16:43.000Z | ravendb/tests/raven_commands_tests/test_get_by_prefix.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 5 | 2017-02-12T15:50:53.000Z | 2017-09-18T12:25:01.000Z | ravendb/tests/raven_commands_tests/test_get_by_prefix.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 8 | 2016-07-03T07:59:12.000Z | 2017-09-18T11:22:23.000Z | import unittest
from ravendb.documents.commands.crud import PutDocumentCommand, GetDocumentsCommand
from ravendb.tests.test_base import TestBase
class TestGetDocumentsByPrefixCommand(TestBase):
    # Integration tests for prefix-based document loading: setUp seeds six
    # products whose ids share the "products/MB-" or "products/BM-" prefixes.
    def setUp(self):
        super(TestGetDocumentsByPrefixCommand, self).setUp()
        self.requests_executor = self.store.get_request_executor()
        self.requests_executor.execute_command(
            PutDocumentCommand("products/MB-200", None, {"Name": "test", "@metadata": {}})
        )
        self.requests_executor.execute_command(
            PutDocumentCommand("products/MB-201", None, {"Name": "test", "@metadata": {}})
        )
        self.requests_executor.execute_command(
            PutDocumentCommand("products/MB-100", None, {"Name": "test", "@metadata": {}})
        )
        self.requests_executor.execute_command(
            PutDocumentCommand("products/MB-101", None, {"Name": "test", "@metadata": {}})
        )
        self.requests_executor.execute_command(
            PutDocumentCommand("products/BM-100", None, {"Name": "test", "@metadata": {}})
        )
        self.requests_executor.execute_command(
            PutDocumentCommand("products/BM-200", None, {"Name": "test", "@metadata": {}})
        )
    # todo: WIP
    def tearDown(self):
        super(TestGetDocumentsByPrefixCommand, self).tearDown()
        self.delete_all_topology_files()
    def test_start_with(self):
        # All four MB-prefixed documents match the bare prefix.
        with self.store.open_session() as session:
            response = session.load_starting_with(dict, "products/MB-")
            self.assertEqual(4, len(response))
    def test_matches(self):
        # "2*" narrows the suffix to MB-200 and MB-201.
        with self.store.open_session() as session:
            response = session.load_starting_with(dict, "products/MB-", matches="2*")
            self.assertEqual(2, len(response))
    def test_excludes(self):
        # Excluding "2*" leaves only BM-100.
        with self.store.open_session() as session:
            response = session.load_starting_with(dict, "products/BM-", exclude="2*")
            self.assertEqual(1, len(response))
    def test_start_after(self):
        # start_after skips ids up to and including products/MB-200.
        with self.store.open_session() as session:
            response = session.load_starting_with(dict, "products/MB-", matches="2*", start_after="products/MB-200")
            self.assertEqual(1, len(response))
    def test_page(self):
        # Paging: skip one match, then take a page of two.
        with self.store.open_session() as session:
            response = session.load_starting_with(dict, "products/MB", start=1, page_size=2)
            self.assertEqual(2, len(response))
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 38.630769 | 116 | 0.650737 |
1733c8f096f882117e25f55217630d2aae45b5f3 | 447 | py | Python | connect_to_chat.py | sleamey/underground-chat | 77a06c23c249f1bec91fb740b9de9724ebe0cd50 | [
"MIT"
] | null | null | null | connect_to_chat.py | sleamey/underground-chat | 77a06c23c249f1bec91fb740b9de9724ebe0cd50 | [
"MIT"
] | null | null | null | connect_to_chat.py | sleamey/underground-chat | 77a06c23c249f1bec91fb740b9de9724ebe0cd50 | [
"MIT"
] | null | null | null | from contextlib import asynccontextmanager
import asyncio
from asyncio import StreamReader, StreamWriter
from typing import AsyncContextManager
@asynccontextmanager
async def get_chat_connection(host: str, port: int) -> AsyncIterator[Tuple[StreamReader, StreamWriter]]:
    """Open a TCP connection to the chat server as an async context manager.

    Yields:
        A ``(reader, writer)`` pair for the open connection.

    The writer is always closed and fully drained on exit, even when the
    body raises.

    Fix: the original annotated the return as
    ``AsyncContextManager[(StreamReader, StreamWriter)]`` -- a bare tuple in
    a subscript is not a valid type; an ``@asynccontextmanager`` generator
    is annotated with the AsyncIterator of what it yields.
    """
    reader, writer = await asyncio.open_connection(host, port)
    try:
        yield reader, writer
    finally:
        writer.close()
        await writer.wait_closed()
| 26.294118 | 105 | 0.756152 |
efdaebead40b3a0c80b781959f00538d4f3c72cc | 32,150 | py | Python | app.py | JGX020/picture-search-talk | ae264eed9b5d4ba08006284dedf9b8cbfcd46037 | [
"MIT"
] | 2 | 2016-03-03T05:58:04.000Z | 2017-01-26T09:58:32.000Z | app.py | JGX020/picture-search-talk | ae264eed9b5d4ba08006284dedf9b8cbfcd46037 | [
"MIT"
] | 2 | 2016-03-01T01:13:46.000Z | 2016-03-03T04:09:05.000Z | app.py | JGX020/picture-search-talk | ae264eed9b5d4ba08006284dedf9b8cbfcd46037 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3
import os
import uuid
import histsimilar
import client
import time
import datetime
import random
import textreplace
import json
import dygeneratehtml
import geo
from flask import Flask, render_template,session, g,abort,flash, request, redirect, url_for, \
send_from_directory,jsonify
app = Flask(__name__)
UPLOAD_FOLDER = 'uploads/'
UPLOADFILE_FOLDER='uploadsfile'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif','mp4'])
app.config['UPLOAD_FOLDER'] = UPLOADFILE_FOLDER
app.config['UPLOADFILE_FOLDER'] = UPLOAD_FOLDER
app.config['STATIC_FOLDER'] = UPLOAD_FOLDER
path = r'testpic/TEST%d/%d.JPG'
DATABASE = 'sqldb.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
rootdir = 'F:\flask-file-upload-example-master\uploads'
filename = "database/test.txt"
filename1 = "database/test1.txt"
filename2 = "database/test2.txt"
f1 = open("database/test1.txt")
fp=open('database/testtopic1.txt','w')
lii=histsimilar.data(filename)
#dict=[{'imgurl':'../uploads/6f7088b4.jpg','title':'1aaa'},{'imgurl':'../uploads/7bf7191e.jpg','title':'2bbb'},{'imgurl':'../uploads/4b721df0.jpg','title':'s9999'}]
app.config.from_object(__name__)
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def get_connection():
    """Return the per-request sqlite connection, creating it on first use."""
    # Cache the connection on the flask `g` object so one request reuses it.
    if getattr(g, '_db', None) is None:
        g._db = connect_db()
    return g._db
def get_conn(path):
    """Return a sqlite3 connection to `path`, or an in-memory fallback.

    Falls back to a fresh in-memory database when `path` is not an existing
    regular file.

    Fix: the original opened the on-disk connection BEFORE checking the
    path, which both created the database file as a side effect and leaked
    that connection whenever it fell back to ':memory:'.
    """
    if os.path.exists(path) and os.path.isfile(path):
        print('s:[{}]'.format(path))
        return sqlite3.connect(path)
    print('n:[:memory:]')
    return sqlite3.connect(':memory:')
def get_cursor(conn):
    # Return a cursor for conn; with a None conn fall back to a cursor on a
    # brand-new connection from get_conn('').
    # NOTE(review): that fallback connection is never stored, so callers
    # cannot commit or close it -- confirm this is intentional.
    if conn is not None:
        return conn.cursor()
    else:
        return get_conn('').cursor()
def drop_table(conn, table):
    """Drop `table` if it exists, commit, and close via close_all.

    SECURITY(review): the table name is spliced into the SQL string; only
    call this with trusted identifiers.

    Fix: the original `else` branch printed the local `sql`, which is never
    assigned on that path and raised NameError; report `table` instead.
    """
    if table is not None and table != '':
        sql = 'DROP TABLE IF EXISTS ' + table
        if SHOW_SQL:
            print('sql:[{}]'.format(sql))
        cu = get_cursor(conn)
        cu.execute(sql)
        conn.commit()
        print('[{}]!'.format(table))
        close_all(conn, cu)
    else:
        print('the [{}] is empty or equal None!'.format(table))
def create_table(conn, sql):
    # Execute a CREATE TABLE statement, commit, and close via close_all.
    if sql is not None and sql != '':
        cu = get_cursor(conn)
        if SHOW_SQL:
            print('sql:[{}]'.format(sql))
        cu.execute(sql)
        conn.commit()
        print('[student]!')
        close_all(conn, cu)
    else:
        print('the [{}] is empty or equal None!'.format(sql))
def close_all(conn, cu):
    """Close a cursor and its connection, tolerating None for either.

    Fix: the original closed the cursor twice (in both the try body and the
    finally clause) and never closed the connection, leaking it.
    """
    try:
        if cu is not None:
            cu.close()
    finally:
        if conn is not None:
            conn.close()
def save(conn, sql, data):
    # Execute the INSERT once per parameter tuple in data, committing after
    # every row, then release the cursor/connection.
    if sql is not None and sql != '':
        if data is not None:
            cu = get_cursor(conn)
            for d in data:
                if SHOW_SQL:
                    print('sql:[{}],s:[{}]'.format(sql, d))
                cu.execute(sql, d)
                conn.commit()
            close_all(conn, cu)
    else:
        print('the [{}] is empty or equal None!'.format(sql))
def fetchall(conn, sql):
    # Run a SELECT and map each row onto a dict with the pic-table columns.
    # NOTE(review): returns None (not []) when the query matches nothing or
    # when sql is empty -- callers must handle that.
    if sql is not None and sql != '':
        cu = get_cursor(conn)
        if SHOW_SQL:
            print('sql:[{}]'.format(sql))
        cu.execute(sql)
        r=[dict(id=row[0],title=row[1],url=row[2],address=row[3])for row in cu.fetchall()]
        if len(r) > 0:
            return r
        #for e in range(len(r)):
        #    print(r[e])
    else:
        print('the [{}] is empty or equal None!'.format(sql))
def fetchone(conn, sql, data):
    # Look up rows matching a single bound value and print them.
    # NOTE(review): despite the name this only prints matches and returns
    # None; confirm whether returning the rows was intended.
    if sql is not None and sql != '':
        if data is not None:
            #Do this instead
            d = (data,)
            cu = get_cursor(conn)
            if SHOW_SQL:
                print('tsql:[{}],c:[{}]'.format(sql, data))
            cu.execute(sql, d)
            r = cu.fetchall()
            if len(r) > 0:
                for e in range(len(r)):
                    print(r[e])
        else:
            print('the [{}] equal None!'.format(data))
    else:
        print('the [{}] is empty or equal None!'.format(sql))
def update(conn, sql, data):
    # Execute the UPDATE once per parameter tuple, commit, then close.
    if sql is not None and sql != '':
        if data is not None:
            cu = get_cursor(conn)
            for d in data:
                if SHOW_SQL:
                    print('tsql:[{}],c:[{}]'.format(sql, d))
                cu.execute(sql, d)
                conn.commit()
            close_all(conn, cu)
    else:
        print('the [{}] is empty or equal None!'.format(sql))
def delete(conn, sql, data):
    # Execute the DELETE once per parameter tuple, commit, then close.
    if sql is not None and sql != '':
        if data is not None:
            cu = get_cursor(conn)
            for d in data:
                if SHOW_SQL:
                    print('tsql:[{}],c:[{}]'.format(sql, d))
                cu.execute(sql, d)
                conn.commit()
            close_all(conn, cu)
    else:
        print('the [{}] is empty or equal None!'.format(sql))
def drop_table_test():
    # Drop the configured TABLE_NAME from the configured database file.
    print('ss...')
    conn = get_conn(DB_FILE_PATH)
    drop_table(conn, TABLE_NAME)
def create_table_test():
    # Create the four-column `pic` table (id, title, url, address).
    print('cs...')
    create_table_sql = '''CREATE TABLE `pic` (
                          `id` int(11) NOT NULL,
                          `title` varchar(20) NOT NULL,
                          `url` varchar(4) DEFAULT NULL,
                          `address` int(11) DEFAULT NULL,
                           PRIMARY KEY (`id`)
                        )'''
    conn = get_conn(DB_FILE_PATH)
    create_table(conn, create_table_sql)
def save_test():
    """Insert sample rows into the four-column `pic` table.

    Fix: the INSERT listed six placeholders while every data tuple has four
    values, so sqlite raised ProgrammingError before any row was saved.
    """
    print('bs...')
    save_sql = '''INSERT INTO pic values (?, ?, ?, ?)'''
    data = [(1, 'Hongten', '../uploads/6f7088b4.jpg', ''),
            (2, 'Tom', '../uploads/6f7088b4.jpg', ''),
            (3, 'Jake', '../uploads/6f7088b4.jpg', ''),
            (4, 'Cate', '../uploads/6f7088b4.jpg', '')]
    conn = get_conn(DB_FILE_PATH)
    save(conn, save_sql, data)
def fetchall_test():
    # Select and (via fetchall) return all rows of the pic table.
    print('sd...')
    fetchall_sql = '''SELECT * FROM pic'''
    conn = get_conn(DB_FILE_PATH)
    fetchall(conn, fetchall_sql)
def fetchone_test():
    # Print the student row with ID 1.
    print('select a data from database...')
    fetchone_sql = 'SELECT * FROM student WHERE ID = ? '
    data = 1
    conn = get_conn(DB_FILE_PATH)
    fetchone(conn, fetchone_sql, data)
def update_test():
    # Rename four student rows by ID.
    print('update data...')
    update_sql = 'UPDATE student SET name = ? WHERE ID = ? '
    data = [('HongtenAA', 1),
            ('HongtenBB', 2),
            ('HongtenCC', 3),
            ('HongtenDD', 4)]
    conn = get_conn(DB_FILE_PATH)
    update(conn, update_sql, data)
def delete_test():
    # Delete two student rows matched by both name and ID.
    print('delete data...')
    delete_sql = 'DELETE FROM student WHERE NAME = ? AND ID = ? '
    data = [('HongtenAA', 1),
            ('HongtenCC', 3)]
    conn = get_conn(DB_FILE_PATH)
    delete(conn, delete_sql, data)
def init():
    # Configure the module-level globals, then rebuild the table and seed it.
    global DB_FILE_PATH
    DB_FILE_PATH = 'F:\sqlite-shell-win32-x86-3090200\\sqldb.db'
    global TABLE_NAME
    TABLE_NAME = 'student'
    global SHOW_SQL
    SHOW_SQL = True
    print('show_sql : {}'.format(SHOW_SQL))
    drop_table_test()
    create_table_test()
    save_test()
def main():
    # Exercise the CRUD helpers end-to-end: init, read, read-one, update.
    init()
    #delete_test()
    #fetchall_test()
    fetchall_test()
    print('#' * 50)
    fetchone_test()
    print('#' * 50)
    update_test()
    fetchall_test()
    print('#' * 50)
    #app = Flask(__name__)
    # delete_test()
    # fetchall_test()
def connect_db():
    """Open a new connection to the hard-coded sqlite database file."""
    database_path = 'F:\sqlite-shell-win32-x86-3090200\\sqldb.db'
    return sqlite3.connect(database_path)
def allowed_file(filename):
    # Accept a filename only when it has an extension and that extension is
    # whitelisted in ALLOWED_EXTENSIONS.
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
def generate_unique_filename(filename):
    # Keep the original extension but replace the name with an 8-character
    # random token so concurrent uploads cannot collide.
    extension = filename.rsplit('.', 1)[1]
    token = str(uuid.uuid4())[:8]
    return token + '.' + extension
def similar():
    # Compare each numbered test-case image pair and print the histogram
    # similarity as a percentage. (Python 2 print-statement syntax.)
    #path = r'testpic/TEST%d/%d.JPG'
    for i in xrange(1, 10):
        print 'test_case_%d: %.3f%%'%(i, \
            histsimilar.calc_similar_by_path('testpic/TEST%d/%d.JPG'%(i, 1), 'testpic/TEST%d/%d.JPG'%(i, 2))*100)
def list():
    # Walk the uploads directory and print every file's full path.
    # NOTE(review): this shadows the builtin list(); Python 2 print syntax.
    for root,dirs,files in os.walk(r'F:\\flask-file-upload-example-master\\uploads'):
        for file in files:
            print root + os.sep + file
def equals(dict):
    """Return a one-element list with the imgurl/title/name of item 1.

    Fix: the original serialized the values into Python source text and ran
    eval() on it, which both executes data as code and breaks whenever any
    value contains a quote; build the structure directly instead.
    """
    entry = dict[1]
    return [{'imgurl': entry['imgurl'], 'title': entry['title'], 'name': entry['name']}]
def data(f1):
    # Read the whole file object and evaluate its contents as a Python
    # list/dict literal.
    # SECURITY(review): eval() executes arbitrary code from the file; if the
    # files only ever hold literals, ast.literal_eval would be safe here.
    #f = open("database/test.txt")
    dict=f1.read()
    #f.close()
    return eval(dict)
@app.route('/', methods=['GET', 'POST'])
def index():
    # Home page: log the visitor IP, and on POST save the upload as a.jpg
    # and render the similarity listing of the uploads directory.
   # histsimilar.data()
    #dict=data(f1)
    #histsimilar.list2('F:\\flask-file-upload-example-master\\uploads',histsimilar.data(filename))
    #similar()
    #dict=[{'imgurl':'../uploads/6f7088b4.jpg','title':'1aaa'},{'imgurl':'../uploads/7bf7191e.jpg','title':'2bbb'},{'imgurl':'../uploads/4b721df0.jpg','title':'s9999'}]
    #list()
    #init()
    #delete_test()
    #fetchall_test()
    #r=fetchall_test()
    # Append the client address to the visitor log file.
    ftext=open('iptest.txt').read()
    histsimilar.write('iptest.txt',ftext+','+request.remote_addr)
    if request.method == 'POST':
        file = request.files['file']
        #print lii.append({'imgurl':'','title':'','name':'12.jpg'})
        if file and allowed_file(file.filename):
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], "a.jpg"))
            return render_template('index.html',dict=histsimilar.list2('F:\\flask-file-upload-example-master\\uploads'))
    return render_template('index.html',dict=histsimilar.data(filename),user_ip = request.remote_addr)
@app.route('/loginup', methods=['GET', 'POST'])
def loginup():
    # Validate the posted credentials via client.selname and render the home
    # page with the user's space link.
    # NOTE(review): a GET request falls through and returns None, which
    # Flask reports as an error -- confirm GET should be routed here.
    if request.method == 'POST':
        return render_template('index.html',dict=histsimilar.data(filename),username=client.selname(request.form['username'],request.form['password']),ipadd='/space?unicode='+request.form['username'])
@app.route('/regisiterup', methods=['GET', 'POST'])
def regisiterup():
    # Register a new user, then render the home page.
    # SECURITY(review): the values tuple is built by string concatenation
    # from raw form fields; client.insertmes should take bound parameters.
    if request.method == 'POST':
        client.insertmes('(\''+request.form['username']+'\',\''+request.form['password']+'\',\''+request.form['email']+'\')')
        return render_template('index.html',dict=histsimilar.data(filename),user_ip = request.remote_addr)
@app.route('/space', methods=['GET', 'POST'])
def space():
    # Personal space: on POST store the upload locally and push it (plus a
    # validation marker) to the remote FTP host, then link to it.
    if request.method == 'POST':
        file = request.files['file']
        dygeneratehtml.replace()
        #print lii.append({'imgurl':'','title':'','name':'12.jpg'})
        if file and allowed_file(file.filename):
            file.save(os.path.join(app.config['UPLOADFILE_FOLDER'],file.filename))
        if file and allowed_file(file.filename):
            client.send_file2 ('validate.txt','101.200.167.44')
        if file and allowed_file(file.filename):
            client.send_file ('uploadsfile/'+file.filename,'101.200.167.44')
        return render_template('space.html',hrefadd='ftp://'+request.args.get('fileadd')+'/'+file.filename)
    # GET: only the hard-coded 'jgx020' account may view its stored address.
    if open('temp/tmp.txt').read()=='true' and request.args.get('unicode')=='jgx020': #advanced
        return render_template('space.html',hrefadd=client.seladdr('jgx020')[3:len(open('temp/tmp2.txt').read())-4])
    else:
        return render_template('404.html')
@app.route('/fileuploadsys')
def fileuploadsys():
    # Static page for the bulk file-upload UI.
    return render_template('fileuploadsys.html')
@app.route('/map',methods=['POST', 'GET'])
def map():
    # Geocode the posted place name into "long:lat" and render the map;
    # a GET shows the default coordinates (central Beijing).
    # NOTE(review): this shadows the builtin map() and calls geo.position
    # twice on success -- consider caching the first result.
    if request.method =='POST':
        if geo.position(request.form['position']) is not None:
            return render_template('map.html',longs=geo.position(request.form['position']).split(':')[0],lati=geo.position(request.form['position']).split(':')[1])
        else:
            return render_template('404.html')
    return render_template('map.html',longs='116.391248',lati='39.9059631')
@app.route('/uploadstext',methods=['POST', 'GET'])
def uploadtext():
    # Save posted text, then render page `i` of the editor, nine stored
    # texts per page (the branch handles the short final page).
    if request.method == 'POST':
        histsimilar.writecontent(request.form['content'])
    if request.args.get('i') is not None:
        b=int(request.args.get('i'))
        if len(histsimilar.writelist())/9+1==b:
            return render_template('editor.html',content1=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-9]).read(),content2=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-8]).read(),content3=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-7]).read(),content4=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-6]).read(),content5=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-5]).read(),content6=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-4]).read(),content7=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-3]).read(),content8=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-2]).read(),content9=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-1]).read())
        return render_template('editor.html',content1=open('database/'+histsimilar.writelist()[b*9-9]).read(),content2=open('database/'+histsimilar.writelist()[b*9-8]).read(),content3=open('database/'+histsimilar.writelist()[b*9-7]).read(),content4=open('database/'+histsimilar.writelist()[b*9-6]).read(),content5=open('database/'+histsimilar.writelist()[b*9-5]).read(),content6=open('database/'+histsimilar.writelist()[b*9-4]).read(),content7=open('database/'+histsimilar.writelist()[b*9-3]).read(),content8=open('database/'+histsimilar.writelist()[b*9-2]).read(),content9=open('database/'+histsimilar.writelist()[b*9-1]).read())
    return render_template('index.html',dict=histsimilar.data(filename))
@app.route('/jump', methods=['GET', 'POST'])
def find():
    # Save the posted image under a timestamp name and render the listing
    # of the uploads directory.
    if request.method == 'POST':
        file = request.files['file']
        #print lii.append({'imgurl':'','title':'','name':'12.jpg'})
        if file and allowed_file(file.filename):
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], histsimilar.filesave(time.strftime('%Y-%m-%d %H-%M-%S',time.localtime(int(time.time())))+".jpg")))
            return render_template('index.html',dict=histsimilar.list2('F:\\flask-file-upload-example-master\\uploads'))
@app.route('/login',methods=['GET','POST'])
def login():
    # Render the static login form page.
    return render_template('login.html')
@app.route('/direct',methods=['POST', 'GET'])
def derictfile():
    # Render editor page `i` (nine text files per page) alongside the
    # image named in the `img` query parameter.
    return render_template('editor.html',img_from_args=request.args.get('img'),page=request.args.get('i'),content1=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-9]).read(),content2=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-8]).read(),content3=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-7]).read(),content4=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-6]).read(),content5=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-5]).read(),content6=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-4]).read(),content7=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-3]).read(),content8=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-2]).read(),content9=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-1]).read())
@app.route('/right',methods=['POST', 'GET'])
def rights():
    """Apply histsimilar.addlistup(0) and re-render the editor page.

    Query args: 'i' selects the page of nine texts; 'img' is echoed back.
    """
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times per
    # request. (Assumes data0 is a pure read of the file -- TODO confirm.)
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           data=json.dumps(histsimilar.addlistup(0)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/down',methods=['POST', 'GET'])
def downs():
    """Apply histsimilar.addlistdown(0) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           datad=json.dumps(histsimilar.addlistdown(0)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/right1',methods=['POST', 'GET'])
def rights1():
    """Apply histsimilar.addlistup(1) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    # Copy-paste fix: this handler previously duplicated /down
    # (datad=addlistdown(0)); every other /rightN route passes
    # dataN=json.dumps(histsimilar.addlistup(N)).
    return render_template('editor.html',
                           data1=json.dumps(histsimilar.addlistup(1)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/down1',methods=['POST', 'GET'])
def downs1():
    """Apply histsimilar.addlistdown(1) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           datad1=json.dumps(histsimilar.addlistdown(1)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/right2',methods=['POST', 'GET'])
def rights2():
    """Apply histsimilar.addlistup(2) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           data2=json.dumps(histsimilar.addlistup(2)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/down2',methods=['POST', 'GET'])
def downs2():
    """Apply histsimilar.addlistdown(2) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           datad2=json.dumps(histsimilar.addlistdown(2)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/right3',methods=['POST', 'GET'])
def rights3():
    """Apply histsimilar.addlistup(3) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           data3=json.dumps(histsimilar.addlistup(3)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/down3',methods=['POST', 'GET'])
def downs3():
    """Apply histsimilar.addlistdown(3) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           datad3=json.dumps(histsimilar.addlistdown(3)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/right4',methods=['POST', 'GET'])
def rights4():
    """Apply histsimilar.addlistup(4) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           data4=json.dumps(histsimilar.addlistup(4)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/down4',methods=['POST', 'GET'])
def downs4():
    """Apply histsimilar.addlistdown(4) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           datad4=json.dumps(histsimilar.addlistdown(4)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/right5',methods=['POST', 'GET'])
def rights5():
    """Apply histsimilar.addlistup(5) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           data5=json.dumps(histsimilar.addlistup(5)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/down5',methods=['POST', 'GET'])
def downs5():
    """Apply histsimilar.addlistdown(5) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           datad5=json.dumps(histsimilar.addlistdown(5)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/right6',methods=['POST', 'GET'])
def rights6():
    """Apply histsimilar.addlistup(6) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           data6=json.dumps(histsimilar.addlistup(6)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/down6',methods=['POST', 'GET'])
def downs6():
    """Apply histsimilar.addlistdown(6) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           datad6=json.dumps(histsimilar.addlistdown(6)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/right7',methods=['POST', 'GET'])
def rights7():
    """Apply histsimilar.addlistup(7) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           data7=json.dumps(histsimilar.addlistup(7)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/down7',methods=['POST', 'GET'])
def downs7():
    """Apply histsimilar.addlistdown(7) and re-render the editor page."""
    page = int(request.args.get('i'))
    # Read the backing file once; the original called data0() nine times.
    rows = histsimilar.data0('database/test4.txt')
    texts = {'content' + str(k): rows[page * 9 - 10 + k]['text'] for k in range(1, 10)}
    return render_template('editor.html',
                           datad7=json.dumps(histsimilar.addlistdown(7)),
                           img_from_args=request.args.get('img'),
                           page=request.args.get('i'),
                           **texts)
@app.route('/register',methods=['GET','POST'])
def register():
    """Serve the registration page."""
    return render_template('register.html')
@app.route('/pagejump',methods=['POST', 'GET'])
def index2():
    """Render the index at the page given by the 'pages' query arg (default 2).

    The original branches were swapped: it forwarded None to data2() when
    'pages' was absent, and discarded the supplied value (always using 2)
    when it was present.
    """
    pages = request.args.get('pages')
    if pages is None:
        return render_template('index.html', dict=histsimilar.data2(filename, 2))
    return render_template('index.html', dict=histsimilar.data2(filename, pages))
@app.route('/_add_numbersss')
def add_numbersss():
    """Return JSON {"result": a + b} for integer query params 'a' and 'b' (default 0)."""
    first = request.args.get('a', 0, type=int)
    second = request.args.get('b', 0, type=int)
    return jsonify(result=first + second)
@app.route('/_add_numbers',methods=['POST', 'GET'])
def add_numbers():
    """Run the text-replacement pass over test1.txt and re-render the index."""
    # Context manager closes the handle; the original leaked it.
    with open('database/test1.txt') as f:
        textreplace.replace(f.readlines())
    return render_template('index.html', dict=histsimilar.data(filename))
if __name__ == '__main__':
    # Listen on all interfaces on port 80 (usually requires elevated
    # privileges); Flask's built-in server is for development only.
    app.run(host="0.0.0.0",port=80)
| 68.843683 | 953 | 0.66479 |
37c94c164297fb7178f10c4d22b15558d35a18be | 435 | py | Python | reviews/migrations/0033_auto_20200131_1613.py | UrbanBogger/horrorexplosion | 3698e00a6899a5e8b224cd3d1259c3deb3a2ca80 | [
"MIT"
] | null | null | null | reviews/migrations/0033_auto_20200131_1613.py | UrbanBogger/horrorexplosion | 3698e00a6899a5e8b224cd3d1259c3deb3a2ca80 | [
"MIT"
] | 4 | 2020-06-05T18:21:18.000Z | 2021-06-10T20:17:31.000Z | reviews/migrations/0033_auto_20200131_1613.py | UrbanBogger/horrorexplosion | 3698e00a6899a5e8b224cd3d1259c3deb3a2ca80 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2020-01-31 16:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: set AlternateLength's default ordering to ['alternative_duration']."""
    dependencies = [
        ('reviews', '0032_auto_20200126_1743'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='alternatelength',
            options={'ordering': ['alternative_duration']},
        ),
    ]
| 21.75 | 59 | 0.632184 |
e792e18d2e05526f941750851ab8789b75aef55b | 400 | py | Python | project_name/wsgi.py | julianwachholz/django-headstart | e7ab84d914dd952545aed567092e72c511b09535 | [
"MIT"
] | null | null | null | project_name/wsgi.py | julianwachholz/django-headstart | e7ab84d914dd952545aed567092e72c511b09535 | [
"MIT"
] | null | null | null | project_name/wsgi.py | julianwachholz/django-headstart | e7ab84d914dd952545aed567092e72c511b09535 | [
"MIT"
] | null | null | null | """
WSGI config for project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 26.666667 | 78 | 0.7825 |
c31f39dd23d35b335b42bd925accae8430b539b8 | 32 | py | Python | tests/unit/providers/singleton/__init__.py | YelloFam/python-dependency-injector | 541131e33858ee1b8b5a7590d2bb9f929740ea1e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/providers/singleton/__init__.py | YelloFam/python-dependency-injector | 541131e33858ee1b8b5a7590d2bb9f929740ea1e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/providers/singleton/__init__.py | YelloFam/python-dependency-injector | 541131e33858ee1b8b5a7590d2bb9f929740ea1e | [
"BSD-3-Clause"
] | null | null | null | """Singleton provider tests."""
| 16 | 31 | 0.6875 |
077d812feffb19b7e995a849c3713e11697e3ef1 | 6,125 | py | Python | oba/check_attendance.py | philip-baker/engage | dbc4c3278db07fd3b4c678af337a59ab3c319b55 | [
"MIT"
] | null | null | null | oba/check_attendance.py | philip-baker/engage | dbc4c3278db07fd3b4c678af337a59ab3c319b55 | [
"MIT"
] | null | null | null | oba/check_attendance.py | philip-baker/engage | dbc4c3278db07fd3b4c678af337a59ab3c319b55 | [
"MIT"
] | null | null | null | """Documentation for build_system.py
This script is part of the OBA (Out-of-the-Box-Attendance) it has been designed to be used on a daily basis for a given course. As an input it will
look for an image (named sample.jpg), which if it cannot find it will prompt the user to take an image using any camera attached to the computer. Then
the Tiny Face Detector
it looks for an image (named sample.jpg) as an input in the folder sample_images
if it cannot find a picture, it will attempt to take a picture with whatever camera is connected to the computer. Then the Tiny Face Detector is
used to detect faces in the image, before using ArcFace to compare these images against the profile images for students in the class. The results
(Present / Absent for each student) are then written to the engage.db
To run on CPU specifiy the gpu argument as follows check_attendance.py --gpu -1
"""
import sys
import os
currDir = os.getcwd()
os.chdir('..')
sys.path.append(os.getcwd() + '/helper')
sys.path.append(os.getcwd() + '/helper/tinyfaces')
sys.path.append(os.getcwd() +'/models/model-r100-ii')
os.chdir(currDir)
import argparse
import cv2
import numpy as np
import json
from csv import writer
from datetime import datetime
import sqlite3
from io import BytesIO
import torch
import PIL.Image
from torchvision import transforms
__author__ = "Philip Baker & Keith Spencer-Edgar"
__date__ = "25-10-2020"
import face_model
from engagement_model import EngageModel
from functions import get_detections
from model.utils import get_model
# Command-line arguments: model paths, device selection, and course code.
parser = argparse.ArgumentParser(description='face model test')
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='../models/model-r100-ii/model,0', help='path to load model.')
parser.add_argument('--gpu', default=-1, type=int, help='gpu id (-1 to run on CPU)')
parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from begining')
parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
parser.add_argument('--code', default='my_course', type=str, help='The course code of the lecture')
args = parser.parse_args()
# Check for a user-supplied sample image (any .jpg already in sample_images).
count = sum(1 for filename in os.listdir('sample_images') if filename.endswith(".jpg"))

# No sample found: capture a single frame from the default camera.
if count == 0:
    cap = cv2.VideoCapture(0)  # default capture device (e.g. laptop webcam)
    ret, frame = cap.read()    # grab one frame
    while True:
        cv2.imshow('img1', frame)
        # Press 'q' to save the frame and continue (the original comment
        # said 's', but the code has always checked for 'q').
        if cv2.waitKey(0) & 0xFF == ord('q'):
            cv2.imwrite('sample_images/sample.jpg', frame)
            cv2.destroyAllWindows()
            break
    cap.release()
class_image = 'sample_images/sample.jpg'
# Arguments for the Tiny Face Detector
class args_eval():
    """Bundle of Tiny Face Detector settings (thresholds and data paths)."""

    def __init__(self):
        self.nms_thresh = 0.3    # non-maximum-suppression threshold -- semantics per tinyfaces helper
        self.prob_thresh = 0.03  # minimum detection probability
        self.checkpoint = "../models/tinyfaces/checkpoint_50.pth"
        self.template_file = "../helper/tinyfaces/data/templates.json"
        self.threshold_score = 0  # score cutoff applied when cropping faces below

args_tinyface = args_eval()
# Load the face-size templates for the Tiny Face Detector.
# Context managers close the handles; the original leaked both the read
# and the (truncating) write handle in the json round-trip.
with open(args_tinyface.template_file) as template_fp:
    templates = json.load(template_fp)
with open(args_tinyface.template_file, "w") as template_fp:
    json.dump(templates, template_fp)
templates = np.round_(np.array(templates), decimals=8)
num_templates = templates.shape[0]

# Input normalisation for the Tiny Face Detector (the standard ImageNet
# mean/std used by torchvision models).
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
val_transforms = transforms.Compose([transforms.ToTensor(), normalize])
# Specify whether to run on GPU or CPU based on user arguments.
# BUG FIX: PyTorch has no 'gpu' device type -- torch.device('gpu') raises,
# so the GPU branch could never have worked; 'cuda' is the correct type.
# The (args.gpu > 0) test is kept as-is to stay consistent with the
# get_model(..., gpu=(args.gpu > 0)) call below, even though the --gpu help
# text implies only -1 selects the CPU.
device = torch.device('cuda' if args.gpu > 0 else 'cpu')
# Load the Tiny Face Detector from its checkpoint (GPU flag mirrors --gpu > 0).
model_tinyfaces = get_model(args_tinyface.checkpoint, num_templates=num_templates, gpu= (args.gpu > 0))
# Detector specification passed to get_detections() below -- the meaning of
# size/stride/offset is defined by the tinyfaces helper (TODO confirm).
rf = {
    'size': [859, 859],
    'stride': [8, 8],
    'offset': [-1, -1]
}
# Load the insightface ArcFace face-recognition model with the CLI arguments.
model_arcface = face_model.FaceModel(args)
# Downscale the class image so its width is at most 800 px before detection.
img = PIL.Image.open(class_image)
basewidth = 800
# Scale factor mapping original-image coordinates to downscaled coordinates.
scale_ = (basewidth / float(img.size[0]))
if float(img.size[0]) > basewidth:
    hsize = int((float(img.size[1]) * float(scale_)))
    # NOTE(review): PIL.Image.ANTIALIAS is deprecated in newer Pillow releases.
    img_downscaled = img.resize((basewidth, hsize), PIL.Image.ANTIALIAS).convert('RGB')
else:
    # Image is already narrow enough: keep its size and reset the scale factor.
    scale_ = 1
    img_downscaled = img.convert('RGB')
# Use the Tiny Face Detector to get face detections from the downscaled image.
img_tensor = transforms.functional.to_tensor(img_downscaled)
dets = get_detections(model_tinyfaces, img_tensor, templates, rf, val_transforms,
                      prob_thresh=args_tinyface.prob_thresh,
                      nms_thresh=args_tinyface.nms_thresh, device=device)
# Re-open the original (full-resolution) class image for cropping faces.
img = PIL.Image.open(class_image)
class_faces = list()
class_scores = list()  # NOTE(review): never populated below -- appears unused.
# Resize image according to quality.
# NOTE(review): basewidth/hsize equal the image's current dimensions, so this
# resize is effectively a copy at the same size.
basewidth = int(float(img.size[0]))
hsize = int(float(img.size[1]))
this_img = img.resize((basewidth, hsize), PIL.Image.ANTIALIAS).convert('RGB')
# Load student profile embeddings for the class in progress.
data = EngageModel.get_profiles(args)
# Collect every detection whose score clears the configured threshold.
# Each detection row holds four bbox coordinates (indices 0-3) and a score
# (index 4); width is taken from indices 2 and 0, so presumably
# [x0, y0, x1, y1, score] -- TODO confirm against the tinyfaces helper.
for det in dets:
    if det[4] > args_tinyface.threshold_score:
        bbox = det[0:4]
        # Map the box from downscaled-image coordinates back to the original.
        new_bbox = bbox * (1 / scale_)
        face_width = new_bbox[2] - new_bbox[0]
        # Crop the face and reverse the channel order (RGB <-> BGR) for the
        # recognition model.
        this_face = np.array(this_img.crop(new_bbox))
        class_faces.append([this_face[:, :, ::-1].copy(), face_width])
# Calculate ArcFace embeddings for each cropped face from the sample picture.
detection_features, face_widths = EngageModel.get_embeddings(model_arcface, class_faces)
# Compare the detected faces against the stored profile embeddings
# (per the module docstring, this records Present/Absent in engage.db).
# Equivalent to engage_model.compare_embeddings(detection_features, data).
engage_model = EngageModel(args)
EngageModel.compare_embeddings(engage_model, detection_features, data)
sys.exit(0)
| 37.576687 | 155 | 0.722776 |
4a9ff274e405a4c8906cc25a45dea14b61a09ae2 | 252 | py | Python | aula1/erros.py | ricMuehlbauer/curso-flask | 530c8d33cdb62cfaf98ad05c2ee87d8bc301a4b6 | [
"Unlicense"
] | null | null | null | aula1/erros.py | ricMuehlbauer/curso-flask | 530c8d33cdb62cfaf98ad05c2ee87d8bc301a4b6 | [
"Unlicense"
] | null | null | null | aula1/erros.py | ricMuehlbauer/curso-flask | 530c8d33cdb62cfaf98ad05c2ee87d8bc301a4b6 | [
"Unlicense"
] | null | null | null | a = 0
b = 10
# EAFP - "Easier to Ask Forgiveness than Permission": attempt the operation
# and handle the specific exception, rather than pre-checking the operands.
try:
print(b / a)
except AttributeError as e:
print("Nao posso transformar n em maiusculo", str(e))
except ZeroDivisionError as e:
print("Deu erro, tenta de novo", str(e)) | 22.909091 | 57 | 0.690476 |
b23f386d11a3556e850098413cf5f78974e69e46 | 4,434 | py | Python | nn_dataflow/tools/nn_layer_stats.py | Pingziwalk/nn_dataflow | 5ae8eeba4e243df6e9a69127073513a852a62d17 | [
"BSD-3-Clause"
] | null | null | null | nn_dataflow/tools/nn_layer_stats.py | Pingziwalk/nn_dataflow | 5ae8eeba4e243df6e9a69127073513a852a62d17 | [
"BSD-3-Clause"
] | null | null | null | nn_dataflow/tools/nn_layer_stats.py | Pingziwalk/nn_dataflow | 5ae8eeba4e243df6e9a69127073513a852a62d17 | [
"BSD-3-Clause"
] | null | null | null | """ $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import argparse
import sys
from nn_dataflow.core import ConvLayer, FCLayer
from nn_dataflow.nns import import_network
# Unit divisors for pretty-printing sizes (kB) and op counts (millions).
KILO = 1024.
MILLION = 1024.*1024.
# Column widths / precision, kept as strings so they can be spliced into
# the format specs below.
STR_FMT_NAME_LEN = '30'
STR_FMT_NUMB_LEN = '12'
STR_FMT_NUMB_PCS = '2'
STR_FMT_NAME = '{:' + STR_FMT_NAME_LEN + 's}'
STR_FMT_NUMB_HDER = '{:>' + STR_FMT_NUMB_LEN + '}'
STR_FMT_NUMB = '{:' + STR_FMT_NUMB_LEN + '.' + STR_FMT_NUMB_PCS + 'f}'
def layer_stats(args):
    ''' Print stats of layers in the network.

    Writes a CSV-style table to stdout with one row per layer:
    ifmap/ofmap/weight sizes in kB, MAC count in millions, and the minimum
    buffer size that still allows full reuse of one data category.
    Trailing MAX and SUM rows aggregate over all layers.
    '''
    network = import_network(args.net)
    # Word size is given in bits; convert to bytes, rounding up.
    word_bytes = (args.word + 7) // 8
    batch = args.batch
    hder_fmt = ','.join([STR_FMT_NAME] + [STR_FMT_NUMB_HDER] * 5) + '\n'
    line_fmt = ','.join([STR_FMT_NAME] + [STR_FMT_NUMB] * 5) + '\n'
    line_sep = '-' * int(STR_FMT_NAME_LEN) + '\n'
    # Header.
    sys.stdout.write(hder_fmt
                     .format('Layer',
                             'Ifmap/kB', 'Ofmap/kB', 'Weight/kB',
                             'MACs/M', 'MinOptBuf/kB'))
    # Aggregate stats.
    max_fmaps = 0
    max_filters = 0
    max_ops = 0
    sum_fmaps = 0
    sum_filters = 0
    sum_ops = 0
    convs = 0
    fcs = 0
    for name in network:
        layer = network[name]
        # FCLayer subclasses ConvLayer, so test it first.
        if isinstance(layer, FCLayer):
            fcs += 1
        elif isinstance(layer, ConvLayer):
            convs += 1
        ifmap_size = layer.total_ifmap_size(word_bytes) * batch / KILO
        ofmap_size = layer.total_ofmap_size(word_bytes) * batch / KILO
        try:
            filter_size = layer.total_filter_size(word_bytes) / KILO
        except AttributeError:
            # Layers without weights (e.g. pooling) have no total_filter_size.
            filter_size = 0
        ops = layer.total_ops(batch) / MILLION
        # The minimum optimal buffer size is the sum of the full size (two
        # dimensions) for one data category, the size of one dimension for the
        # second, and the size of one point for the third.
        min_opt_buf_size = min(
            filter_size + (ifmap_size + ofmap_size / layer.nofm) / batch,
            filter_size + (ifmap_size / layer.nifm + ofmap_size) / batch,
            ifmap_size + (ofmap_size + filter_size / layer.nifm) / layer.nofm,
            ifmap_size + (ofmap_size / batch + filter_size) / layer.nofm,
            ofmap_size + (ifmap_size + filter_size / layer.nofm) / layer.nifm,
            ofmap_size + (ifmap_size / batch + filter_size) / layer.nifm)
        sys.stdout.write(line_fmt
                         .format(name,
                                 ifmap_size, ofmap_size, filter_size,
                                 ops, min_opt_buf_size))
        max_fmaps = max(max_fmaps, ofmap_size)
        max_filters = max(max_filters, filter_size)
        max_ops = max(max_ops, ops)
        sum_fmaps += ofmap_size
        sum_filters += filter_size
        sum_ops += ops
    sys.stdout.write(line_sep)
    # Aggregate rows: NaN marks columns with no meaningful aggregate.
    sys.stdout.write(line_fmt
                     .format('MAX',
                             float('nan'), max_fmaps, max_filters,
                             max_ops, float('nan')))
    sys.stdout.write(line_fmt
                     .format('SUM',
                             float('nan'), sum_fmaps, sum_filters,
                             sum_ops, float('nan')))
    sys.stdout.write(line_sep)
    sys.stdout.write('# CONV layers = {}, # FC layers = {}\n'
                     .format(convs, fcs))
def argparser():
    ''' Build the command-line argument parser for this tool. '''
    parser = argparse.ArgumentParser()
    parser.add_argument('net',
                        help='network name, should be a .py file under examples')
    parser.add_argument('-b', '--batch', type=int, default=1,
                        help='batch size')
    parser.add_argument('-w', '--word', type=int, default=16,
                        help='word size in bits')
    return parser
if __name__ == '__main__':
    # Parse CLI arguments and print per-layer statistics for the chosen network.
    layer_stats(argparser().parse_args())
| 31.899281 | 79 | 0.59585 |
564d351e0e4eec58b5401240b62cee6ab15ba035 | 572 | py | Python | examples/notepad/notepad/__init__.py | SunChuquin/pyqode.core | edf29204446e3679701e74343288cf692eb07d86 | [
"MIT"
] | 23 | 2015-01-08T15:04:47.000Z | 2022-03-08T07:47:08.000Z | examples/notepad/notepad/__init__.py | SunChuquin/pyqode.core | edf29204446e3679701e74343288cf692eb07d86 | [
"MIT"
] | 16 | 2021-02-01T08:54:08.000Z | 2022-01-09T10:23:57.000Z | examples/notepad/notepad/__init__.py | SunChuquin/pyqode.core | edf29204446e3679701e74343288cf692eb07d86 | [
"MIT"
] | 24 | 2015-01-09T14:16:41.000Z | 2021-12-06T15:11:22.000Z | # -*- coding: utf-8 -*-
"""
This package contains the code of the notepad application:
- editor: contains our custom CodeEdit class definition. This is just
a CodeEdit configured with a set of modes and panels.
- main_window: This is the main window of the application
- server.py: This is the server script for the pyqode backend.
"""
import sys
from pyqode.qt.QtWidgets import QApplication
from .main_window import MainWindow
__version__ = '0.0.1'
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    application = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    application.exec_()
| 22.88 | 73 | 0.702797 |
f03305d66ef3a208b36f389bd466786221365fee | 723 | py | Python | DB/DAO/user.py | jaelyangChoi/ICO | e816a0d050cc2906caf71d2abfa6a04d438ed5b4 | [
"MIT"
] | null | null | null | DB/DAO/user.py | jaelyangChoi/ICO | e816a0d050cc2906caf71d2abfa6a04d438ed5b4 | [
"MIT"
] | null | null | null | DB/DAO/user.py | jaelyangChoi/ICO | e816a0d050cc2906caf71d2abfa6a04d438ed5b4 | [
"MIT"
] | null | null | null | from DAO.sqlExecution import *
from DTO.user import *
from SQL.user import UserSQL as SQL
class UserDAO(SqlExecution):
    """Data-access object for user records."""

    def select_user_by_email(self, email):
        """Fetch the row for *email* and return it as a populated User DTO.

        Raises:
            Exception: re-raises any database error after printing it.
        """
        try:
            conn = self.db_conn.get_connection()
            cursor = conn.cursor()
            cursor.execute(SQL.SELECT_USER, email)
            result = cursor.fetchone()
            # NOTE(review): result is None when no row matches; set_all(None)
            # behaviour depends on the User DTO -- confirm.
            user = User()
            user.set_all(result)
            return user
        except Exception as e:
            print(e)
            # Bare raise preserves the original traceback ('raise e' reset it).
            raise
        finally:
            # Close on error paths too; the original leaked the connection
            # whenever an exception fired before close_db(). Assumes close_db
            # is safe even if get_connection itself failed -- TODO confirm.
            self.db_conn.close_db()

    def is_existing_email(self, email):
        """Return True when a user row exists for *email*."""
        return self.execute_sql_for_one_result(email, SQL.CHECK_EMAIL) == 1
| 24.1 | 72 | 0.576763 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.