id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
# RISC-V instruction mnemonic groups, used to classify decoded opcodes by kind.
# (The leading "5130957 | " token is dataset-dump residue, preserved as-is.)
5130957 | JUMPS = ['JALR', 'JAL']
# Conditional branch instructions (signed and unsigned comparisons).
BRANCHES = ['BEQ', 'BNE', 'BLT', 'BGE', 'BLTU', 'BGEU']
# Load instructions: double/word/half/byte, plus unsigned (zero-extending) variants.
LOADS = ['LD', 'LW', 'LH', 'LB', 'LWU', 'LHU', 'LBU']
# Store instructions: double/word/half/byte.
STORES = ['SD', 'SW', 'SH', 'SB']
def register_mask(wbits):
    """Return an all-ones bit mask for a register ``wbits`` bits wide.

    Supported widths are 8, 16, 32 and 64; any other width returns None
    (matching the previous dict-lookup fallback).
    """
    if wbits in (8, 16, 32, 64):
        return (1 << wbits) - 1
    return None
def integer_to_list_of_bytes(v, wbits, byte_order):
    """Truncate ``v`` to ``wbits`` bits and return its bytes as a list of ints.

    ``byte_order`` is forwarded to ``int.to_bytes`` ('big' or 'little').
    """
    truncated = v & register_mask(wbits)
    return list(truncated.to_bytes(wbits // 8, byte_order))
3486213 | <gh_stars>0
import math
from datetime import datetime, timedelta
import numpy as np
import logging
import pandas as pd
from scipy import stats as sps
from scipy import signal
from matplotlib import pyplot as plt
import us
import structlog
from pyseir import load_data
from pyseir.utils import AggregationLevel, TimeseriesType
from pyseir.utils import get_run_artifact_path, RunArtifact
from pyseir.parameters.parameter_ensemble_generator import ParameterEnsembleGenerator
from structlog.threadlocal import bind_threadlocal, clear_threadlocal, merge_threadlocal
from structlog import configure
from enum import Enum
from pyseir.inference.infer_utils import LagMonitor
# Render structlog events as key=value pairs and merge thread-local context
# (bound elsewhere via bind_threadlocal) into every log line.
configure(processors=[merge_threadlocal, structlog.processors.KeyValueRenderer()])
# Module-level logger for free functions in this module.
log = structlog.get_logger(__name__)
class InferRtConstants:
    """Tuning constants for the R_t inference pipeline (used as a namespace; never instantiated)."""

    # Fixed seed so NumPy-based stochastic steps are reproducible across runs.
    RNG_SEED = 42
    # Don't try to infer Rt for timeseries shorter than this
    MIN_TIMESERIES_LENGTH = 20
    # Settings for outlier removal
    LOCAL_LOOKBACK_WINDOW = 14
    Z_THRESHOLD = 10
    MIN_MEAN_TO_CONSIDER = 5
    # Window size used during smoothing of cases and deaths
    # Originally 14 but odd is better and larger avoids edges that drive R unrealistically
    COUNT_SMOOTHING_WINDOW_SIZE = 19
    # Infer Rt only using cases if True
    # Recommend True as deaths just confuse interpretability of Rt_eff and will muddy using its extrapolation
    DISABLE_DEATHS = True
    # Sets the default value for sigma before adjustments
    # Recommend .03 (was .05 before when not adjusted) as adjustment moves up
    DEFAULT_PROCESS_SIGMA = 0.03
    # Scale sigma up as sqrt(SCALE_SIGMA_FROM_COUNT/current_count)
    # 5000 recommended
    SCALE_SIGMA_FROM_COUNT = 5000.0
    # Maximum increase (from DEFAULT_PROCESS_SIGMA) permitted for low counts
    # Recommend range 20. - 50. 30. appears to be best
    MAX_SCALING_OF_SIGMA = 30.0
    # Override min_cases and min_deaths with this value.
    # Recommend 1. - 5. range. 1. is allowing some counties to run that shouldn't (unphysical results)
    MIN_COUNTS_TO_INFER = 5.0
    # TODO really understand whether the min_cases and/or min_deaths compares to max, avg, or day to day counts
    # Smooth RTeff (Rt_MAP_composite) to make less reactive in the short term while retaining long
    # term shape correctly.
    SMOOTH_RT_MAP_COMPOSITE = 1 # number of times to apply smoothing
    RT_SMOOTHING_WINDOW_SIZE = 25 # also controls kernel_std
    # Minimum (half) width of confidence interval in composite Rt
    # Avoids too narrow values when averaging over timeseries that already have high confidence
    MIN_CONF_WIDTH = 0.1
    # Small epsilon to prevent divide by 0 errors.
    EPSILON = 1e-8
class RtInferenceEngine:
    """
    This class extends the analysis of Bettencourt et al to include mortality
    and hospitalization data in a pseudo-non-parametric inference of R_t.

    Parameters
    ----------
    fips: str
        State or County fips code
    window_size: int
        Size of the sliding Gaussian window to compute. Note that kernel std
        sets the width of the kernel weight.
    kernel_std: int
        Width of the Gaussian kernel.
    r_list: array-like
        Array of R_t to compute posteriors over. Doesn't really need to be
        configured.
    process_sigma: float
        Stdev of the process model. Increasing this allows for larger
        instant deltas in R_t, shrinking it smooths things, but allows for
        less rapid change. Can be interpreted as the std of the allowed
        shift in R_t day-to-day.
    ref_date:
        Reference date to compute from.
    confidence_intervals: list(float)
        Confidence interval to compute. 0.95 would be 90% credible
        intervals from 5% to 95%.
    min_cases: int
        Minimum number of cases required to run case level inference. These are
        very conservatively weak filters, but prevent cases of basically zero
        data from introducing pathological results.
    min_deaths: int
        Minimum number of deaths required to run death level inference.
    include_testing_correction: bool
        If True, include a correction for testing increases and decreases.
    """
    def __init__(
        self,
        fips,
        window_size=InferRtConstants.COUNT_SMOOTHING_WINDOW_SIZE,
        kernel_std=5,
        r_list=np.linspace(0, 10, 501),
        process_sigma=0.05,
        ref_date=datetime(year=2020, month=1, day=1),
        confidence_intervals=(0.68, 0.95),
        min_cases=5,
        min_deaths=5,
        include_testing_correction=True,
    ):
        """Load case/death/hospitalization data for ``fips`` and precompute model parameters."""
        np.random.seed(InferRtConstants.RNG_SEED)
        # Param Generation used for Xcor in align_time_series, has some stochastic FFT elements.
        self.fips = fips
        self.r_list = r_list
        self.window_size = window_size
        self.kernel_std = kernel_std
        self.process_sigma = process_sigma
        self.ref_date = ref_date
        self.confidence_intervals = confidence_intervals
        self.min_cases = min_cases
        self.min_deaths = min_deaths
        self.include_testing_correction = include_testing_correction

        # Because rounding is disabled we don't need high min_deaths, min_cases anymore
        self.min_cases = min(InferRtConstants.MIN_COUNTS_TO_INFER, self.min_cases)
        if not InferRtConstants.DISABLE_DEATHS:
            self.min_deaths = min(InferRtConstants.MIN_COUNTS_TO_INFER, self.min_deaths)

        if len(fips) == 2:  # State FIPS are 2 digits
            self.agg_level = AggregationLevel.STATE
            self.state_obj = us.states.lookup(self.fips)
            self.state = self.state_obj.name

            # Testing-corrected new-case series plus observed deaths.
            (
                self.times,
                self.observed_new_cases,
                self.observed_new_deaths,
            ) = load_data.load_new_case_data_by_state(
                self.state,
                self.ref_date,
                include_testing_correction=self.include_testing_correction,
            )
            # Uncorrected (raw) new-case series kept alongside for output.
            self.times_raw_new_cases, self.raw_new_cases, _ = load_data.load_new_case_data_by_state(
                self.state, self.ref_date, include_testing_correction=False
            )
            (
                self.hospital_times,
                self.hospitalizations,
                self.hospitalization_data_type,
            ) = load_data.load_hospitalization_data_by_state(
                state=self.state_obj.abbr, t0=self.ref_date
            )
            self.display_name = self.state
        else:
            self.agg_level = AggregationLevel.COUNTY
            self.geo_metadata = (
                load_data.load_county_metadata().set_index("fips").loc[fips].to_dict()
            )
            self.state = self.geo_metadata["state"]
            self.state_obj = us.states.lookup(self.state)
            self.county = self.geo_metadata["county"]
            if self.county:
                self.display_name = self.county + ", " + self.state
            else:
                self.display_name = self.state

            (
                self.times,
                self.observed_new_cases,
                self.observed_new_deaths,
            ) = load_data.load_new_case_data_by_fips(
                self.fips,
                t0=self.ref_date,
                include_testing_correction=self.include_testing_correction,
            )
            (
                self.times_raw_new_cases,
                self.raw_new_cases,
                _,
            ) = load_data.load_new_case_data_by_fips(
                self.fips, t0=self.ref_date, include_testing_correction=False,
            )
            (
                self.hospital_times,
                self.hospitalizations,
                self.hospitalization_data_type,
            ) = load_data.load_hospitalization_data(self.fips, t0=self.ref_date)

        # Convert integer day offsets into concrete dates for plotting/indexing.
        self.case_dates = [ref_date + timedelta(days=int(t)) for t in self.times]
        self.raw_new_case_dates = [
            ref_date + timedelta(days=int(t)) for t in self.times_raw_new_cases
        ]

        if self.hospitalization_data_type:
            self.hospital_dates = [ref_date + timedelta(days=int(t)) for t in self.hospital_times]

        self.default_parameters = ParameterEnsembleGenerator(
            fips=self.fips, N_samples=500, t_list=np.linspace(0, 365, 366)
        ).get_average_seir_parameters()

        # Serial period = Incubation + 0.5 * Infections
        self.serial_period = (
            1 / self.default_parameters["sigma"] + 0.5 * 1 / self.default_parameters["delta"]
        )

        # If we only receive current hospitalizations, we need to account for
        # the outflow to reconstruct new admissions.
        if (
            self.hospitalization_data_type
            is load_data.HospitalizationDataType.CURRENT_HOSPITALIZATIONS
        ):
            los_general = self.default_parameters["hospitalization_length_of_stay_general"]
            los_icu = self.default_parameters["hospitalization_length_of_stay_icu"]
            hosp_rate_general = self.default_parameters["hospitalization_rate_general"]
            hosp_rate_icu = self.default_parameters["hospitalization_rate_icu"]
            icu_rate = hosp_rate_icu / hosp_rate_general
            flow_out_of_hosp = self.hospitalizations[:-1] * (
                (1 - icu_rate) / los_general + icu_rate / los_icu
            )
            # We are attempting to reconstruct the cumulative hospitalizations.
            self.hospitalizations = np.diff(self.hospitalizations) + flow_out_of_hosp
            # Drop the first sample lost to np.diff so all three arrays stay aligned.
            self.hospital_dates = self.hospital_dates[1:]
            self.hospital_times = self.hospital_times[1:]

        self.log_likelihood = None

        # Per-instance logger carrying the display name into every event.
        self.log = structlog.getLogger(Rt_Inference_Target=self.display_name)
        self.log.info(event="Running:")
def get_timeseries(self, timeseries_type):
"""
Given a timeseries type, return the dates, times, and hospitalizations.
Parameters
----------
timeseries_type: TimeseriesType
Which type of time-series to return.
Returns
-------
dates: list(datetime)
Dates for each observation
times: list(int)
Integer days since the reference date.
timeseries:
The requested timeseries.
"""
timeseries_type = TimeseriesType(timeseries_type)
if timeseries_type is TimeseriesType.NEW_CASES:
return self.case_dates, self.times, self.observed_new_cases
elif timeseries_type is TimeseriesType.RAW_NEW_CASES:
return self.raw_new_case_dates, self.times_raw_new_cases, self.raw_new_cases
elif timeseries_type is TimeseriesType.NEW_DEATHS or TimeseriesType.RAW_NEW_DEATHS:
return self.case_dates, self.times, self.observed_new_deaths
elif timeseries_type in (
TimeseriesType.NEW_HOSPITALIZATIONS,
TimeseriesType.CURRENT_HOSPITALIZATIONS,
):
return self.hospital_dates, self.hospital_times, self.hospitalizations
def apply_gaussian_smoothing(self, timeseries_type, plot=True, smoothed_max_threshold=5):
"""
Apply a rolling Gaussian window to smooth the data. This signature and
returns match get_time_series, but will return a subset of the input
time-series starting at the first non-zero value.
Parameters
----------
timeseries_type: TimeseriesType
Which type of time-series to use.
plot: bool
If True, plot smoothed and original data.
smoothed_max_threshold: int
This parameter allows you to filter out entire series
(e.g. NEW_DEATHS) when they do not contain high enough
numeric values. This has been added to account for low-level
constant smoothed values having a disproportionate effect on
our final R(t) calculation, when all of their values are below
this parameter.
Returns
-------
dates: array-like
Input data over a subset of indices available after windowing.
times: array-like
Output integers since the reference date.
smoothed: array-like
Gaussian smoothed data.
"""
timeseries_type = TimeseriesType(timeseries_type)
dates, times, timeseries = self.get_timeseries(timeseries_type)
self.log = self.log.bind(timeseries_type=timeseries_type.value)
# Don't even try if the timeseries is too short (Florida hospitalizations failing with length=6)
if len(timeseries) < InferRtConstants.MIN_TIMESERIES_LENGTH:
return [], [], []
# Hospitalizations have a strange effect in the first few data points across many states.
# Let's just drop those..
if timeseries_type in (
TimeseriesType.CURRENT_HOSPITALIZATIONS,
TimeseriesType.NEW_HOSPITALIZATIONS,
):
dates, times, timeseries = dates[2:], times[:2], timeseries[2:]
# Remove Outliers Before Smoothing. Replaces a value if the current is more than 10 std
# from the 14 day trailing mean and std
timeseries = replace_outliers(pd.Series(timeseries), log=self.log)
# Smoothing no longer involves rounding
smoothed = timeseries.rolling(
self.window_size, win_type="gaussian", min_periods=self.kernel_std, center=True
).mean(std=self.kernel_std)
# Retain logic for detecting what would be nonzero values if rounded
nonzeros = [idx for idx, val in enumerate(smoothed.round()) if val != 0]
if smoothed.empty:
idx_start = 0
elif max(smoothed) < smoothed_max_threshold:
# skip the entire array.
idx_start = len(smoothed)
else:
idx_start = nonzeros[0]
smoothed = smoothed.iloc[idx_start:]
original = timeseries.loc[smoothed.index]
# Only plot counts and smoothed timeseries for cases
if plot and timeseries_type == TimeseriesType.NEW_CASES and len(smoothed) > 0:
plt.figure(figsize=(10, 6))
plt.scatter(
dates[-len(original) :],
original,
alpha=0.3,
label=timeseries_type.value.replace("_", " ").title() + "Shifted",
)
plt.plot(dates[-len(original) :], smoothed)
plt.grid(True, which="both")
plt.xticks(rotation=30)
plt.xlim(min(dates[-len(original) :]), max(dates) + timedelta(days=2))
# plt.legend()
output_path = get_run_artifact_path(self.fips, RunArtifact.RT_SMOOTHING_REPORT)
plt.savefig(output_path, bbox_inches="tight")
plt.close()
return dates, times, smoothed
def highest_density_interval(self, posteriors, ci):
"""
Given a PMF, generate the confidence bands.
Parameters
----------
posteriors: pd.DataFrame
Probability Mass Function to compute intervals for.
ci: float
Float confidence interval. Value of 0.95 will compute the upper and
lower bounds.
Returns
-------
ci_low: np.array
Low confidence intervals.
ci_high: np.array
High confidence intervals.
"""
posterior_cdfs = posteriors.values.cumsum(axis=0)
low_idx_list = np.argmin(np.abs(posterior_cdfs - (1 - ci)), axis=0)
high_idx_list = np.argmin(np.abs(posterior_cdfs - ci), axis=0)
ci_low = self.r_list[low_idx_list]
ci_high = self.r_list[high_idx_list]
return ci_low, ci_high
    def make_process_matrix(self, timeseries_scale=InferRtConstants.SCALE_SIGMA_FROM_COUNT):
        """Externalizes process of generating the Gaussian process matrix adding the following:

        1) Auto adjusts sigma from its default value for low counts - scales sigma up as
           1/sqrt(count) up to a maximum factor of MAX_SCALING_OF_SIGMA
        2) Ensures the smoothing (of the posterior when creating the prior) is symmetric
           in R so that this process does not move argmax (the peak in probability)
        """
        # Scale factor is clamped to [1, MAX_SCALING_OF_SIGMA] times the default sigma.
        use_sigma = (
            min(
                InferRtConstants.MAX_SCALING_OF_SIGMA,
                max(1.0, math.sqrt(InferRtConstants.SCALE_SIGMA_FROM_COUNT / timeseries_scale)),
            )
            * InferRtConstants.DEFAULT_PROCESS_SIGMA
        )
        # Square matrix: each row is a Gaussian centered on the diagonal R value.
        process_matrix = sps.norm(loc=self.r_list, scale=use_sigma).pdf(self.r_list[:, None])

        # process_matrix applies gaussian smoothing to the previous posterior to make the prior.
        # But when the gaussian is wide much of its distribution function can be outside of the
        # range Reff = (0,10). When this happens the smoothing is not symmetric in R space. For
        # R<1, when posteriors[previous_day]).argmax() < 50, this asymmetry can push the argmax of
        # the prior >10 Reff bins (delta R = .2) on each new day. This was a large systematic error.

        # Ensure smoothing window is symmetric in X direction around diagonal
        # to avoid systematic drift towards middle (Reff = 5). This is done by
        # ensuring the following matrix values are 0:
        # 1 0 0 0 0 0 ... 0 0 0 0 0 0
        # * * * 0 0 0 ... 0 0 0 0 0 0
        # ...
        # * * * * * * ... * * * * 0 0
        # * * * * * * ... * * * * * *
        # 0 0 * * * * ... * * * * * *
        # ...
        # 0 0 0 0 0 0 ... 0 0 0 * * *
        # 0 0 0 0 0 0 ... 0 0 0 0 0 1
        sz = len(self.r_list)
        for row in range(0, sz):
            if row < (sz - 1) / 2:
                # Upper half: zero everything beyond the symmetric span right of the diagonal.
                process_matrix[row, 2 * row + 1 : sz] = 0.0
            elif row > (sz - 1) / 2:
                # Lower half: zero everything before the symmetric span left of the diagonal.
                process_matrix[row, 0 : sz - 2 * (sz - row)] = 0.0

        # (3a) Normalize all rows to sum to 1
        row_sums = process_matrix.sum(axis=1)
        for row in range(0, sz):
            process_matrix[row] = process_matrix[row] / row_sums[row]

        return (use_sigma, process_matrix)
    def get_posteriors(self, timeseries_type, plot=False):
        """
        Generate posteriors for R_t.

        Parameters
        ----------
        timeseries_type: TimeseriesType
            New X per day (cases, deaths etc).
        plot: bool
            If True, plot a cool looking est of posteriors.

        Returns
        -------
        dates: array-like
            Input data over a subset of indices available after windowing.
        times: array-like
            Output integers since the reference date.
        posteriors: pd.DataFrame
            Posterior estimates for each timestamp with non-zero data.
        start_idx: int
            Index of first Rt value calculated from input data series
            #TODO figure out why this value sometimes truncates the series
        """
        # Propagate self.min_[cases,deaths] into apply_gaussian_smoothing where used to abort
        # processing of timeseries without high enough counts
        smoothed_max_threshold = (
            self.min_cases if TimeseriesType.NEW_CASES == timeseries_type else self.min_deaths
        )
        dates, times, timeseries = self.apply_gaussian_smoothing(
            timeseries_type, smoothed_max_threshold=smoothed_max_threshold
        )
        if len(timeseries) == 0:
            log.info(
                "%s: empty timeseries %s, skipping" % (self.display_name, timeseries_type.value)
            )
            return None, None, None, None
        else:
            log.info(
                "%s: Analyzing posteriors for timeseries %s"
                % (self.display_name, timeseries_type.value)
            )

        # (1) Calculate Lambda (the Poisson likelihood given the data) based on
        # the observed increase from t-1 cases to t cases.
        lam = timeseries[:-1].values * np.exp((self.r_list[:, None] - 1) / self.serial_period)

        # (2) Calculate each day's likelihood over R_t
        # Originally smoothed counts were rounded (as needed for sps.poisson.pmf below) which doesn't
        # work well for low counts and introduces artifacts at rounding transitions. Now calculate for
        # both ceiling and floor values and interpolate between to get smooth behaviour
        ts_floor = timeseries.apply(np.floor).astype(int)
        ts_ceil = timeseries.apply(np.ceil).astype(int)
        ts_frac = timeseries - ts_floor
        likelihoods_floor = pd.DataFrame(
            data=sps.poisson.pmf(ts_floor[1:].values, lam),
            index=self.r_list,
            columns=timeseries.index[1:],
        )
        likelihoods_ceil = pd.DataFrame(
            data=sps.poisson.pmf(ts_ceil[1:].values, lam),
            index=self.r_list,
            columns=timeseries.index[1:],
        )
        # Interpolate between value for ceiling and floor of smoothed counts
        likelihoods = ts_frac * likelihoods_ceil + (1 - ts_frac) * likelihoods_floor

        # (3) Create the (now scaled up for low counts) Gaussian Matrix
        (current_sigma, process_matrix) = self.make_process_matrix(timeseries.median())

        # (3a) Normalize all rows to sum to 1
        process_matrix /= process_matrix.sum(axis=0)

        # (4) Calculate the initial prior. Gamma mean of "a" with mode of "a-1".
        prior0 = sps.gamma(a=2.5).pdf(self.r_list)
        prior0 /= prior0.sum()
        reinit_prior = sps.gamma(a=2).pdf(self.r_list)
        reinit_prior /= reinit_prior.sum()

        # Create a DataFrame that will hold our posteriors for each day
        # Insert our prior as the first posterior.
        posteriors = pd.DataFrame(
            index=self.r_list, columns=timeseries.index, data={timeseries.index[0]: prior0}
        )

        # We said we'd keep track of the sum of the log of the probability
        # of the data for maximum likelihood calculation.
        log_likelihood = 0.0

        # Initialize timeseries scale (used for auto sigma)
        scale = timeseries.head(1).item()

        # Setup monitoring for Reff lagging signal in daily likelihood
        monitor = LagMonitor(debug=False)  # Set debug=True for detailed printout of daily lag

        # (5) Iteratively apply Bayes' rule
        for previous_day, current_day in zip(timeseries.index[:-1], timeseries.index[1:]):
            # Keep track of exponential moving average of scale of counts of timeseries
            scale = 0.9 * scale + 0.1 * timeseries[current_day]

            # Calculate process matrix for each day
            (current_sigma, process_matrix) = self.make_process_matrix(scale)

            # (5a) Calculate the new prior
            current_prior = process_matrix @ posteriors[previous_day]

            # (5b) Calculate the numerator of Bayes' Rule: P(k|R_t)P(R_t)
            numerator = likelihoods[current_day] * current_prior

            # (5c) Calculate the denominator of Bayes' Rule P(k)
            denominator = np.sum(numerator)

            # Execute full Bayes' Rule
            if denominator == 0:
                # Restart the baysian learning for the remaining series.
                # This is necessary since otherwise NaN values
                # will be inferred for all future days, after seeing
                # a single (smoothed) zero value.
                #
                # We understand that restarting the posteriors with the
                # re-initial prior may incur a start-up artifact as the posterior
                # restabilizes, but we believe it's the current best
                # solution for municipalities that have smoothed cases and
                # deaths that dip down to zero, but then start to increase
                # again.
                posteriors[current_day] = reinit_prior
            else:
                posteriors[current_day] = numerator / denominator

            # Monitors if posterior is lagging excessively behind signal in likelihood
            # TODO future can return cumulative lag and use to scale sigma up only when needed
            monitor.evaluate_lag_using_argmaxes(
                current_day,
                current_sigma,
                posteriors[previous_day].argmax(),
                current_prior.argmax(),
                likelihoods[current_day].argmax(),
                numerator.argmax(),
            )

            # Add to the running sum of log likelihoods
            # NOTE(review): when denominator == 0 (the reinit branch above) this adds
            # log(0) = -inf to the running total -- confirm whether that is intended.
            log_likelihood += np.log(denominator)

        self.log_likelihood = log_likelihood

        if plot:
            plt.figure(figsize=(12, 8))
            plt.plot(posteriors, alpha=0.1, color="k")
            plt.grid(alpha=0.4)
            plt.xlabel("$R_t$", fontsize=16)
            plt.title("Posteriors", fontsize=18)
            plt.close()
        start_idx = -len(posteriors.columns)
        return dates[start_idx:], times[start_idx:], posteriors, start_idx
def get_available_timeseries(self):
"""
Determine available timeseries for Rt inference calculation
with constraints below
Returns
-------
available_timeseries:
array of available timeseries saved as TimeseriesType
"""
available_timeseries = []
IDX_OF_COUNTS = 2
cases = self.get_timeseries(TimeseriesType.NEW_CASES.value)[IDX_OF_COUNTS]
deaths = self.get_timeseries(TimeseriesType.NEW_DEATHS.value)[IDX_OF_COUNTS]
if self.hospitalization_data_type:
hosps = self.get_timeseries(TimeseriesType.NEW_HOSPITALIZATIONS.value)[IDX_OF_COUNTS]
if np.sum(cases) > self.min_cases:
available_timeseries.append(TimeseriesType.NEW_CASES)
available_timeseries.append(TimeseriesType.RAW_NEW_CASES)
if np.sum(deaths) > self.min_deaths:
available_timeseries.append(TimeseriesType.RAW_NEW_DEATHS)
available_timeseries.append(TimeseriesType.NEW_DEATHS)
if (
self.hospitalization_data_type
is load_data.HospitalizationDataType.CURRENT_HOSPITALIZATIONS
and len(hosps > 3)
):
# We have converted this timeseries to new hospitalizations.
available_timeseries.append(TimeseriesType.NEW_HOSPITALIZATIONS)
elif (
self.hospitalization_data_type
is load_data.HospitalizationDataType.CUMULATIVE_HOSPITALIZATIONS
and len(hosps > 3)
):
available_timeseries.append(TimeseriesType.NEW_HOSPITALIZATIONS)
return available_timeseries
    def infer_all(self, plot=True, shift_deaths=0):
        """
        Infer R_t from all available data sources.

        Parameters
        ----------
        plot: bool
            If True, generate a plot of the inference.
        shift_deaths: int
            Shift the death time series by this amount with respect to cases
            (when plotting only, does not shift the returned result).

        Returns
        -------
        inference_results: pd.DataFrame
            Columns containing MAP estimates and confidence intervals.
        """
        df_all = None
        available_timeseries = self.get_available_timeseries()

        for timeseries_type in available_timeseries:
            # Add Raw Data Output to Output Dataframe
            dates_raw, times_raw, timeseries_raw = self.get_timeseries(timeseries_type)
            df_raw = pd.DataFrame()
            df_raw["date"] = dates_raw
            df_raw = df_raw.set_index("date")
            df_raw[timeseries_type.value] = timeseries_raw

            df = pd.DataFrame()
            dates, times, posteriors, start_idx = self.get_posteriors(timeseries_type)
            # Note that it is possible for the dates to be missing days
            # This can cause problems when:
            # 1) computing posteriors that assume continuous data (above),
            # 2) when merging data with variable keys
            if posteriors is None:
                continue

            df[f"Rt_MAP__{timeseries_type.value}"] = posteriors.idxmax()
            for ci in self.confidence_intervals:
                ci_low, ci_high = self.highest_density_interval(posteriors, ci=ci)
                # ci=0.95 produces the Rt_ci5 (lower) and Rt_ci95 (upper) columns.
                low_val = 1 - ci
                high_val = ci
                df[f"Rt_ci{int(math.floor(100 * low_val))}__{timeseries_type.value}"] = ci_low
                df[f"Rt_ci{int(math.floor(100 * high_val))}__{timeseries_type.value}"] = ci_high
            df["date"] = dates
            df = df.set_index("date")

            if df_all is None:
                # NOTE(review): on this first (cases) iteration df_raw is never merged
                # in, so raw case counts are absent from the output -- confirm intended.
                df_all = df
            else:
                # To avoid any surprises merging the data, keep only the keys from the case data
                # which will be the first added to df_all. So merge with how ="left" rather than "outer"
                df_all = df_all.merge(df_raw, left_index=True, right_index=True, how="left")
                df_all = df_all.merge(df, left_index=True, right_index=True, how="left")

            # ------------------------------------------------
            # Compute the indicator lag using the curvature
            # alignment method.
            # ------------------------------------------------
            if (
                timeseries_type in (TimeseriesType.NEW_DEATHS, TimeseriesType.NEW_HOSPITALIZATIONS)
                and f"Rt_MAP__{TimeseriesType.NEW_CASES.value}" in df_all.columns
            ):
                # Go back up to 30 days or the max time series length we have if shorter.
                # NOTE(review): last_idx is negative (e.g. -21), so iloc[-last_idx:]
                # is iloc[21:], which DROPS the first 21 rows rather than keeping a
                # trailing window as this comment suggests -- confirm intended slice.
                last_idx = max(-21, -len(df))
                series_a = df_all[f"Rt_MAP__{TimeseriesType.NEW_CASES.value}"].iloc[-last_idx:]
                series_b = df_all[f"Rt_MAP__{timeseries_type.value}"].iloc[-last_idx:]

                shift_in_days = self.align_time_series(series_a=series_a, series_b=series_b,)

                df_all[f"lag_days__{timeseries_type.value}"] = shift_in_days
                logging.debug(
                    "Using timeshift of: %s for timeseries type: %s ",
                    shift_in_days,
                    timeseries_type,
                )
                # Shift all the columns.
                for col in df_all.columns:
                    if timeseries_type.value in col:
                        df_all[col] = df_all[col].shift(shift_in_days)
                        # Extend death and hopitalization rt signals beyond
                        # shift to avoid sudden jumps in composite metric.
                        #
                        # N.B interpolate() behaves differently depending on the location
                        # of the missing values: For any nans appearing in between valid
                        # elements of the series, an interpolated value is filled in.
                        # For values at the end of the series, the last *valid* value is used.
                        logging.debug("Filling in %s missing values", shift_in_days)
                        df_all[col] = df_all[col].interpolate(
                            limit_direction="forward", method="linear"
                        )

        if df_all is None:
            logging.warning("Inference not possible for fips: %s", self.fips)
            return None

        if (
            not InferRtConstants.DISABLE_DEATHS
            and "Rt_MAP__new_deaths" in df_all
            and "Rt_MAP__new_cases" in df_all
        ):
            df_all["Rt_MAP_composite"] = np.nanmean(
                df_all[["Rt_MAP__new_cases", "Rt_MAP__new_deaths"]], axis=1
            )
            # Just use the Stdev of cases. A correlated quadrature summed error
            # would be better, but is also more confusing and difficult to fix
            # discontinuities between death and case errors since deaths are
            # only available for a subset. Systematic errors are much larger in
            # any case.
            df_all["Rt_ci95_composite"] = df_all["Rt_ci95__new_cases"]
        elif "Rt_MAP__new_cases" in df_all:
            df_all["Rt_MAP_composite"] = df_all["Rt_MAP__new_cases"]
            df_all["Rt_ci95_composite"] = df_all["Rt_ci95__new_cases"]

        # Optionally Smooth just Rt_MAP_composite.
        # Note this doesn't lag in time and preserves integral of Rteff over time
        for i in range(0, InferRtConstants.SMOOTH_RT_MAP_COMPOSITE):
            kernel_width = round(InferRtConstants.RT_SMOOTHING_WINDOW_SIZE / 4)
            smoothed = (
                df_all["Rt_MAP_composite"]
                .rolling(
                    InferRtConstants.RT_SMOOTHING_WINDOW_SIZE,
                    win_type="gaussian",
                    min_periods=kernel_width,
                    center=True,
                )
                .mean(std=kernel_width)
            )

            # Adjust down confidence interval due to count smoothing over kernel_width values but not below .2
            df_all["Rt_MAP_composite"] = smoothed
            df_all["Rt_ci95_composite"] = (
                (df_all["Rt_ci95_composite"] - df_all["Rt_MAP_composite"])
                / math.sqrt(
                    2.0 * kernel_width  # averaging over many points reduces confidence interval
                )
            ).apply(lambda v: max(v, InferRtConstants.MIN_CONF_WIDTH)) + df_all["Rt_MAP_composite"]

        if plot:
            plt.figure(figsize=(10, 6))

            # plt.hlines([1.0], *plt.xlim(), alpha=1, color="g")
            # plt.hlines([1.1], *plt.xlim(), alpha=1, color="gold")
            # plt.hlines([1.3], *plt.xlim(), alpha=1, color="r")

            if "Rt_ci5__new_deaths" in df_all:
                if not InferRtConstants.DISABLE_DEATHS:
                    plt.fill_between(
                        df_all.index,
                        df_all["Rt_ci5__new_deaths"],
                        df_all["Rt_ci95__new_deaths"],
                        alpha=0.2,
                        color="firebrick",
                    )
                # Show for reference even if not used
                plt.scatter(
                    df_all.index,
                    df_all["Rt_MAP__new_deaths"].shift(periods=shift_deaths),
                    alpha=1,
                    s=25,
                    color="firebrick",
                    label="New Deaths",
                )

            if "Rt_ci5__new_cases" in df_all:
                if not InferRtConstants.DISABLE_DEATHS:
                    plt.fill_between(
                        df_all.index,
                        df_all["Rt_ci5__new_cases"],
                        df_all["Rt_ci95__new_cases"],
                        alpha=0.2,
                        color="steelblue",
                    )
                plt.scatter(
                    df_all.index,
                    df_all["Rt_MAP__new_cases"],
                    alpha=1,
                    s=25,
                    color="steelblue",
                    label="New Cases",
                    marker="s",
                )

            if "Rt_ci5__new_hospitalizations" in df_all:
                if not InferRtConstants.DISABLE_DEATHS:
                    plt.fill_between(
                        df_all.index,
                        df_all["Rt_ci5__new_hospitalizations"],
                        df_all["Rt_ci95__new_hospitalizations"],
                        alpha=0.4,
                        color="darkseagreen",
                    )
                # Show for reference even if not used
                plt.scatter(
                    df_all.index,
                    df_all["Rt_MAP__new_hospitalizations"],
                    alpha=1,
                    s=25,
                    color="darkseagreen",
                    label="New Hospitalizations",
                    marker="d",
                )

            if "Rt_MAP_composite" in df_all:
                plt.scatter(
                    df_all.index,
                    df_all["Rt_MAP_composite"],
                    alpha=1,
                    s=25,
                    color="black",
                    label="Inferred $R_{t}$ Web",
                    marker="d",
                )

            if "Rt_ci95_composite" in df_all:
                # Mirror the upper band around the MAP to draw the lower band.
                plt.fill_between(
                    df_all.index,
                    df_all["Rt_ci95_composite"],
                    2 * df_all["Rt_MAP_composite"] - df_all["Rt_ci95_composite"],
                    alpha=0.2,
                    color="gray",
                )

            plt.hlines([0.9], *plt.xlim(), alpha=1, color="g")
            plt.hlines([1.1], *plt.xlim(), alpha=1, color="gold")
            plt.hlines([1.4], *plt.xlim(), alpha=1, color="r")

            plt.xticks(rotation=30)
            plt.grid(True)
            plt.xlim(df_all.index.min() - timedelta(days=2), df_all.index.max() + timedelta(days=2))
            plt.ylim(0.0, 3.0)
            plt.ylabel("$R_t$", fontsize=16)
            plt.legend()
            plt.title(self.display_name, fontsize=16)

            output_path = get_run_artifact_path(self.fips, RunArtifact.RT_INFERENCE_REPORT)
            plt.savefig(output_path, bbox_inches="tight")
            plt.close()
        if df_all.empty:
            logging.warning("Inference not possible for fips: %s", self.fips)
        return df_all
@staticmethod
def ewma_smoothing(series, tau=5):
"""
Exponentially weighted moving average of a series.
Parameters
----------
series: array-like
Series to convolve.
tau: float
Decay factor.
Returns
-------
smoothed: array-like
Smoothed series.
"""
exp_window = signal.exponential(2 * tau, 0, tau, False)[::-1]
exp_window /= exp_window.sum()
smoothed = signal.convolve(series, exp_window, mode="same")
return smoothed
@staticmethod
def align_time_series(series_a, series_b):
"""
Identify the optimal time shift between two data series based on
maximal cross-correlation of their derivatives.
Parameters
----------
series_a: pd.Series
Reference series to cross-correlate against.
series_b: pd.Series
Reference series to shift and cross-correlate against.
Returns
-------
shift: int
A shift period applied to series b that aligns to series a
"""
shifts = range(-21, 5)
valid_shifts = []
xcor = []
np.random.seed(InferRtConstants.RNG_SEED) # Xcor has some stochastic FFT elements.
_series_a = np.diff(series_a)
for i in shifts:
series_b_shifted = np.diff(series_b.shift(i))
valid = ~np.isnan(_series_a) & ~np.isnan(series_b_shifted)
if len(series_b_shifted[valid]) > 0:
xcor.append(signal.correlate(_series_a[valid], series_b_shifted[valid]).mean())
valid_shifts.append(i)
if len(valid_shifts) > 0:
return valid_shifts[np.argmax(xcor)]
else:
return 0
@classmethod
def run_for_fips(cls, fips):
try:
engine = cls(fips)
return engine.infer_all()
except Exception:
logging.exception("run_for_fips failed")
return None
def replace_outliers(
    x,
    log,
    local_lookback_window=InferRtConstants.LOCAL_LOOKBACK_WINDOW,
    z_threshold=InferRtConstants.Z_THRESHOLD,
    min_mean_to_consider=InferRtConstants.MIN_MEAN_TO_CONSIDER,
):
    """
    Take a pandas.Series, apply an outlier filter, and return a pandas.Series.

    This outlier detector looks at the z score of the current value compared to the mean and std
    derived from the previous N samples, where N is the local_lookback_window.

    For points where the z score is greater than z_threshold, a check is made to make sure the mean
    of the last N samples is at least min_mean_to_consider. This makes sure we don't filter on the
    initial case where values go from all zeros to a one. If that threshold is met, the value is
    then replaced with the linear interpolation between the two nearest neighbors.

    NOTE(review): replacement happens in place on ``x`` (which is also returned),
    and ``x`` is assumed to have a default RangeIndex so that label lookups like
    ``x[idx]`` coincide with positional ``x.iloc[idx]`` -- confirm callers never
    pass a Series with a non-integer index.

    Parameters
    ----------
    x
        Input pandas.Series with the values to analyze
    log
        Logger instance
    local_lookback_window
        The length of the rolling window to look back and calculate the mean and std to baseline the
        z score. NB: We require the window to be full before returning any result.
    z_threshold
        The minimum z score needed to trigger the replacement
    min_mean_to_consider
        Threshold to skip low n cases, especially the degenerate case where a long list of zeros
        becomes a 1. This requires that the rolling mean of previous values must be greater than
        or equal to min_mean_to_consider to be replaced.

    Returns
    -------
    x
        pandas.Series with any triggered outliers replaced
    """
    # Calculate Z Score against the *previous* window only (shift(1) keeps the
    # current point out of its own baseline). EPSILON is a module-level
    # constant guarding against a zero rolling std.
    r = x.rolling(window=local_lookback_window, min_periods=local_lookback_window, center=False)
    m = r.mean().shift(1)
    s = r.std(ddof=0).shift(1)
    z_score = (x - m) / (s + EPSILON)
    possible_changes_idx = np.flatnonzero(z_score > z_threshold)
    changed_idx = []
    changed_value = []
    changed_snippets = []
    for idx in possible_changes_idx:
        # Only replace when the local history is already non-trivial; this
        # skips the all-zeros-then-one startup case described above.
        if m[idx] > min_mean_to_consider:
            changed_idx.append(idx)
            changed_value.append(int(x[idx]))
            slicer = slice(idx - local_lookback_window, idx + local_lookback_window)
            changed_snippets.append(x[slicer].astype(int).tolist())
            # Replace the outlier with the mean of its two nearest neighbors.
            try:
                x[idx] = np.mean([x.iloc[idx - 1], x.iloc[idx + 1]])
            except IndexError:  # Value to replace can be newest and fail on x[idx+1].
                # If so, just use previous.
                x[idx] = x[idx - 1]
    if len(changed_idx) > 0:
        log.info(
            event="Replacing Outliers:",
            outlier_values=changed_value,
            z_score=z_score[changed_idx].astype(int).tolist(),
            where=changed_idx,
            snippets=changed_snippets,
        )
    return x
def run_state(state, states_only=False):
    """
    Run the R_t inference for a state and, unless states_only, each county.

    Parameters
    ----------
    state: str
        State to run against.
    states_only: bool
        If True only run the state level.
    """
    state_fips = us.states.lookup(state).fips
    state_result = RtInferenceEngine.run_for_fips(state_fips)
    state_output_path = get_run_artifact_path(state_fips, RunArtifact.RT_INFERENCE_RESULT)
    if state_result is None or state_result.empty:
        logging.error("Empty dataframe encountered! No RtInference results available for %s", state)
    else:
        state_result.to_json(state_output_path)
    if states_only:
        return
    # County level. NOTE: kept serial on purpose -- something in here
    # doesn't like multiprocessing.
    county_fips_codes = load_data.get_all_fips_codes_for_a_state(state)
    county_results = county_fips_codes.map(RtInferenceEngine.run_for_fips).tolist()
    for county_fips, county_result in zip(county_fips_codes, county_results):
        if county_result is not None:
            county_result.to_json(
                get_run_artifact_path(county_fips, RunArtifact.RT_INFERENCE_RESULT)
            )
def run_county(fips):
    """
    Run the R_t inference for a single county.

    Parameters
    ----------
    fips: str
        County fips to run against
    """
    if not fips:
        return None
    county_result = RtInferenceEngine.run_for_fips(fips)
    if county_result is None or county_result.empty:
        return
    county_result.to_json(get_run_artifact_path(fips, RunArtifact.RT_INFERENCE_RESULT))
| StarcoderdataPython |
3436776 | <gh_stars>1-10
import copy
from typing import Union
import json
from urllib.parse import urlencode
from youtubesearchpython.core.requests import RequestCore
from youtubesearchpython.handlers.componenthandler import ComponentHandler
from youtubesearchpython.core.constants import *
class ChannelSearchCore(RequestCore, ComponentHandler):
    """Search restricted to a single YouTube channel (``browseId``).

    Issues an InnerTube ``browse`` request carrying the query and the
    channel-search preferences, then parses the result components out of
    the response. Use ``sync_create``/``next`` for sync/async operation
    and ``result`` to read the parsed output.
    """

    # Class-level defaults, overwritten per instance by the methods below.
    # NOTE(review): resultComponents is a shared mutable class attribute --
    # confirm nothing mutates it in place across instances.
    response = None
    responseSource = None
    resultComponents = []

    def __init__(self, query: str, language: str, region: str, searchPreferences: str, browseId: str, timeout: int):
        super().__init__()
        self.query = query
        self.language = language
        self.region = region
        self.browseId = browseId
        self.searchPreferences = searchPreferences
        self.continuationKey = None
        self.timeout = timeout

    def sync_create(self):
        """Run the search synchronously; parsed results land in ``self.response``."""
        self._syncRequest()
        self._parseChannelSearchSource()
        self.response = self._getChannelSearchComponent(self.response)

    async def next(self):
        """Run the search asynchronously and return the parsed results."""
        await self._asyncRequest()
        self._parseChannelSearchSource()
        self.response = self._getChannelSearchComponent(self.response)
        return self.response

    def _parseChannelSearchSource(self) -> None:
        """Pull the section list out of the raw browse response.

        The results live in the last tab: either an ``expandableTabRenderer``
        or a ``tabRenderer`` (which may have no ``content`` yet -> []).
        """
        try:
            last_tab = self.response["contents"]["twoColumnBrowseResultsRenderer"]["tabs"][-1]
            if 'expandableTabRenderer' in last_tab:
                self.response = last_tab["expandableTabRenderer"]["content"]["sectionListRenderer"]["contents"]
            else:
                tab_renderer = last_tab["tabRenderer"]
                if 'content' in tab_renderer:
                    self.response = tab_renderer["content"]["sectionListRenderer"]["contents"]
                else:
                    self.response = []
        except Exception as parse_error:
            # Was a bare ``except:`` -- that also caught KeyboardInterrupt /
            # SystemExit and discarded the root cause. Chain it instead.
            raise Exception('ERROR: Could not parse YouTube response.') from parse_error

    def _getRequestBody(self):
        ''' Builds the browse request payload and URL. Fixes #47 '''
        requestBody = copy.deepcopy(requestPayload)
        requestBody['query'] = self.query
        requestBody['client'] = {
            'hl': self.language,
            'gl': self.region,
        }
        requestBody['params'] = self.searchPreferences
        requestBody['browseId'] = self.browseId
        self.url = 'https://www.youtube.com/youtubei/v1/browse' + '?' + urlencode({
            'key': searchKey,
        })
        self.data = requestBody

    def _syncRequest(self) -> None:
        ''' Sends the browse request synchronously. Fixes #47 '''
        self._getRequestBody()
        request = self.syncPostRequest()
        try:
            self.response = request.json()
        except Exception as request_error:
            # Chain the JSON-decoding failure instead of hiding it (was bare except).
            raise Exception('ERROR: Could not make request.') from request_error

    async def _asyncRequest(self) -> None:
        ''' Sends the browse request asynchronously. Fixes #47 '''
        self._getRequestBody()
        request = await self.asyncPostRequest()
        try:
            self.response = request.json()
        except Exception as request_error:
            raise Exception('ERROR: Could not make request.') from request_error

    def result(self, mode: int = ResultMode.dict) -> Union[str, dict]:
        '''Returns the search result.

        Args:
            mode (int, optional): Sets the type of result. Defaults to ResultMode.dict.

        Returns:
            Union[str, dict]: Returns JSON or dictionary.
        '''
        if mode == ResultMode.json:
            return json.dumps({'result': self.response}, indent=4)
        elif mode == ResultMode.dict:
            return {'result': self.response}
| StarcoderdataPython |
31728 | <reponame>pasin30055/planning-evaluation-framework<filename>src/data_generators/pricing_generator.py
# Copyright 2021 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Associate random pricing information to impression data."""
from typing import List
from typing import Tuple
class PricingGenerator:
"""Associates random pricing information to impression data.
This class, along with ImpressionGenerator, assists in the generation of
random PublisherDataFiles. The PricingGenerator will associate random
prices to the impressions generated by the ImpressionGenerator.
"""
def __init__(self):
"""Constructor for the PricingGenerator.
This would typically be overridden with a method whose signature
would specify the various parameters of the pricing distribution
to be generated.
"""
pass
def __call__(self, impressions: List[int]) -> List[Tuple[int, float]]:
"""Generate a random sequence of prices.
Args:
impressions: A list of user id's, with multiplicities, to which
pricing data is to be associated.
Returns:
A list of pairs (user_id, total_spend). The length of the list would
be the same as the list of impressions, and user_id's would be in 1-1
correspondences with those in the list of impressions. Associated to
each user_id is the total spend amount at which the impression would be
included in those shown by the advertiser.
"""
pass
| StarcoderdataPython |
267077 | <filename>cinder/tests/unit/volume/drivers/ibm/fake_pyxcli.py<gh_stars>1-10
# Copyright (c) 2016 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
""" Fake pyxcli-client for testing the driver without installing pyxcli"""
import mock
import sys
from cinder.tests.unit.volume.drivers.ibm import fake_pyxcli_exceptions
# Stand-in pyxcli client: a Mock carrying the submodules the cinder IBM
# driver imports. errors/transports point at the fake exception module so
# driver code can still catch concrete exception classes.
pyxcli_client = mock.Mock()
pyxcli_client.errors = fake_pyxcli_exceptions
pyxcli_client.events = mock.Mock()
pyxcli_client.mirroring = mock.Mock()
pyxcli_client.transports = fake_pyxcli_exceptions
pyxcli_client.mirroring.cg_recovery_manager = mock.Mock()
pyxcli_client.version = '1.1.5'  # fake library version reported to the driver
pyxcli_client.mirroring.mirrored_entities = mock.Mock()
# Register the fakes in sys.modules so `import pyxcli...` resolves to them
# before any driver module is imported.
sys.modules['pyxcli'] = pyxcli_client
sys.modules['pyxcli.events'] = pyxcli_client.events
sys.modules['pyxcli.mirroring'] = pyxcli_client.mirroring
| StarcoderdataPython |
6516888 | <reponame>clumio-code/clumio-python-sdk
#
# Copyright 2021. Clumio, Inc.
#
from clumioapi import api_helper
from clumioapi import configuration
from clumioapi.controllers import base_controller
from clumioapi.exceptions import clumio_exception
from clumioapi.models import create_backup_vmware_vm_v1_request
from clumioapi.models import list_vm_backups_response
from clumioapi.models import read_vm_backup_response
import requests
class BackupVmwareVmsV1Controller(base_controller.BaseController):
    """A Controller to access Endpoints for the backup-vmware-vms resource."""

    def __init__(self, config: configuration.Configuration) -> None:
        super().__init__(config)
        self.config = config

    def list_backup_vmware_vms(
        self, limit: int = None, start: str = None, filter: str = None
    ) -> list_vm_backups_response.ListVMBackupsResponse:
        """Returns a list of VMware virtual machines (VMs) that have been backed up by
        Clumio. VM backups can be restored through the [POST
        /restores/vmware/vms](#operation/restore-vmware-vm) endpoint.

        Args:
            limit:
                Limits the size of the response on each page to the specified number of items.
            start:
                Sets the page number used to browse the collection.
                Pages are indexed starting from 1 (i.e., `start=1`).
            filter:
                Narrows down the results to only the items that satisfy the filter criteria.
                Supported filter fields and conditions:

                - start_timestamp ($lte, $gt): timestamp of when the backup started,
                  RFC-3339 format. For example,
                  filter={"start_timestamp":{"$lte":"1985-04-12T23:20:50Z"}}
                - vcenter_id ($eq): the ID of the vCenter associated with the backup VM.
                  For example, filter={"vcenter_id":{"$eq":"38"}}
                - vm_id ($eq): the 128-bit universally unique identifier (UUID) of the
                  backup VM to be restored. Must be set together with vcenter_id. For
                  example, filter={"vm_id":{"$eq":"50261484-4e52-373f-20ac-8a8c2145f196"},
                  "vcenter_id":{"$eq":"38"}}
        Returns:
            ListVMBackupsResponse: Response from the API.
        Raises:
            ClumioException: An error occurred while executing the API.
                This exception includes the HTTP response code, an error
                message, and the HTTP body that was received in the request.
        """

        # Prepare query URL and parameters (was previously assigned twice).
        _url_path = f'{self.config.base_path}/backups/vmware/vms'
        _query_parameters = {'limit': limit, 'start': start, 'filter': filter}

        # Prepare headers
        _headers = {
            'accept': 'application/backup-vmware-vms=v1+json',
        }
        # Execute request
        try:
            resp = self.client.get(_url_path, headers=_headers, params=_query_parameters)
        except requests.exceptions.HTTPError as http_error:
            errors = self.client.get_error_message(http_error.response)
            # Chain the underlying HTTPError so callers keep the root cause.
            raise clumio_exception.ClumioException(
                'Error occurred while executing list_backup_vmware_vms.', errors
            ) from http_error
        return list_vm_backups_response.ListVMBackupsResponse.from_dictionary(resp)

    def create_backup_vmware_vm(
        self, body: create_backup_vmware_vm_v1_request.CreateBackupVmwareVmV1Request = None
    ) -> object:
        """Performs an on-demand backup for the specified VM. The VM must be protected with
        a policy that includes a service level agreement (SLA) configured for on-demand
        backups.

        Args:
            body:
        Returns:
            object: Response from the API.
        Raises:
            ClumioException: An error occurred while executing the API.
                This exception includes the HTTP response code, an error
                message, and the HTTP body that was received in the request.
        """

        # Prepare query URL; this endpoint takes no query parameters.
        _url_path = f'{self.config.base_path}/backups/vmware/vms'
        _query_parameters = {}

        # Prepare headers
        _headers = {
            'accept': 'application/backup-vmware-vms=v1+json',
        }
        # Execute request
        try:
            resp = self.client.post(
                _url_path,
                headers=_headers,
                params=_query_parameters,
                json=api_helper.to_dictionary(body),
            )
        except requests.exceptions.HTTPError as http_error:
            errors = self.client.get_error_message(http_error.response)
            raise clumio_exception.ClumioException(
                'Error occurred while executing create_backup_vmware_vm.', errors
            ) from http_error
        return resp

    def read_backup_vmware_vm(self, backup_id: int) -> read_vm_backup_response.ReadVMBackupResponse:
        """Returns a representation of the specified VM backup.

        Args:
            backup_id:
                Performs the operation on the backup with the specified ID.
        Returns:
            ReadVMBackupResponse: Response from the API.
        Raises:
            ClumioException: An error occurred while executing the API.
                This exception includes the HTTP response code, an error
                message, and the HTTP body that was received in the request.
        """

        # Prepare query URL. NOTE(review): backup_id is interpolated by the
        # f-string AND passed through the template-parameter helper, matching
        # the original generated code -- confirm the helper is a no-op here.
        _url_path = f'{self.config.base_path}/backups/vmware/vms/{backup_id}'
        _url_path = api_helper.append_url_with_template_parameters(
            _url_path, {'backup_id': backup_id}
        )
        _query_parameters = {}

        # Prepare headers
        _headers = {
            'accept': 'application/backup-vmware-vms=v1+json',
        }
        # Execute request
        try:
            resp = self.client.get(_url_path, headers=_headers, params=_query_parameters)
        except requests.exceptions.HTTPError as http_error:
            errors = self.client.get_error_message(http_error.response)
            raise clumio_exception.ClumioException(
                'Error occurred while executing read_backup_vmware_vm.', errors
            ) from http_error
        return read_vm_backup_response.ReadVMBackupResponse.from_dictionary(resp)
| StarcoderdataPython |
5037262 | import numpy as np
# Column vectors (4x1) used in the linear combination printed below.
x = np.array([1, 2, -3, 4]).reshape(4, 1)
y = np.array([-2, 4, 1, 0]).reshape(4, 1)
z = np.array([5, -2, 3, -7]).reshape(4, 1)
print("2*x-3*y+z=\n", 2*x-3*y+z) | StarcoderdataPython |
6541509 | <gh_stars>1-10
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
"""
This module provides some functionality related to S-CODE vectors
such as reading, vector concetanation and so on.
"""
from nlp_utils import fopen
from collections import defaultdict as dd
from collections import Counter, namedtuple
import numpy as np
import sys
import gzip
SubstituteDistribution = namedtuple('SubstituteDistribution',
'substitute, probability')
def exclude_missing_subs_and_normalize(sub_probs, vectors):
    """Drop substitutes that have no embedding and renormalize the rest.

    Keeps only entries whose ``substitute`` is a key of ``vectors`` and
    rescales the surviving probabilities so they sum to one. Returns a list
    of (substitute, probability) tuples.
    """
    kept = [entry for entry in sub_probs if entry.substitute in vectors]
    total = sum(entry.probability for entry in kept)
    return [(entry.substitute, entry.probability / total) for entry in kept]
def get_X(embeddings):
    """Return the type-0 (X) embedding table from the type->table dict."""
    return embeddings[0]
def get_Y(embeddings):
    """Return the type-1 (Y) embedding table from the type->table dict.

    Falls back to the type-0 table when only one embedding type was read.
    """
    # len(embeddings) replaces the redundant len(embeddings.keys()).
    if len(embeddings) >= 2:
        return embeddings[1]
    return embeddings[0]
def read_embedding_vectors(embedding_f, wordset=None, not_scode_f=False):
    """Read embedding vectors from embedding_f.

    wordset, if given, restricts which tokens are fetched from the file.
    not_scode_f selects the plain "word v1 v2 ..." layout instead of the
    S-CODE "<type>:<word> <count> <v1> ..." layout.

    Returns a dict: type -> {word: (numpy float64 vector, count)}.
    NOTE: Python 2 module (print-statement syntax preserved).
    """
    if not_scode_f:
        print >> sys.stderr, "INFO: %s not S-CODE Embedding" % embedding_f
    else:
        print >> sys.stderr, "INFO: %s S-CODE Embedding" % embedding_f
    # Identity comparison with None (was `wordset == None`).
    assert isinstance(wordset, set) or wordset is None, "wordset should be a set"
    d = dd(lambda: dict())
    for line in fopen(embedding_f):
        line = line.split()
        if not_scode_f:
            # Plain format: token then vector components; count defaults to 1.
            typ = 0
            w = line[0]
            start = 1
            count = 1
        else:
            # S-CODE format: "<type>:<word> <count> <vector...>".
            typ = int(line[0][0])
            w = line[0][2:]
            start = 2
            count = int(line[1])
        if wordset is None or w in wordset:
            d[typ][w] = (np.array(line[start:], dtype='float64'), count)
    for typ in d:
        print >> sys.stderr, "Total # of embeddings: %d for type: %d" % \
            (len(d[typ]), typ)
    return d
def concat_XY(embedding_d, subs):
    """For every (target X, substitute Y) pair, concatenate X's type-0 vector
    with Y's type-1 vector; the pair's count is how often Y substituted X.

    Returns dict: X -> {Y: (concatenated vector, count)}.
    NOTE: Python 2 module (`viewitems`).
    """
    d = dd(lambda : dict())
    for X, s in subs.viewitems():
        # Count duplicate substitutes for this target word.
        Xs = Counter(s)
        for Y, count in Xs.viewitems():
            d[X][Y] = (np.concatenate([embedding_d[0][X][0], embedding_d[1][Y][0]]), count)
    return d
def concat_XYbar(embedding_d, subs, dim=25):
    """Concatenate each X's type-0 vector with the L2-normalized,
    count-weighted average (Y_bar) of its substitutes' type-1 vectors.

    Returns dict: X -> (concatenated vector, 1).
    NOTE: Python 2 module (`viewitems`).
    """
    d = dict()
    for X, s in subs.viewitems():
        Y_bar = np.zeros(dim)
        Xs = Counter(s)
        for Y, count in Xs.viewitems():
            Y_bar += embedding_d[1][Y][0] * count
        # L2-normalize the averaged context vector.
        # NOTE(review): divides by zero if a target has no substitutes --
        # confirm upstream guarantees at least one.
        Y_bar /= (Y_bar.dot(Y_bar) ** 0.5)
        # BUG FIX: np.concatenate takes a *sequence* of arrays; its second
        # positional argument is `axis`, so the old call
        # np.concatenate(a, Y_bar) raised a TypeError at runtime.
        d[X] = (np.concatenate((embedding_d[0][X][0], Y_bar)), 1)
    return d
def concat_XYw(embedding_d1, embedding_d2, sub_vecs, target_word_strip_func=None):
    """ Combined embedding, weighted by substitute probabilities (i.e, Volkan's method)

    For each (target word, substitute distribution) pair, concatenates the
    target's X vector with the probability-weighted sum of the substitutes'
    Y vectors. Targets without an X embedding are skipped.

    original_X_embeddings indicates that sub_vecs target words and embeddings are matches.
    We need this because this method can concatenate embeddings that are not based on
    the data which we get substitute distributions.

    Returns (target_words, vectors) as two parallel lists.
    NOTE: Python 2 module (print statement, dict.keys() indexing).
    """
    func = target_word_strip_func
    to_return = []
    target_words = []
    dim = len(embedding_d2[embedding_d2.keys()[0]][0])# Y vectors dimensionality
    total_context_word_used = 0
    total_context_word = 0
    for target_word, sub_probs in sub_vecs:
        # make it namedtuple: (substitute, probability)
        sub_probs = map(SubstituteDistribution._make, sub_probs)
        # Optionally strip the target word before the X-embedding lookup.
        t = target_word
        if func is not None:
            t = func(target_word)
        try:
            X = embedding_d1[t][0] # [0] -> vector, [1] -> #of occurrences
        except KeyError:
            print >> sys.stderr, "no X embedding for %s" % t
            continue # pass this on
        total_context_word += len(sub_probs)
        Y_bar = np.zeros(dim)
        # Drop substitutes without a Y embedding and renormalize.
        sub_probs = exclude_missing_subs_and_normalize(sub_probs, embedding_d2)
        total_context_word_used += len(sub_probs)
        for sub, prob in sub_probs:
            try:
                Y_bar += embedding_d2[sub][0] * prob
            except KeyError:
                print >> sys.stderr, "no Y embedding for %s" % sub
        to_return.append(np.concatenate((X, Y_bar)))
        target_words.append(target_word)
    print >> sys.stderr, "Ratio of used_context word and total context " \
        "word: %f" % \
        (total_context_word_used / float(total_context_word))
    return target_words, to_return
# def create_Yw(embeddings, sub_vecs):
# Can be obsolete. Try to solve it with cut -f1,2,100-
# to_return = []
# target_words = []
#
# dim = len(embeddings[embeddings.keys()[0]][0])# Y vectors dimensionality
# total_context_word_used = 0
# total_context_word = 0
# for target_word, sub_probs in sub_vecs:
# # make it namedtuple: (substitute, probability)
# sub_probs = map(SubstituteDistribution._make, sub_probs)
# total_context_word += len(sub_probs)
# Y_bar = np.zeros(dim)
# sub_probs = exclude_missing_subs_and_normalize(sub_probs, embeddings)
# total_context_word_used += len(sub_probs)
# for sub, prob in sub_probs:
# try:
# Y_bar += embeddings[sub][0] * prob
# except KeyError:
# print >> sys.stderr, "no Y embedding for %s" % sub
# to_return.append(Y_bar)
# target_words.append(target_word)
# print >> sys.stderr, "Ratio of used_context word and total context " \
# "word: %f" % \
# (total_context_word_used / float(total_context_word))
# return target_words, to_return
def write_vec(embedding_d, fn=None):
    """Write embeddings as tab-separated records -- word, count, then the
    vector components -- one record per line, to stdout or to gzip file fn.

    NOTE: Python 2 module (`viewitems`).
    """
    f = sys.stdout
    if fn is not None:
        f = gzip.open(fn, 'w')
    for word, (vec, count) in embedding_d.viewitems():
        # BUG FIX: terminate each record with a newline. Without it every
        # record ran together on a single line, and read_embedding_vectors
        # (which parses line by line) could not read the output back.
        f.write("{}\t{}\t{}\n".format(word, count, "\t".join(map(str, vec))))
    if fn is not None:
        f.close()
| StarcoderdataPython |
11223263 | <filename>django_email_foundation/management/commands/create_basic_structure.py
from django.core.management import BaseCommand
from django_email_foundation.api import DjangoEmailFoundation, Checks
class Command(BaseCommand):
    """Management command that prepares the email-template folder structure."""

    # User-facing description; grammar fixed ("and it add" -> "and add").
    help = 'Create the necessary folders inside the template path and add a basic layout.'

    def handle(self, *args, **options):
        """Validate configuration, then delegate folder creation to the API."""
        if not Checks().templates_source_path():
            # Grammar fixed ("must to define" -> "must define").
            self.stdout.write(self.style.ERROR('You must define the templates source path.'))
            return
        self.stdout.write('Creating folders...')
        error = DjangoEmailFoundation().create_basic_structure()
        if error:
            self.stderr.write(self.style.ERROR(error))
        else:
            self.stdout.write(self.style.SUCCESS('Done!'))
| StarcoderdataPython |
1968422 | """
Define here anything that is needed for the package framework.classes.
""" | StarcoderdataPython |
6597002 | <reponame>michalinadengusiak/Learning-Python
# coding: utf-8
# In[17]:
# Hangman: the player guesses letters until the word is revealed or the
# allowed number of wrong guesses runs out.
import time

word = "<PASSWORD>"  # secret word (placeholder value kept as-is)
guesses = ''         # every character the player has tried so far
turns = 13           # wrong guesses allowed

# Greet the player.
Name = input("What is your name ? ")
print("Hello", Name, "! It is time for a hangman game!")

# Small dramatic pauses before the game starts.
time.sleep(1)
print("Start guessing...")
time.sleep(0.2)

while turns > 0:
    # Show the word with unguessed letters masked, all on one line;
    # `failed` counts the letters still hidden.
    failed = 0
    for char in word:
        if char in guesses:
            # BUG FIX: `print(char),` was a Python 2 leftover that printed
            # one character per line; use end=' ' for an inline display.
            print(char, end=' ')
        else:
            print('_', end=' ')
            failed += 1
    # BUG FIX: the bare `print` expression was a no-op in Python 3; call it
    # to terminate the masked-word line.
    print()
    # Every letter revealed: the player wins.
    if failed == 0:
        print('You won !!!')
        break
    guess = input('Please give in a character,')
    guesses += guess
    # A wrong guess costs one turn.
    if guess not in word:
        turns -= 1
        print('Wrong!')
        print('You have', turns, 'turns')
    if turns == 0:
        print('YOU LOSE!!!!!!')
| StarcoderdataPython |
12825668 | <reponame>nebiutadele/2022-02-28-Alta3-Python
#!/usr/bin/python3
import requests
import json
# define the URL we want to use
GETURL = "http://validate.jsontest.com/"
def main():
    """Build a sample payload, ask validate.jsontest.com whether it is legal
    JSON, and print the service's verdict."""
    sample = {"fruit": ["apple", "pear"], "vegetable": ["carrot"]}
    # Serialize to legal JSON and strip whitespace, then wrap as the query value.
    query = "json=" + json.dumps(sample).replace(" ", "")
    # HTTP GET, then decode the JSON body into native list/dict structures.
    response_data = requests.get(f"{GETURL}?{query}").json()
    print(response_data)
    # Show just the "validate" verdict.
    print(f"Is your JSON valid? {response_data['validate']}")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
15224 | <filename>my_spotless_app/migrations/0002_alter_service_picture_url.py
# Generated by Django 3.2 on 2022-02-27 11:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header); alters
    # Service.picture_url to a TextField.
    dependencies = [
        ('my_spotless_app', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='service',
            name='picture_url',
            # Unbounded text; the previous field definition lives in 0001_initial.
            field=models.TextField(),
        ),
    ]
| StarcoderdataPython |
6629475 | # Definition for a binary tree node.
"""
timecomplexity = O(n) spacecomplexity = O(n)
Using a dict to store prefix sum occurs so far
let sum = from root to cur node val's sum
check how many prefix sums equal to sum - target
then there are same number of subpath that subpathsum = target
remember that when return from subrecusive, need to cancel the curpathsum from dict because the path can only use one childnode
"""
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        # Children start empty; the caller wires the tree together.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Count downward paths whose node values sum to a target.

    Prefix-sum strategy: while walking root->node keep the running sum and a
    table of how often each prefix sum occurred on the current root path. A
    sub-path ending at the current node sums to `target` exactly when some
    earlier prefix equals (running - target). The current prefix is removed
    on backtrack so sibling subtrees never see it.
    Time O(n), space O(n).
    """

    def _getPathSum(self, root, target, curPathSum, dictSum):
        """DFS helper: accumulate the number of matching paths into self.ans."""
        if root is None:
            return
        running = curPathSum + root.val
        # Paths ending here == occurrences of the complementary prefix sum.
        self.ans += dictSum.get(running - target, 0)
        dictSum[running] = dictSum.get(running, 0) + 1
        for child in (root.left, root.right):
            self._getPathSum(child, target, running, dictSum)
        # Backtrack: this prefix belongs only to the current root path.
        dictSum[running] -= 1

    def pathSum(self, root: TreeNode, sum: int) -> int:
        self.ans = 0
        # {0: 1} lets paths that start at the root itself be counted.
        self._getPathSum(root, sum, 0, {0: 1})
        return self.ans
#root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
# Ad-hoc smoke test: build the example tree above and count downward paths
# summing to 8 (by hand: 5->3, 5->2->1, -3->11 -- expected output 3).
A = Solution()
a = TreeNode(10)
b = TreeNode(5)
c = TreeNode(-3)
d = TreeNode(3)
e = TreeNode(2)
f = TreeNode(11)
g = TreeNode(3)
h = TreeNode(-2)
i = TreeNode(1)
# Wire up:       10
#              /    \
#             5      -3
#            / \       \
#           3   2      11
#          / \   \
#         3  -2   1
a.left = b
a.right = c
b.left = d
b.right = e
c.right = f
d.left = g
d.right = h
e.right = i
print(A.pathSum(a,8)) | StarcoderdataPython |
5187399 | <reponame>saksham1115/mediagoblin<filename>mediagoblin/db/extratypes.py
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.types import TypeDecorator, Unicode, TEXT
import json
class PathTupleWithSlashes(TypeDecorator):
    "Represents a Tuple of strings as a slash separated string."

    impl = Unicode

    def process_bind_param(self, value, dialect):
        # Both None and the empty tuple are stored as NULL; everything else
        # is joined with '/'.
        if value is None:
            return None
        return '/'.join(value) if len(value) else None

    def process_result_value(self, value, dialect):
        # NULL round-trips as None; otherwise split back into a tuple.
        if value is None:
            return None
        return tuple(value.split('/'))
# The following two classes and only these two classes is in very
# large parts based on example code from sqlalchemy.
#
# The original copyright notice and license follows:
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
class JSONEncoded(TypeDecorator):
    "Represents an immutable structure as a json-encoded string."

    impl = TEXT

    def process_bind_param(self, value, dialect):
        # Serialize on the way into the database; NULL passes through.
        return None if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        # Deserialize on the way out; NULL stays None.
        return None if value is None else json.loads(value)
class MutationDict(Mutable, dict):
    """Dict subclass that notifies SQLAlchemy's mutation tracking on change."""

    @classmethod
    def coerce(cls, key, value):
        "Convert plain dictionaries to MutationDict."
        if isinstance(value, MutationDict):
            return value
        if isinstance(value, dict):
            return MutationDict(value)
        # Not dict-like at all: Mutable.coerce will raise ValueError.
        return Mutable.coerce(key, value)

    def __setitem__(self, key, value):
        "Detect dictionary set events and emit change events."
        dict.__setitem__(self, key, value)
        self.changed()

    def __delitem__(self, key):
        "Detect dictionary del events and emit change events."
        dict.__delitem__(self, key)
        self.changed()
| StarcoderdataPython |
9617300 | <gh_stars>10-100
from precise.skaters.covarianceutil.covrandom import random_band_cov
from precise.skaters.portfoliostatic.rpportfactory import rp_portfolio_factory
import numpy as np
from precise.skaters.locationutil.vectorfunctions import scatter
def test_rp():
    """Risk-parity weights from a random banded covariance: dimension check."""
    cov = random_band_cov(n_dim=5)
    print(np.shape(cov))
    weights = rp_portfolio_factory(cov=cov)
    assert len(weights) == np.shape(cov)[0], ' dim mismatch '
    # Per-asset risk contribution w_i * (Sigma w)_i, printed for inspection.
    print(weights * np.dot(cov, weights))
def test_rp_diag():
    """Risk parity on a diagonal covariance with two variance groups."""
    cov = np.diag(list(np.ones(5)) + list(2 * np.ones(5)))
    print(np.shape(cov))
    weights = rp_portfolio_factory(cov=cov, mu=0.02, risk_free_rate=0.02)
    assert len(weights) == np.shape(cov)[0], ' dim mismatch '
    # Per-asset risk contribution, printed for inspection.
    print(weights * np.dot(cov, weights))
if __name__=='__main__':
test_rp() | StarcoderdataPython |
6441739 | from UnitTest import UnitTest
from write import write, writebr
#from __pyjamas__ import debugger
class GeneratorTest(UnitTest):
    def testSimpleStatement(self):
        """Plain yield statements: generator locals persist across yields,
        and a generator method on a class instance works too.

        NOTE: Python 2 fixture code (uses g.next()); the code shapes here are
        the compiler-test payload and must not be restructured.
        """
        def fn():
            yield 1
            yield 2
        g = fn()
        self.assertEqual(g.next(), 1)
        self.assertEqual(g.next(), 2)
        for i, g in enumerate(fn()):
            self.assertEqual(i, g-1)
        # Local state (i, j) must survive across the yields below.
        def fn(n):
            i = 0
            yield i
            i += 1
            j = i
            yield i
            yield j
            j *= 100
            i += j
            yield j
            yield i
            yield n + i
        r = []
        for i in fn(8):
            r.append(i)
        self.assertEqual(r, [0, 1, 1, 100, 101, 109])
        # `A` presumably provides a generator method fn(); defined elsewhere
        # in this test module -- confirm.
        a = A()
        r = []
        for i in a.fn():
            r.append(i)
        self.assertEqual(r, [1,2])
    def testSimpleFor(self):
        """A for-loop body may yield; iteration order is preserved.

        NOTE: Python 2 fixture code (g.next()).
        """
        def fn():
            for i in [1,2]:
                yield i
        g = fn()
        self.assertEqual(g.next(), 1)
        self.assertEqual(g.next(), 2)
        for i, g in enumerate(fn()):
            self.assertEqual(i, g-1)
    def testSimpleWhile(self):
        """While loops with yields: loop condition is re-evaluated after each
        resume, and assignments between yields take effect.

        NOTE: Python 2 fixture code; the code shapes are the test payload.
        """
        def fn(n):
            i = 0
            while i < n:
                yield i
                yield i * 10
                i += 1
        r = []
        for i in fn(4):
            r.append(i)
        self.assertEqual(r, [0, 0, 1, 10, 2, 20, 3, 30])
        # Reassigning the loop variable mid-iteration ends the loop early.
        def fn(n):
            i = 0
            while i < n:
                yield i
                i = 100
                yield i
                i += 1
        r = []
        for i in fn(50):
            r.append(i)
        self.assertEqual(r, [0, 100])
        def fn():
            y = 0
            while y == 0:
                y += 1
                yield y
                y += 1
                yield y
        r = []
        for y in fn():
            r.append(y)
        self.assertEqual(r, [1, 2])
    def testSimpleIfThenElse(self):
        """Yields inside an if/elif/else ladder resume correctly each pass.

        NOTE: Python 2 fixture code; the code shapes are the test payload.
        """
        def fn(n):
            while n < 3:
                if n < 0:
                    yield "less than zero"
                elif n == 0:
                    yield "zero"
                elif n == 1:
                    yield "one"
                else:
                    yield "more than one"
                n += 1
        r = []
        for i in fn(-1):
            r.append(i)
        self.assertEqual(r, ['less than zero', 'zero', 'one', 'more than one'])
    def testSimpleTryBody(self):
        """Yields inside try bodies: except and finally clauses must interact
        correctly with suspension/resumption.

        NOTE: Python 2 fixture code; the code shapes are the test payload.
        """
        def fn():
            i = 1
            try:
                yield i+1
                yield i+2
            except:
                pass
        r = []
        for i in fn():
            r.append(i)
        self.assertEqual(r, [2,3])
        # The finally block runs (and may itself yield) after the try body.
        def fn():
            y = 0
            while y == 0:
                try:
                    y += 1
                    yield y
                    y += 1
                    yield y
                finally:
                    y += 2
                    yield y
        r = []
        for i in fn():
            r.append(i)
        self.assertEqual(r, [1,2,4])
    def testSimpleTryExceptElseFinally(self):
        """Full try/except/else/finally interplay with yields in every clause,
        including nested try blocks and a break out of the loop.

        NOTE: Python 2 fixture code (``except TypeError, e`` syntax); the
        exact code shapes and expected sequences are the test payload.
        """
        def f():
            try:
                yield 1
                raise ZeroDivisionError('')
            except:
                yield 2
        self.assertEqual(list(f()), [1, 2])
        # Nested try: the inner handler yields, then re-raises to the outer.
        def f():
            try:
                yield 1
                try:
                    yield 3
                    raise ZeroDivisionError('')
                except:
                    yield 4
                    raise ZeroDivisionError('')
            except:
                yield 2
        self.assertEqual(list(f()), [1, 3, 4, 2])
        def fn(n):
            for i in range(n):
                try:
                    if i == 0:
                        yield "try %d" % i
                    elif i < 3:
                        raise TypeError(i)
                    elif i == 3:
                        raise KeyError(i)
                except TypeError, e:
                    yield "TypeError %d (1)" % i
                    yield "TypeError %d (2)" % i
                except:
                    yield "Exception %d (1)" % i
                    yield "Exception %d (2)" % i
                else:
                    yield "else %d (1)" % i
                    yield "else %d (2)" % i
                finally:
                    yield "finally %d (1)" % i
                    yield "finally %d (2)" % i
        r = []
        for i in fn(5):
            r.append(i)
        self.assertEqual(r, ['try 0',
                             'else 0 (1)',
                             'else 0 (2)',
                             'finally 0 (1)',
                             'finally 0 (2)',
                             'TypeError 1 (1)',
                             'TypeError 1 (2)',
                             'finally 1 (1)',
                             'finally 1 (2)',
                             'TypeError 2 (1)',
                             'TypeError 2 (2)',
                             'finally 2 (1)',
                             'finally 2 (2)',
                             'Exception 3 (1)',
                             'Exception 3 (2)',
                             'finally 3 (1)',
                             'finally 3 (2)',
                             'else 4 (1)',
                             'else 4 (2)',
                             'finally 4 (1)',
                             'finally 4 (2)'])
        # Same shape, but a `break` in the else branch: finally still runs,
        # the else yields for i == 4 do not.
        def fn(n):
            for i in range(n):
                try:
                    if i == 0:
                        yield "try %d" % i
                    elif i < 3:
                        raise TypeError(i)
                    elif i == 3:
                        raise KeyError(i)
                    else:
                        break
                except TypeError, e:
                    yield "TypeError %d (1)" % i
                    yield "TypeError %d (2)" % i
                except:
                    yield "Exception %d (1)" % i
                    yield "Exception %d (2)" % i
                else:
                    yield "else %d (1)" % i
                    yield "else %d (2)" % i
                finally:
                    yield "finally %d (1)" % i
                    yield "finally %d (2)" % i
        r = []
        for i in fn(5):
            r.append(i)
        self.assertEqual(r, ['try 0',
                             'else 0 (1)',
                             'else 0 (2)',
                             'finally 0 (1)',
                             'finally 0 (2)',
                             'TypeError 1 (1)',
                             'TypeError 1 (2)',
                             'finally 1 (1)',
                             'finally 1 (2)',
                             'TypeError 2 (1)',
                             'TypeError 2 (2)',
                             'finally 2 (1)',
                             'finally 2 (2)',
                             'Exception 3 (1)',
                             'Exception 3 (2)',
                             'finally 3 (1)',
                             'finally 3 (2)',
                             'finally 4 (1)',
                             'finally 4 (2)'])
def testSend(self):
def fn(value=None):
while True:
value = (yield value)
g = fn(1)
self.assertEqual(g.next(), 1)
self.assertEqual(g.next(), None)
self.assertEqual(g.send(2), 2)
def testThrow(self):
def fn():
yield 1
yield 2
g = fn()
try:
r = g.throw(TypeError, 'test1')
self.fail("Exception expected (1)")
except TypeError, e:
self.assertTrue(e, 'test1')
try:
r = g.next()
self.fail("StopIteration expected (1)")
except StopIteration:
self.assertTrue(True)
g = fn()
self.assertEqual(g.next(), 1)
try:
r = g.throw(TypeError, 'test2')
self.fail("Exception expected (2)")
except TypeError, e:
self.assertTrue(e, 'test2')
try:
r = g.next()
self.fail("StopIteration expected (2)")
except StopIteration:
self.assertTrue(True)
def fn():
try:
yield 1
yield 2
except:
yield 3
g = fn()
try:
r = g.throw(TypeError, 'test3')
self.fail("Exception expected (3)")
except TypeError, e:
self.assertTrue(e, 'test3')
g = fn()
self.assertEqual(g.next(), 1)
try:
r = g.throw(TypeError, 'test4')
self.assertEqual(r, 3)
except TypeError, e:
self.fail("No exception expected (4)")
try:
r = g.next()
self.fail("StopIteration expected (4)")
except StopIteration:
self.assertTrue(True)
def fn():
yield 1
raise StopIteration
yield 2
try:
for i in fn():
pass
except StopIteration:
pass
self.assertEqual(i, 1)
def testClose(self):
def fn():
yield 1
yield 2
g = fn()
try:
r = g.close()
self.assertEqual(r, None)
except:
self.fail("No exception expected (1a)")
try:
r = g.next()
self.fail("StopIteration expected (1)")
except StopIteration:
self.assertTrue(True)
try:
r = g.close()
self.assertEqual(r, None)
except StopIteration:
self.fail("No exception expected (1b)")
g = fn()
self.assertEqual(g.next(), 1)
try:
r = g.close()
self.assertEqual(r, None)
except TypeError, e:
self.fail("No exception expected (2)")
try:
r = g.next()
self.fail("StopIteration expected (2)")
except StopIteration:
self.assertTrue(True)
def fn():
try:
yield 1
except:
yield 2
g = fn()
try:
r = g.close()
self.assertEqual(r, None)
except TypeError, e:
self.fail("No exception expected (3)")
g = fn()
self.assertEqual(g.next(), 1)
try:
r = g.close()
self.fail("RuntimeError expected (4)")
except RuntimeError, e:
self.assertEqual(e[0], 'generator ignored GeneratorExit')
def testPEP255_fib(self):
# http://www.python.org/dev/peps/pep-0255/
def fib():
a, b = 0, 1
while 1:
yield b
a, b = b, a+b
g = fib()
r = []
for i in range(6):
r.append(g.next())
self.assertEqual(r, [1, 1, 2, 3, 5, 8])
def testPEP255_recursion(self):
me = None
def g():
i = me.next()
yield i
me = g()
try:
me.next()
self.fail("ValueError expected")
except ValueError, e:
self.assertEqual(e[0], 'generator already executing')
def testPEP255_return(self):
def f1():
try:
return
except:
yield 1
self.assertEqual(list(f1()), [])
def f2():
try:
raise StopIteration
except:
yield 42
self.assertEqual(list(f2()), [42])
def testPEP255_exceptionPropagation(self):
def f():
v = 1/0 # See issue #265
return {}['not-there']
def g():
yield f() # the zero division exception propagates
yield 42 # and we'll never get here
k = g()
try:
k.next()
self.fail("Exception expected")
except ZeroDivisionError, e:
self.assertTrue(True)
except:
self.assertTrue(True, "ZeroDivisionError expected")
try:
k.next()
self.fail("StopIteration expected")
except StopIteration:
self.assertTrue(True)
def testPEP255_tryExceptFinally(self):
def f():
try:
yield 1
try:
yield 2
raise ZeroDivisionError()
#1/0
yield 3 # never get here
except ZeroDivisionError:
yield 4
yield 5
raise
except:
yield 6
yield 7 # the "raise" above stops this
except:
yield 8
yield 9
try:
x = 12
finally:
yield 10
yield 11
self.assertEqual(list(f()), [1, 2, 4, 5, 8, 9, 10, 11])
def testPEP255_exampleRecursive(self):
global inorder
# A recursive generator that generates Tree labels in in-order.
def _inorder(t):
if t:
for x in inorder(t.left):
yield x
yield t.label
for x in inorder(t.right):
yield x
inorder = _inorder
# Show it off: create a tree.
s = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
t = tree(s)
# Print the nodes of the tree in in-order.
res = ''
for x in t:
res += x
self.assertEqual(s, res)
def testPEP255_exampleNonRecursive(self):
global inorder
# A non-recursive generator.
def _inorder(node):
stack = []
while node:
while node.left:
stack.append(node)
node = node.left
yield node.label
while not node.right:
try:
node = stack.pop()
except IndexError:
return
yield node.label
node = node.right
inorder = _inorder
# Show it off: create a tree.
s = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
t = tree(s)
# Print the nodes of the tree in in-order.
res = ''
for x in t:
res += x
self.assertEqual(s, res)
def testMixed(self):
def fn(value = None):
for i in [-1,0,1,2,3,4]:
if i < 0:
continue
elif i == 0:
yield 0
elif i == 1:
yield 1
i = 0
yield value
yield 2
else:
try:
v = i/value
except:
v = i
yield v
r = []
for i in fn():
r.append(i)
self.assertEqual(r, [0, 1, None, 2, 2, 3, 4])
def testGenExp(self):
g = (child for child in [1,2,3])
self.assertEqual(g.next(), 1)
self.assertEqual(g.next(), 2)
try:
g.throw(KeyError, 'test')
except KeyError, e:
self.assertEqual(e[0], 'test')
if any(isinstance(child, int) for child in [1,2,3]):
self.assertTrue(True)
else:
self.fail("any(isinstance(child, int) for child in [1,2,3])")
if any(isinstance(child, int) for child in ['1','2','3']):
self.fail("any(isinstance(child, int) for child in ['1','2','3'])")
else:
self.assertTrue(True)
# #269 - whoops! webkit barfs / infinite loop on this one
a = A()
g = (child for child in a.fn())
self.assertEqual(g.next(), 1)
self.assertEqual(g.next(), 2)
def testTupleReturn(self):
lst = []
for t in enumerate([0,1,2]):
lst.append(t)
self.assertEqual(lst, [(0,0), (1,1), (2,2)])
lst = [t for t in enumerate([0,1,2])]
self.assertEqual(lst, [(0,0), (1,1), (2,2)])
class A(object):
def fn(self):
yield 1
yield 2
inorder = None
# A binary tree class.
class Tree:
def __init__(self, label, left=None, right=None):
self.label = label
self.left = left
self.right = right
def __repr__(self, level=0, indent=" "):
s = level*indent + repr(self.label)
if self.left:
s = s + "\n" + self.left.__repr__(level+1, indent)
if self.right:
s = s + "\n" + self.right.__repr__(level+1, indent)
return s
def __iter__(self):
return inorder(self)
# Create a Tree from a list.
def tree(list):
n = len(list)
if n == 0:
return []
i = n / 2
return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
| StarcoderdataPython |
5142824 | import graphene
from cookbook.ingredients.schema import (
Queries as IngredientQueries,
Mutations as IngredientMutations,
)
from cookbook.recipes.schema import (
Queries as RecipeQueries,
Mutations as RecipeMutations,
)
from graphene_django.debug import DjangoDebug
class Query(
IngredientQueries, RecipeQueries, graphene.ObjectType,
):
debug = graphene.Field(DjangoDebug, name="_debug")
class Mutation(
IngredientMutations, graphene.ObjectType,
):
debug = graphene.Field(DjangoDebug, name="_debug")
schema = graphene.Schema(query=Query, mutation=Mutation)
| StarcoderdataPython |
3300226 | from context_free_grammar import Grammar
def top_down_parser(grammar, input_string):
output_queue = [grammar.start_symbol]
input_queue = input_string.split()
return parse(grammar, input_queue, output_queue)
def parse(grammar, input_queue, output_queue):
print 'Input', input_queue
print 'Output', output_queue
print
if output_queue!=[]:
if output_queue[0] in grammar.non_terminal:
for production in grammar.production[output_queue[0]]:
solution = parse(grammar, input_queue, production + output_queue[1:])
if solution:
break
return solution
elif output_queue[0] in grammar.terminal:
if output_queue[0] == input_queue[0]:
return parse(grammar, input_queue[1:], output_queue[1:])
else:
print 'backtrack'
return False
else:
return True
def main():
grammar = Grammar()
grammar.add_non_terminal('S', 'A', 'B')
grammar.add_terminal('a', 'b', 'c', 'd', '+', '*')
grammar.set_start_symbol('S')
grammar.add_production_rule('S->A + B|A * B', 'A->a | b', 'B->c|d')
grammar.show_grammar()
print top_down_parser(grammar, 'a * d')
if __name__ == '__main__':
main()
| StarcoderdataPython |
3267510 | <gh_stars>0
"""Construct a maximum matching on a graph using the blossom algorithm.
Usage:
------
$ blossalg infile.csv [outfile.txt]
Description of infile:
The infile contains information on the number of nodes and the neighbours
of each node. This information is stored using a series of comma-delimited
binary-valued strings. Node N is identified by both the (N+1)th row and
column and a value of 1 indicates a node neighbour. By convention a node
cannot be a neighbour with itself.
For example, the infile of a three node graph where both node 0 and node 2
are neighbours of node 1 would look as follows:
0,1,0
1,0,1
0,1,0
Description of program output:
The program will compute the maximum matching of a user-supplied graph
using the blossom algorithm. The total number of matched nodes will be
output to screen. If an outfile is supplied (optional) the matched pairs
from this maximal matching will be saved to the file.
Description of outfile (optional):
In a user-supplied outfile, each node and its matched node will be stored
as 'node_number: matched_node_number'. The node number will correspond to
the node number from the infile (e.g. row 1 in the infile will represent
node 0 in the outfile). Each matched pair in the outfile will be separated
by a newline. By convention, unmatched nodes are not included in the
outfile.
Available options are:
-h, --help Show this help
Contact:
--------
- https://github.com/nenb
Version:
--------
- blossalg v1.0.0
"""
from __future__ import print_function
from __future__ import unicode_literals
# Standard library imports
from builtins import range
import re
import sys
import csv
from .blossom import Node, Graph
USAGE = "Usage: {} 'infile.csv' ['outfile.txt']".format(sys.argv[0])
args_pattern = re.compile(
r"""
^
(
(?P<HELP>-h|--help)|
((?P<ARG1>\w+\.csv))
(\s(?P<ARG2>\w+\.txt$))?
)
$
""",
re.VERBOSE,
)
def parse(arg_line):
args = {}
match_object = args_pattern.match(arg_line)
if match_object:
args = {
k: v
for k, v in list(match_object.groupdict().items())
if v is not None
}
return args
def read_infile(infile):
node_array = []
with open(infile) as csvfile:
for row in csv.reader(csvfile, delimiter=str(",")):
neighbours = [idx for idx, row in enumerate(row) if row == "1"]
node_array.append(neighbours)
if len(node_array) == 0:
raise SystemExit("Empty graph. Please supply a valid graph.")
return node_array
def compute_max_matching(node_array):
# Create node instances, fill node neighbours
nodelist = [Node() for _ in range(len(node_array))]
for idx, node in enumerate(node_array):
nodelist[idx].neighbors = [nodelist[node] for node in node]
# Create graph instance, construct graph
graph = Graph()
graph.nodes = {node.name: node for node in nodelist}
graph.compute_edges()
# Compute maximum matching
graph.find_max_matching()
return graph
def save_matched_pairs(matched_dict, outfile):
with open(outfile, "w") as textfile:
for pair in list(matched_dict.items()):
string = "{}:{}\n".format(pair[0], pair[1])
textfile.write(string)
def main():
args = parse(" ".join(sys.argv[1:]))
if not args:
raise SystemExit(USAGE)
if args.get("HELP"):
print(USAGE)
return
node_array = read_infile(args["ARG1"])
matched_graph = compute_max_matching(node_array)
# Multiple by two to convert number of matched pairs to matched nodes.
outstring = (
"""There are {} matched nodes in maximum matched graph.""".format(
int(2 * matched_graph.compute_size_matching())
)
)
print(outstring)
if args.get("ARG2"):
matched_dict = matched_graph.create_matching_dict()
save_matched_pairs(matched_dict, args["ARG2"])
if __name__ == "__main__":
main()
| StarcoderdataPython |
6557888 | from dataclasses import dataclass, field
@dataclass(eq=False)
class Node :
idnum : int
@dataclass
class Graph :
source : int
adjlist : dict
def PrimsMST(self):
priority_queue = { Node(self.source) : 0 }
added = [False] * len(self.adjlist)
min_span_tree_cost = 0
while priority_queue :
node = min(priority_queue, key=priority_queue.get)
cost = priority_queue[node]
del priority_queue[node]
if added[node.idnum] == False :
min_span_tree_cost += cost
added[node.idnum] = True
print("Node added: " + str(node.idnum) + ",cost: "+str(min_span_tree_cost))
for item in self.adjlist[node.idnum] :
adjnode = item[0]
adjcost = item[1]
if added[adjnode] == False :
priority_queue[Node(adjnode)] = adjcost
return min_span_tree_cost
def main() :
g1_edges_from_node = {}
g1_edges_from_node[0] = [ (1,1), (2,2), (3,1), (4,1), (5,2), (6,1) ]
g1_edges_from_node[1] = [ (0,1), (2,2), (6,2) ]
g1_edges_from_node[2] = [ (0,2), (1,2), (3,1) ]
g1_edges_from_node[3] = [ (0,1), (2,1), (4,2) ]
g1_edges_from_node[4] = [ (0,1), (3,2), (5,2) ]
g1_edges_from_node[5] = [ (0,2), (4,2), (6,1) ]
g1_edges_from_node[6] = [ (0,1), (2,2), (5,1) ]
g1 = Graph(0, g1_edges_from_node)
cost = g1.PrimsMST()
print("Cost of the minimum spanning tree in graph 1 : " + str(cost) +"\n")
g2_edges_from_node = {}
g2_edges_from_node[0] = [ (1,4), (2,1), (3,5) ];
g2_edges_from_node[1] = [ (0,4), (3,2), (4,3), (5,3) ];
g2_edges_from_node[2] = [ (0,1), (3,2), (4,8) ];
g2_edges_from_node[3] = [ (0,5), (1,2), (2,2), (4,1) ];
g2_edges_from_node[4] = [ (1,3), (2,8), (3,1), (5,3) ];
g2_edges_from_node[5] = [ (1,3), (4,3) ];
g2 = Graph(0, g2_edges_from_node)
cost = g2.PrimsMST()
print("Cost of the minimum spanning tree in graph 1 : " + str(cost))
if __name__ == "__main__" :
main() | StarcoderdataPython |
3218647 | # Instructions
# Use the Airflow context in the pythonoperator to complete the TODOs below. Once you are done, run your DAG and check the logs to see the context in use.
import datetime
import logging
from airflow import DAG
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
from airflow.hooks.S3_hook import S3Hook
def log_details(*args, **kwargs):
#
# TODO: Extract ds, run_id, prev_ds, and next_ds from the kwargs, and log them
# NOTE: Look here for context variables passed in on kwargs:
# https://airflow.apache.org/macros.html
#
ds = kwargs['ds'] # kwargs[]
run_id = kwargs['run_id'] # kwargs[]
previous_ds = kwargs['prev_ds'] # kwargs.get('')
next_ds = kwargs['next_ds'] # kwargs.get('')
logging.info(f"Execution date is {ds}")
logging.info(f"My run id is {run_id}")
if previous_ds:
logging.info(f"My previous run was on {previous_ds}")
if next_ds:
logging.info(f"My next run will be {next_ds}")
dag = DAG(
'lesson1.exercise5',
schedule_interval="@daily",
start_date=datetime.datetime.now() - datetime.timedelta(days=2)
)
list_task = PythonOperator(
task_id="log_details",
python_callable=log_details,
provide_context=True,
dag=dag
)
| StarcoderdataPython |
3412415 | <gh_stars>10-100
import mitmproxy.http
import json
from api.baidu import FaceDetect
from lib.shortid import Short_ID
face = FaceDetect()
spider_id = Short_ID()
class Fans():
def response(self, flow: mitmproxy.http.flow):
if "aweme/v1/user/?user_id" in flow.request.url:
user = json.loads(flow.response.text)["user"]
short_id = user["short_id"]
nickname = user['nickname']
uid = user["uid"]
avatar = user["avatar_larger"]["url_list"][0]
beauty = face(avatar)
short_id = spider_id(uid) if short_id == "0" else short_id
data = {
"short_id": short_id,
"nickname": nickname,
"uid": uid,
"beauty": beauty
}
print(data)
| StarcoderdataPython |
1822726 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.web
import tornado.gen
import tornado.ioloop
import time
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html", user="tttt")
def post(self):
username = self.get_argument("username")
print username
password = self.get_argument("password")
self.write(username)
class SleepHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self, *args, **kwargs):
yield tornado.gen.Task(tornado.ioloop.IOLoop.instance().add_timeout, time.time()+17)
self.render("sleep.html", user="sleep")
class SeeHandler(tornado.web.RequestHandler):
def get(self, *args, **kwargs):
self.render("see.html", user="see")
class Test_2:
pass
| StarcoderdataPython |
9696449 | <reponame>wangyum/anaconda
# -*- coding: utf-8 -*-
'''
py-translate
============
A Translation Tool for Humans
'''
from .translator import *
from .languages import *
from .coroutines import *
from .tests import TestTranslator, TestLanguages
from .__version__ import __version__
from .__version__ import __build__
__title__ = 'py-translate'
__author__ = '<NAME>'
__license__ = 'Apache Software License Version 2.0'
__copyright__ = '(c) 2014 <NAME>'
| StarcoderdataPython |
3296952 | from dingtalker import DingTalker
if __name__ == "__main__":
client = "test"
d = DingTalker()
d.sendText(client, "Hi", atAll=True)
d.sendText(client, "大家都打了吗?", "18079637336")
d.sendMarkdown(client, "标题", "## 大家都打了吗?", "18079637336")
d.sendLink(client, "好消息!好消息!", "本群与百度成功达成合作关系,今后大家有什么不懂的可以直接百度搜索,不用再群里提问浪费时间啦!", "https://www.baidu.com",
"http://www.baidu.com/img/bd_logo1.png", atAll=True)
d.sendActionCard(client, "标题",
"\n### 乔布斯20年前想打造一间苹果咖啡厅,而它正是AppleStore的前身 @18079637336",
[
("内容不错", "https://www.cnblogs.com/kancy/p/13470386.html"),
("不感兴趣", "https://www.cnblogs.com/kancy/p/13912443.html")
], "18079637336")
d.sendFeedCard(client, [
("定位占用CPU较高的进程、线程、代码位置?", "https://www.cnblogs.com/kancy/p/13470386.html",
"https://img1.baidu.com/it/u=3312920655,3266355600&fm=26&fmt=auto"),
("浅谈我对DDD领域驱动设计的理解", "https://www.cnblogs.com/kancy/p/13425737.html"),
("单元测试之PowerMock", "https://www.cnblogs.com/kancy/p/13912443.html"),
("正确创建索引和索引失效", "https://www.cnblogs.com/kancy/p/13460140.html")
], atAll=True)
| StarcoderdataPython |
304240 | <filename>Python3/1385-Find-the-Distance-Value-Between-Two-Arrays/soln.py
class Solution:
def findTheDistanceValue(self, arr1: List[int], arr2: List[int], d: int) -> int:
return sum(all(abs(val1 - val2) > d for val2 in arr2) for val1 in arr1)
| StarcoderdataPython |
217649 | import json
import cli
(options, args) = cli.getOptions()
cowtan = open(options.source, "r")
output = {}
output['name'] = options.name
cowtanData = []
output['data'] = cowtanData
for line in cowtan.readlines():
currentMonth = {}
data = line.split()
yearMonth = data[0].split('/')
year = int(yearMonth[0])
if(len(yearMonth) == 2):
month = int(yearMonth[1])
currentMonth['month'] = month
currentMonth['year'] = year
currentMonth['mean'] = float(data[1])
currentMonth['totalUncertainty'] = float(data[2])
currentMonth['coverageUncertainty'] = float(data[3])
currentMonth['ensembleUncertainty'] = float(data[4])
cowtanData.append(currentMonth)
if(options.outputFile):
with open(options.outputFile, 'w') as outfile:
json.dump(output, outfile)
if(options.verbose):
print(json.dumps(output, indent=2))
cowtan.close()
| StarcoderdataPython |
3407809 | <gh_stars>10-100
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import unittest
import numpy as np
import faiss
import math
class EvalIVFPQAccuracy(unittest.TestCase):
def get_dataset(self, small_one=False):
if not small_one:
d = 128
nb = 100000
nt = 15000
nq = 2000
else:
d = 32
nb = 10000
nt = 1000
nq = 200
np.random.seed(123)
# generate points in a low-dim subspace to make the resutls
# look better :-)
d1 = 16
q, r = np.linalg.qr(np.random.randn(d, d))
qc = q[:d1, :]
def make_mat(n):
return np.dot(
np.random.random(size=(nb, d1)), qc).astype('float32')
return (make_mat(nt), make_mat(nb), make_mat(nq))
def test_mm(self):
# trouble with MKL+fbmake that appears only at runtime. Check it here
x = np.random.random(size=(100, 20)).astype('float32')
mat = faiss.PCAMatrix(20, 10)
mat.train(x)
mat.apply_py(x)
def do_cpu_to_gpu(self, index_key):
ts = []
ts.append(time.time())
(xt, xb, xq) = self.get_dataset(small_one=True)
nb, d = xb.shape
index = faiss.index_factory(d, index_key)
if index.__class__ == faiss.IndexIVFPQ:
# speed up test
index.pq.cp.niter = 2
index.do_polysemous_training = False
ts.append(time.time())
index.train(xt)
ts.append(time.time())
# adding some ids because there was a bug in this case
index.add_with_ids(xb, np.arange(nb) * 3 + 12345)
ts.append(time.time())
index.nprobe = 4
Dref, Iref = index.search(xq, 10)
ts.append(time.time())
res = faiss.StandardGpuResources()
gpu_index = faiss.index_cpu_to_gpu(res, 0, index)
ts.append(time.time())
# Validate the layout of the memory info
mem_info = res.getMemoryInfo()
assert type(mem_info) == dict
assert type(mem_info[0]['FlatData']) == tuple
assert type(mem_info[0]['FlatData'][0]) == int
assert type(mem_info[0]['FlatData'][1]) == int
gpu_index.setNumProbes(4)
Dnew, Inew = gpu_index.search(xq, 10)
ts.append(time.time())
print('times:', [t - ts[0] for t in ts])
# Give us some margin of error
self.assertGreaterEqual((Iref == Inew).sum(), Iref.size - 50)
if faiss.get_num_gpus() == 1:
return
for shard in False, True:
# test on just 2 GPUs
res = [faiss.StandardGpuResources() for i in range(2)]
co = faiss.GpuMultipleClonerOptions()
co.shard = shard
gpu_index = faiss.index_cpu_to_gpu_multiple_py(res, index, co)
faiss.GpuParameterSpace().set_index_parameter(
gpu_index, 'nprobe', 4)
Dnew, Inew = gpu_index.search(xq, 10)
# 0.99: allow some tolerance in results otherwise test
# fails occasionally (not reproducible)
self.assertGreaterEqual((Iref == Inew).sum(), Iref.size * 0.99)
def test_cpu_to_gpu_IVFPQ(self):
self.do_cpu_to_gpu('IVF128,PQ4')
def test_cpu_to_gpu_IVFFlat(self):
self.do_cpu_to_gpu('IVF128,Flat')
def test_set_gpu_param(self):
index = faiss.index_factory(12, "PCAR8,IVF10,PQ4")
res = faiss.StandardGpuResources()
gpu_index = faiss.index_cpu_to_gpu(res, 0, index)
faiss.GpuParameterSpace().set_index_parameter(gpu_index, "nprobe", 3)
class ReferencedObject(unittest.TestCase):
d = 16
xb = np.random.rand(256, d).astype('float32')
nlist = 128
d_bin = 256
xb_bin = np.random.randint(256, size=(10000, d_bin // 8)).astype('uint8')
xq_bin = np.random.randint(256, size=(1000, d_bin // 8)).astype('uint8')
def test_proxy(self):
index = faiss.IndexReplicas()
for _i in range(3):
sub_index = faiss.IndexFlatL2(self.d)
sub_index.add(self.xb)
index.addIndex(sub_index)
assert index.d == self.d
index.search(self.xb, 10)
def test_resources(self):
# this used to crash!
index = faiss.index_cpu_to_gpu(faiss.StandardGpuResources(), 0,
faiss.IndexFlatL2(self.d))
index.add(self.xb)
def test_flat(self):
index = faiss.GpuIndexFlat(faiss.StandardGpuResources(),
self.d, faiss.METRIC_L2)
index.add(self.xb)
def test_ivfflat(self):
index = faiss.GpuIndexIVFFlat(
faiss.StandardGpuResources(),
self.d, self.nlist, faiss.METRIC_L2)
index.train(self.xb)
def test_ivfpq(self):
index_cpu = faiss.IndexIVFPQ(
faiss.IndexFlatL2(self.d),
self.d, self.nlist, 2, 8)
# speed up test
index_cpu.pq.cp.niter = 2
index_cpu.do_polysemous_training = False
index_cpu.train(self.xb)
index = faiss.GpuIndexIVFPQ(
faiss.StandardGpuResources(), index_cpu)
index.add(self.xb)
def test_binary_flat(self):
k = 10
index_ref = faiss.IndexBinaryFlat(self.d_bin)
index_ref.add(self.xb_bin)
D_ref, I_ref = index_ref.search(self.xq_bin, k)
index = faiss.GpuIndexBinaryFlat(faiss.StandardGpuResources(),
self.d_bin)
index.add(self.xb_bin)
D, I = index.search(self.xq_bin, k)
for d_ref, i_ref, d_new, i_new in zip(D_ref, I_ref, D, I):
# exclude max distance
assert d_ref.max() == d_new.max()
dmax = d_ref.max()
# sort by (distance, id) pairs to be reproducible
ref = [(d, i) for d, i in zip(d_ref, i_ref) if d < dmax]
ref.sort()
new = [(d, i) for d, i in zip(d_new, i_new) if d < dmax]
new.sort()
assert ref == new
def test_stress(self):
# a mixture of the above, from issue #631
target = np.random.rand(50, 16).astype('float32')
index = faiss.IndexReplicas()
size, dim = target.shape
num_gpu = 4
for _i in range(num_gpu):
config = faiss.GpuIndexFlatConfig()
config.device = 0 # simulate on a single GPU
sub_index = faiss.GpuIndexFlatIP(faiss.StandardGpuResources(), dim, config)
index.addIndex(sub_index)
index = faiss.IndexIDMap(index)
ids = np.arange(size)
index.add_with_ids(target, ids)
class TestShardedFlat(unittest.TestCase):
def test_sharded(self):
d = 32
nb = 1000
nq = 200
k = 10
rs = np.random.RandomState(123)
xb = rs.rand(nb, d).astype('float32')
xq = rs.rand(nq, d).astype('float32')
index_cpu = faiss.IndexFlatL2(d)
assert faiss.get_num_gpus() > 1
co = faiss.GpuMultipleClonerOptions()
co.shard = True
index = faiss.index_cpu_to_all_gpus(index_cpu, co, ngpu=2)
index.add(xb)
D, I = index.search(xq, k)
index_cpu.add(xb)
D_ref, I_ref = index_cpu.search(xq, k)
assert np.all(I == I_ref)
del index
index2 = faiss.index_cpu_to_all_gpus(index_cpu, co, ngpu=2)
D2, I2 = index2.search(xq, k)
assert np.all(I2 == I_ref)
try:
index2.add(xb)
except RuntimeError:
pass
else:
assert False, "this call should fail!"
class TestGPUKmeans(unittest.TestCase):
def test_kmeans(self):
d = 32
nb = 1000
k = 10
rs = np.random.RandomState(123)
xb = rs.rand(nb, d).astype('float32')
km1 = faiss.Kmeans(d, k)
obj1 = km1.train(xb)
km2 = faiss.Kmeans(d, k, gpu=True)
obj2 = km2.train(xb)
print(obj1, obj2)
assert np.allclose(obj1, obj2)
class TestAlternativeDistances(unittest.TestCase):
def do_test(self, metric, metric_arg=0):
res = faiss.StandardGpuResources()
d = 32
nb = 1000
nq = 100
rs = np.random.RandomState(123)
xb = rs.rand(nb, d).astype('float32')
xq = rs.rand(nq, d).astype('float32')
index_ref = faiss.IndexFlat(d, metric)
index_ref.metric_arg = metric_arg
index_ref.add(xb)
Dref, Iref = index_ref.search(xq, 10)
# build from other index
index = faiss.GpuIndexFlat(res, index_ref)
Dnew, Inew = index.search(xq, 10)
np.testing.assert_array_equal(Inew, Iref)
np.testing.assert_allclose(Dnew, Dref, rtol=1e-6)
# build from scratch
index = faiss.GpuIndexFlat(res, d, metric)
index.metric_arg = metric_arg
index.add(xb)
Dnew, Inew = index.search(xq, 10)
np.testing.assert_array_equal(Inew, Iref)
def test_L1(self):
self.do_test(faiss.METRIC_L1)
def test_Linf(self):
self.do_test(faiss.METRIC_Linf)
def test_Lp(self):
self.do_test(faiss.METRIC_Lp, 0.7)
class TestGpuRef(unittest.TestCase):
def test_gpu_ref(self):
# this crashes
dim = 256
training_data = np.random.randint(256, size=(10000, dim // 8)).astype('uint8')
centroids = 330
def create_cpu(dim):
quantizer = faiss.IndexBinaryFlat(dim)
return faiss.IndexBinaryIVF(quantizer, dim, centroids)
def create_gpu(dim):
gpu_quantizer = faiss.index_cpu_to_all_gpus(faiss.IndexFlatL2(dim))
index = create_cpu(dim)
index.clustering_index = gpu_quantizer
index.dont_dealloc_me = gpu_quantizer
return index
index = create_gpu(dim)
index.verbose = True
index.cp.verbose = True
index.train(training_data)
class TestInterleavedIVFPQLayout(unittest.TestCase):
def test_interleaved(self):
res = faiss.StandardGpuResources()
d = 128
nb = 5000
nq = 50
rs = np.random.RandomState(123)
xb = rs.rand(nb, d).astype('float32')
xq = rs.rand(nq, d).astype('float32')
nlist = int(math.sqrt(nb))
sub_q = 16
bits_per_code = 8
nprobe = 4
config = faiss.GpuIndexIVFPQConfig()
config.alternativeLayout = True
idx_gpu = faiss.GpuIndexIVFPQ(res, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2, config)
q = faiss.IndexFlatL2(d)
idx_cpu = faiss.IndexIVFPQ(q, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2)
idx_gpu.train(xb)
idx_gpu.add(xb)
idx_cpu.train(xb)
idx_cpu.add(xb)
idx_gpu.nprobe = nprobe
idx_cpu.nprobe = nprobe
# Try without precomputed codes
d_g, i_g = idx_gpu.search(xq, 10)
d_c, i_c = idx_cpu.search(xq, 10)
self.assertGreaterEqual((i_g == i_c).sum(), i_g.size - 10)
self.assertTrue(np.allclose(d_g, d_c))
# Try with precomputed codes (different kernel)
idx_gpu.setPrecomputedCodes(True)
d_g, i_g = idx_gpu.search(xq, 10)
d_c, i_c = idx_cpu.search(xq, 10)
self.assertGreaterEqual((i_g == i_c).sum(), i_g.size - 10)
self.assertTrue(np.allclose(d_g, d_c))
def test_copy_to_cpu(self):
res = faiss.StandardGpuResources()
d = 128
nb = 5000
nq = 50
rs = np.random.RandomState(234)
xb = rs.rand(nb, d).astype('float32')
xq = rs.rand(nq, d).astype('float32')
nlist = int(math.sqrt(nb))
sub_q = 16
bits_per_code = 8
nprobe = 4
config = faiss.GpuIndexIVFPQConfig()
config.alternativeLayout = True
idx_gpu = faiss.GpuIndexIVFPQ(res, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2, config)
q = faiss.IndexFlatL2(d)
idx_cpu = faiss.IndexIVFPQ(q, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2)
idx_gpu.train(xb)
idx_gpu.add(xb)
idx_gpu.copyTo(idx_cpu)
idx_gpu.nprobe = nprobe
idx_cpu.nprobe = nprobe
# Try without precomputed codes
d_g, i_g = idx_gpu.search(xq, 10)
d_c, i_c = idx_cpu.search(xq, 10)
self.assertGreaterEqual((i_g == i_c).sum(), i_g.size - 10)
self.assertTrue(np.allclose(d_g, d_c))
# Try with precomputed codes (different kernel)
idx_gpu.setPrecomputedCodes(True)
d_g, i_g = idx_gpu.search(xq, 10)
d_c, i_c = idx_cpu.search(xq, 10)
self.assertGreaterEqual((i_g == i_c).sum(), i_g.size - 10)
self.assertTrue(np.allclose(d_g, d_c))
def test_copy_to_gpu(self):
res = faiss.StandardGpuResources()
d = 128
nb = 5000
nq = 50
rs = np.random.RandomState(567)
xb = rs.rand(nb, d).astype('float32')
xq = rs.rand(nq, d).astype('float32')
nlist = int(math.sqrt(nb))
sub_q = 16
bits_per_code = 8
nprobe = 4
config = faiss.GpuIndexIVFPQConfig()
config.alternativeLayout = True
idx_gpu = faiss.GpuIndexIVFPQ(res, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2, config)
q = faiss.IndexFlatL2(d)
idx_cpu = faiss.IndexIVFPQ(q, d, nlist, sub_q, bits_per_code, faiss.METRIC_L2)
idx_cpu.train(xb)
idx_cpu.add(xb)
idx_gpu.copyFrom(idx_cpu)
idx_gpu.nprobe = nprobe
idx_cpu.nprobe = nprobe
# Try without precomputed codes
d_g, i_g = idx_gpu.search(xq, 10)
d_c, i_c = idx_cpu.search(xq, 10)
self.assertGreaterEqual((i_g == i_c).sum(), i_g.size - 10)
self.assertTrue(np.allclose(d_g, d_c))
# Try with precomputed codes (different kernel)
idx_gpu.setPrecomputedCodes(True)
d_g, i_g = idx_gpu.search(xq, 10)
d_c, i_c = idx_cpu.search(xq, 10)
self.assertGreaterEqual((i_g == i_c).sum(), i_g.size - 10)
self.assertTrue(np.allclose(d_g, d_c))
def make_t(num, d, clamp=False, seed=None):
rs = None
if seed is None:
rs = np.random.RandomState(123)
else:
rs = np.random.RandomState(seed)
x = rs.rand(num, d).astype(np.float32)
if clamp:
x = (x * 255).astype('uint8').astype('float32')
return x
class TestBruteForceDistance(unittest.TestCase):
def test_bf_input_types(self):
d = 33
k = 5
nb = 1000
nq = 10
xs = make_t(nb, d)
qs = make_t(nq, d)
res = faiss.StandardGpuResources()
# Get ground truth using IndexFlat
index = faiss.IndexFlatL2(d)
index.add(xs)
ref_d, ref_i = index.search(qs, k)
out_d = np.empty((nq, k), dtype=np.float32)
out_i = np.empty((nq, k), dtype=np.int64)
# Try f32 data/queries, i64 out indices
params = faiss.GpuDistanceParams()
params.k = k
params.dims = d
params.vectors = faiss.swig_ptr(xs)
params.numVectors = nb
params.queries = faiss.swig_ptr(qs)
params.numQueries = nq
params.outDistances = faiss.swig_ptr(out_d)
params.outIndices = faiss.swig_ptr(out_i)
faiss.bfKnn(res, params)
self.assertTrue(np.allclose(ref_d, out_d, atol=1e-5))
self.assertGreaterEqual((out_i == ref_i).sum(), ref_i.size)
# Try int32 out indices
out_i32 = np.empty((nq, k), dtype=np.int32)
params.outIndices = faiss.swig_ptr(out_i32)
params.outIndicesType = faiss.IndicesDataType_I32
faiss.bfKnn(res, params)
self.assertEqual((out_i32 == ref_i).sum(), ref_i.size)
# Try float16 data/queries, i64 out indices
xs_f16 = xs.astype(np.float16)
qs_f16 = qs.astype(np.float16)
xs_f16_f32 = xs_f16.astype(np.float32)
qs_f16_f32 = qs_f16.astype(np.float32)
index.reset()
index.add(xs_f16_f32)
ref_d_f16, ref_i_f16 = index.search(qs_f16_f32, k)
params.vectors = faiss.swig_ptr(xs_f16)
params.vectorType = faiss.DistanceDataType_F16
params.queries = faiss.swig_ptr(qs_f16)
params.queryType = faiss.DistanceDataType_F16
out_d_f16 = np.empty((nq, k), dtype=np.float32)
out_i_f16 = np.empty((nq, k), dtype=np.int64)
params.outDistances = faiss.swig_ptr(out_d_f16)
params.outIndices = faiss.swig_ptr(out_i_f16)
params.outIndicesType = faiss.IndicesDataType_I64
faiss.bfKnn(res, params)
self.assertGreaterEqual((out_i_f16 == ref_i_f16).sum(), ref_i_f16.size - 5)
self.assertTrue(np.allclose(ref_d_f16, out_d_f16, atol = 2e-3))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
6482586 | from copy import copy
from pandas import concat, Index, MultiIndex, Series
from typing import Optional, Union, List, Dict
from survey.mixins.data_types.single_category_mixin import SingleCategoryMixin
class SingleCategoryStackMixin(object):
    """Mixin for question groups whose items can be stacked into a single
    long-format item (one row per respondent-item response)."""
    # items defined by the group this mixin is applied to
    items: List[SingleCategoryMixin]
    # mapping of group key -> item, used to recover each item's key
    _item_dict: Dict[str, SingleCategoryMixin]
    def stack(
            self, name: str,
            drop_na: bool = True,
            name_index: Optional[str] = None,
            key_index: Optional[str] = None,
            number_index: Optional[str] = None,
            number_mappings: Optional[Union[List[str], Dict[int, str]]] = None,
            **kwargs
    ) -> SingleCategoryMixin:
        """
        Stack the responses to each item in the group into a new item.
        :param name: Name for the new item.
        :param drop_na: Whether to drop rows where the respondent was not asked
                        the item.
        :param name_index: Name of a new index column to create with values
                           corresponding to the name of the item the data
                           comes from.
        :param number_index: Name of a new index column to create with values
                             corresponding to the number of the item the data
                             comes from.
        :param key_index: Name of a new index column to create with values
                          corresponding to the item's key in the group.
        :param number_mappings: List of string or dict of ints to strings to
                                convert number index values.
        :param kwargs: Optional new attribute values to override in the new
                       item.

        Note: when several index columns are requested they are ordered
        name_index, key_index, number_index in the resulting MultiIndex.
        """
        if name == '':
            raise ValueError('Name cannot be empty.')
        # create index names
        # first level keeps the respondent index name of the original items
        index_names = [self.items[0].data.index.name]
        if name_index is not None:
            index_names.append(name_index)
        if key_index is not None:
            index_names.append(key_index)
        if number_index is not None:
            index_names.append(number_index)
        question_datas = []
        item: SingleCategoryMixin
        for item in self.items:
            # create data
            item_data = item.data
            if drop_na:
                item_data = item_data.dropna()
            # create index
            index_list = item_data.index.to_list()
            if name_index is not None:
                # repeat the item's name for every row it contributes
                name_list = [item.name] * len(item_data)
            else:
                name_list = None
            if key_index is not None:
                # reverse-lookup of the item's key in the group dict
                key_list = [
                    [k for k in self._item_dict.keys()
                     if self._item_dict[k] is item][0]
                ] * len(item_data)
            else:
                key_list = None
            if number_index is not None:
                if number_mappings is None:
                    # positional number of the item within the group
                    number_list = [self.items.index(item)] * len(item_data)
                else:
                    number_list = [
                        number_mappings[self.items.index(item)]
                    ] * len(item_data)
            else:
                number_list = None
            if name_list is None and key_list is None and number_list is None:
                # no extra levels requested: keep a flat index
                item_data.index = Index(data=index_list, name=index_names[0])
            else:
                # zip only the index levels that were actually requested
                index_tuples = list(
                    zip(*[ix_list
                          for ix_list in [index_list, name_list,
                                          key_list, number_list]
                          if ix_list is not None])
                )
                item_data.index = MultiIndex.from_tuples(
                    tuples=index_tuples, names=index_names
                )
            question_datas.append(item_data)
        new_data = concat(question_datas, axis=0)
        new_data = Series(data=new_data, name=name, index=new_data.index)
        # copy question
        # shallow-copy the first item so the new item inherits its metadata
        new_question = copy(self.items[0])
        new_question.name = name
        new_question._data = new_data
        for kw, arg in kwargs.items():
            setattr(new_question, kw, arg)
        return new_question
| StarcoderdataPython |
73909 | <filename>qlib/backtest/signal.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import abc
from typing import Dict, List, Text, Tuple, Union
import pandas as pd
from qlib.utils import init_instance_by_config
from ..data.dataset import Dataset
from ..data.dataset.utils import convert_index_format
from ..model.base import BaseModel
from ..utils.resam import resam_ts_data
class Signal(metaclass=abc.ABCMeta):
    """
    Some trading strategies make decisions based on other prediction signals.
    The signals may come from different sources (e.g. prepared data, online prediction from a model and dataset).
    This interface tries to provide a unified interface for those different sources.
    """
    @abc.abstractmethod
    def get_signal(self, start_time, end_time) -> Union[pd.Series, pd.DataFrame, None]:
        """
        Get the signal at the end of the decision step (from `start_time` to `end_time`).
        Returns
        -------
        Union[pd.Series, pd.DataFrame, None]:
            returns None if there is no signal in the specific day
        """
class SignalWCache(Signal):
    """
    Signal with a pandas-based cache.
    SignalWCache stores the prepared signal as an attribute and answers queries against that cache.
    """
    def __init__(self, signal: Union[pd.Series, pd.DataFrame]):
        """
        Parameters
        ----------
        signal : Union[pd.Series, pd.DataFrame]
            The expected format of the signal is like the data below (the order of index is not important and can be automatically adjusted)
                instrument datetime
                SH600000   2008-01-02  0.079704
                           2008-01-03  0.120125
                           2008-01-04  0.878860
                           2008-01-07  0.505539
                           2008-01-08  0.395004
        """
        # normalize so `datetime` is the outer index level, as resam_ts_data expects
        self.signal_cache = convert_index_format(signal, level="datetime")
    def get_signal(self, start_time, end_time) -> Union[pd.Series, pd.DataFrame]:
        # the frequency of the signal may not align with the decision frequency of the strategy,
        # so resampling from the data is necessary.
        # the latest signal leverages the most recent data and is therefore used in trading.
        signal = resam_ts_data(self.signal_cache, start_time=start_time, end_time=end_time, method="last")
        return signal
class ModelSignal(SignalWCache):
    """Signal produced by running a model's prediction over a dataset once,
    then cached via SignalWCache."""
    def __init__(self, model: BaseModel, dataset: Dataset):
        self.model = model
        self.dataset = dataset
        pred_scores = self.model.predict(dataset)
        if isinstance(pred_scores, pd.DataFrame):
            # only the first column of a multi-column prediction is used as the score
            pred_scores = pred_scores.iloc[:, 0]
        super().__init__(pred_scores)
    def _update_model(self):
        """
        When using online data, update the model in each bar with the following steps:
        - update the dataset with online data; the dataset should support online update
        - make the latest prediction scores for the new bar
        - merge the prediction score into the latest prediction
        """
        # TODO: this method is not included in the framework and could be refactored later
        raise NotImplementedError("_update_model is not implemented!")
def create_signal_from(
    obj: Union[Signal, Tuple[BaseModel, Dataset], List, Dict, Text, pd.Series, pd.DataFrame],
) -> Signal:
    """
    Create a signal from diverse information.

    This method chooses the right way to build a signal based on the type of `obj`:
    - Signal: returned unchanged
    - (model, dataset) tuple/list: wrapped in a ModelSignal
    - dict/str config: instantiated via init_instance_by_config
    - pd.Series / pd.DataFrame: wrapped in a SignalWCache

    Raises
    ------
    NotImplementedError
        If `obj` is of an unsupported type.
    """
    if isinstance(obj, Signal):
        return obj
    elif isinstance(obj, (tuple, list)):
        return ModelSignal(*obj)
    elif isinstance(obj, (dict, str)):
        return init_instance_by_config(obj)
    elif isinstance(obj, (pd.DataFrame, pd.Series)):
        return SignalWCache(signal=obj)
    else:
        # Fix: the original f-string had no placeholder; include the offending
        # type so the error is actionable.
        raise NotImplementedError(f"This type of signal is not supported: {type(obj)}")
| StarcoderdataPython |
21338 | <reponame>vaedit/-<gh_stars>1-10
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from email.header import Header
# Send-mail helper (Python 2: uses print statements and implicit str encoding)
def smail(sub,body):
    """Send a plain-text UTF-8 email with subject `sub` and body `body`
    via smtp.163.com to a hard-coded To/Cc list."""
    tolist = ["<EMAIL>", "<EMAIL>"]
    cc = ["<EMAIL>", "<EMAIL>"]
    sender = '管理员 <<EMAIL>>'
    subject = sub
    smtpserver = 'smtp.163.com'
    username = '<EMAIL>'
    password = '<PASSWORD>'
    messages = body
    msg = MIMEText(messages, 'plain', 'utf-8')
    msg['Subject'] = Header(subject, 'utf-8')
    msg['From'] = sender
    msg['To'] = ','.join(tolist)
    msg['Cc'] = ','.join(cc)
    try:
        s = smtplib.SMTP()
        s.connect(smtpserver, '25')
        s.login(username, password)
        # Cc recipients must be included in the envelope explicitly
        s.sendmail(sender, tolist+cc, msg.as_string())
        s.quit()
        print '邮件发送成功'
    except Exception as e:
        print '邮件发送失败:%s' %e
| StarcoderdataPython |
5102730 | <reponame>napjon/moocs_solution<gh_stars>10-100
def hand_rank(hand):
    "Return a value indicating how high the hand ranks."
    # counts is the count of each rank; ranks lists the corresponding ranks.
    # E.g. '7 T 7 9 7' => counts = (3, 1, 1); ranks = (7, 10, 9)
    # (equal counts are ordered with the highest rank first)
    # BUG FIX: the rank string previously read '--23456789TJKA': it lacked
    # 'Q', so any hand containing a queen raised ValueError, and it mapped
    # the ace to 13 instead of 14, so the ace-low straight special case
    # below could never trigger.
    groups = group(['--23456789TJQKA'.index(r) for r,s in hand])
    counts, ranks = unzip(groups)
    if ranks == (14,5,4,3,2):  # ace-low straight: treat the ace as 1
        ranks = (5,4,3,2,1)
    straight = len(set(ranks)) == 5 and max(ranks)-min(ranks) == 4
    flush = len(set(([s for r,s in hand]))) == 1
    return (9 if (5,)== counts else
            8 if straight and flush else
            7 if (4,1) == counts else
            6 if (3,2) == counts else
            5 if flush else
            4 if straight else
            3 if (3,1,1) == counts else
            2 if (2,2,1) == counts else
            1 if (2,1,1,1) == counts else
            0), ranks
#A hand consists of 5 ranks, and the multiset of rank counts can only take
#7 possible forms: the integer partitions of 5 shown above (sets of counts
#that sum to 5).
#Comparison is lexicographic: compare the highest rank first, then fall
#back to the lower ones on ties.
#This is also an example of REFACTORING: repeated logic is factored into a
#single, simpler helper.
def group(items):
    """Return [(count, x), ...] sorted highest count first, ties broken by
    the larger x first."""
    pairs = []
    for value in set(items):
        pairs.append((items.count(value), value))
    pairs.sort(reverse=True)
    return pairs
def unzip(pairs):return zip(*pairs)  # transpose: [(c1, r1), (c2, r2), ...] -> (counts...), (ranks...)
#unzip splits the (count, rank) pairs produced by group() into two separate
#sequences, so `counts, ranks = unzip(groups)` works. The simpler,
#table-driven hand_rank below uses the same helpers.
def hand_rank(hand):
    """Return a (ranking, ranks) pair for a 5-card hand (table-driven version)."""
    # BUG FIX: '--23456789TJKA' lacked 'Q' (ValueError on queens) and mapped
    # the ace to 13 instead of 14, defeating the ace-low straight check.
    groups = group(['--23456789TJQKA'.index(r) for r,s in hand])
    counts, ranks = unzip(groups)
    if ranks == (14,5,4,3,2):  # ace-low straight: treat the ace as 1
        ranks = (5,4,3,2,1)
    straight = len(set(ranks)) == 5 and max(ranks)-min(ranks) == 4
    flush = len(set(([s for r,s in hand]))) == 1
    # 4*straight + 5*flush yields 9 for a straight flush, 5 for a flush,
    # 4 for a straight; count_rankings covers everything else.
    return max(count_rankings[counts], 4*straight + 5*flush), ranks
# Map each count multiset to its hand ranking.
# BUG FIX: the full-house key was written as the float literal 3.2 instead
# of the tuple (3, 2), so every full house raised KeyError in hand_rank.
count_rankings = {(5,):10, (4,1):7, (3,2):6, (3,1,1):3, (2,2,1):2,
                  (2,1,1,1):1, (1,1,1,1,1):0}
#Here the 9-line conditional return is replaced by a dictionary lookup.
#4*straight converts the boolean to an integer: if not straight, 4*0 is 0.
#The same applies to flush: if flush, 5*1 equals 5.
#The dictionary ranks hands by their count multiset, while straights and
#flushes are handled by 4*straight + 5*flush.
#A straight flush previously returned 8; here it returns 4 + 5 = 9, which
#is fine because the rankings only need to be ordered consistently. The
#former 9 (five of a kind) is bumped to 10, leaving 8 unused.
#It's a trade-off between being concise and being explicit.
3280648 | """Coinbase helpers model"""
__docformat__ = "numpy"
import argparse
import binascii
from typing import Optional, Any, Union
import hmac
import hashlib
import time
import base64
import requests
from requests.auth import AuthBase
import gamestonk_terminal.config_terminal as cfg
class CoinbaseProAuth(AuthBase):
    """Authorize CoinbasePro requests. Source: https://docs.pro.coinbase.com/?python#signing-a-message"""

    def __init__(self, api_key, secret_key, passphrase):
        # secret_key is expected to be base64-encoded (decoded when signing)
        self.api_key = api_key
        self.secret_key = secret_key
        self.passphrase = passphrase

    def __call__(self, request):
        # The signature covers: timestamp + method + path + body, in that order.
        timestamp = str(time.time())
        message = timestamp + request.method + request.path_url + (request.body or "")
        message = message.encode("ascii")
        try:
            hmac_key = base64.b64decode(self.secret_key)
            signature = hmac.new(hmac_key, message, hashlib.sha256)
            signature_b64 = base64.b64encode(signature.digest())
        except binascii.Error:
            # Malformed (non-base64) secret: send an empty signature; the API
            # will reject the request with an auth error rather than crashing here.
            signature_b64 = ""
        request.headers.update(
            {
                "CB-ACCESS-SIGN": signature_b64,
                "CB-ACCESS-TIMESTAMP": timestamp,
                "CB-ACCESS-KEY": self.api_key,
                "CB-ACCESS-PASSPHRASE": self.passphrase,
                "Content-Type": "application/json",
            }
        )
        return request
class CoinbaseRequestException(Exception):
    """Raised when a Coinbase response cannot be handled as expected."""

    def __init__(self, message: str):
        self.message = message
        super().__init__(message)

    def __str__(self) -> str:
        return "CoinbaseRequestException: {}".format(self.message)
class CoinbaseApiException(Exception):
    """Raised when the Coinbase API returns an error response."""

    def __init__(self, message: str):
        self.message = message
        super().__init__(message)

    def __str__(self) -> str:
        return "CoinbaseApiException: " + self.message
def check_validity_of_product(product_id: str) -> str:
    """Validate that `product_id` is a trading pair known to Coinbase.

    A product is a pair of coins in the format COIN-COIN (e.g. ETH-USDT).
    The upper-cased pair is returned when it exists; otherwise an
    argparse.ArgumentTypeError is raised. [Source: Coinbase]

    Parameters
    ----------
    product_id: str
        Trading pair of coins on Coinbase e.g ETH-USDT or UNI-ETH

    Returns
    -------
    str
        pair of coins in format COIN-COIN
    """
    known_pairs = {pair["id"] for pair in make_coinbase_request("/products")}
    normalized = product_id.upper()
    if normalized not in known_pairs:
        raise argparse.ArgumentTypeError(
            f"You provided wrong pair of coins {product_id}. "
            f"It should be provided as a pair in format COIN-COIN e.g UNI-USD"
        )
    return normalized
def make_coinbase_request(
    endpoint, params: Optional[dict] = None, auth: Optional[Any] = None
) -> dict:
    """Request handler for Coinbase Pro Api. Prepare a request url, params and payload and call endpoint.
    [Source: Coinbase]
    Parameters
    ----------
    endpoint: str
        Endpoint path e.g /products
    params: dict
        Parameter dedicated for given endpoint
    auth: any
        Api credentials for purpose of using endpoints that needs authentication
    Returns
    -------
    dict
        response from Coinbase Pro Api
    Raises
    ------
    CoinbaseApiException
        on any non-2xx HTTP status
    CoinbaseRequestException
        when the response body is not valid JSON
    """
    url = "https://api.pro.coinbase.com"
    response = requests.get(url + endpoint, params=params, auth=auth)
    if not 200 <= response.status_code < 300:
        # NOTE(review): every non-2xx status is reported as "Invalid
        # Authentication", even for non-auth failures (404, 429, ...) —
        # presumably auth errors are the common case here; consider
        # including the status code in the message.
        raise CoinbaseApiException(f"Invalid Authentication: {response.text}")
    try:
        return response.json()
    except ValueError as e:
        raise CoinbaseRequestException(f"Invalid Response: {response.text}") from e
def _get_account_coin_dict() -> dict:
    """Helper method that returns dictionary with all symbols and account ids in dictionary format. [Source: Coinbase]

    Credentials are read from the terminal config (cfg.API_COINBASE_*).

    Returns
    -------
    dict:
        Your accounts in coinbase, keyed by currency symbol
        {'1INCH': '0c29b708-d73b-4e1c-a58c-9c261cb4bedb', 'AAVE': '0712af66-c069-45b5-84ae-7b<PASSWORD>24', ..}
    """
    auth = CoinbaseProAuth(
        cfg.API_COINBASE_KEY, cfg.API_COINBASE_SECRET, cfg.API_COINBASE_PASS_PHRASE
    )
    accounts = make_coinbase_request("/accounts", auth=auth)
    return {acc["currency"]: acc["id"] for acc in accounts}
def _check_account_validity(account: str) -> Union[str, Any]:
    """Helper method that checks if the given account exists. [Source: Coinbase]

    Accepts either a coin symbol (dict key) or an account id (dict value).

    Parameters
    ----------
    account: str
        coin or account id

    Returns
    -------
    Union[str, Any]
        Your account id, or None when neither a symbol nor an id matches
    """
    accounts = _get_account_coin_dict()
    # Perf fix: membership tests directly on the dict / values view instead
    # of materializing throwaway lists on every call.
    if account in accounts:
        return accounts[account]
    if account in accounts.values():
        return account
    print("Wrong account id or coin symbol")
    return None
| StarcoderdataPython |
179911 | import asyncio
from zeroservices.backend.mongodb import MongoDBCollection
from . import _BaseCollectionTestCase
from ..utils import TestCase, _create_test_resource_service, _async_test
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
class MongoDBCollectionTestCase(_BaseCollectionTestCase):
    """Run the shared collection test suite against a MongoDB-backed collection."""

    def setUp(self):
        # Consistency fix: use zero-argument super() (Python 3 style) to
        # match tearDown below and the rest of this module.
        super().setUp()
        self.database_name = 'test'
        self.collection = MongoDBCollection(self.resource_name,
                                            self.database_name)
        self.collection.service = self.service

    def tearDown(self):
        super().tearDown()
        # Drop the collection so no state leaks between test runs.
        self.collection.collection.drop()
class MongoDBTestCase(TestCase):
    """MongoDB-specific behaviour not covered by the shared suite."""
    def setUp(self):
        self.database_name = 'test'
        self.resource_name = 'test_resource'
        # run each test on a fresh, private event loop
        asyncio.set_event_loop(None)
        self.loop = asyncio.new_event_loop()
        self.service = _create_test_resource_service('test_service', self.loop)
        self.collection = MongoDBCollection(self.resource_name,
                                            database_name=self.database_name)
        self.collection.service = self.service
    def tearDown(self):
        # drop the backing collection so no state leaks between runs
        self.collection.collection.drop()
    @_async_test
    def test_custom_database(self):
        """Resources created in one database must not appear in another."""
        # Create a resource
        resource_id = 'UUID1'
        message_args = {'resource_data': {'kwarg_1': 1, 'kwarg_2': 2},
                        'resource_id': resource_id}
        query = {'action': 'create'}
        query.update(message_args)
        result = yield from self.collection.on_message(**query)
        self.assertEqual(result,
                         {'resource_id': 'UUID1'})
        # Check that resource exists
        resource_list = yield from self.collection.on_message(action='list')
        self.assertEqual(resource_list,
                         [message_args])
        # On a separate database, check that resource doesn't exists
        collection2 = MongoDBCollection(self.resource_name,
                                        database_name='other')
        resource_list = yield from collection2.on_message(action='list')
        self.assertEqual(resource_list, [])
| StarcoderdataPython |
3248807 | # Copyright (C) 2021-present notudope <https://github.com/notudope>
from time import sleep
from pyrogram import Client
from telethon.sessions import StringSession
from telethon.sync import TelegramClient
# Interactive generator for Telethon/Pyrogram STRING_SESSIONs.
select = " "
# NOTE: renamed from `help` to avoid shadowing the built-in help().
help_text = """
Please go-to "my.telegram.org" (to get API_ID and API_HASH):
~ Login using your Telegram account.
~ Click on API development tools.
~ Create a new application, by entering the required details.
API_ID is "App api_id"
API_HASH is "App api_hash"
Or use:
- @apiscrapperbot
- @UseTGXBot
...
"""
docs = """
Telegram Client:
P -->> Pyrogram [https://docs.pyrogram.org]
T -->> Telethon [https://docs.telethon.dev]
"""
template = """
**This is your {} based UserBots** `STRING_SESSION`
⚠️ **DO NOT SHARE WITH ANYONE** ⚠️
```{}```
Generated by KASTA <3 @kastaid
"""
generated = """
Generated !! Check your Telegram "Saved Messages" to copy STRING_SESSION or copy from above.
~ Follow our channel https://t.me/kastaid
"""
print(help_text)
try:
    API_ID = int(input("Enter your API_ID here: "))
except ValueError:
    print(">> API_ID must be an integer.\nQuitting...")
    exit()
API_HASH = input("Enter your API_HASH here: ")
print(docs)
# BUG FIX: the original condition was `select != ("p", "t")` — a string is
# never equal to a tuple, so the test was always True and only the `break`
# statements ended the loop. `not in` expresses the intended membership test.
while select not in ("p", "t"):
    select = input("Enter your required Client < P / T > : ").lower()
    if select == "t":
        print("\nTelethon selected!\nRunning script...")
        sleep(1)
        with TelegramClient(StringSession(), api_id=API_ID, api_hash=API_HASH) as client:
            print("\nGenerating Telethon STRING_SESSION...")
            string_session = client.session.save()
            saved_messages = template.format("Telethon", string_session)
            print("\n" + string_session + "\n")
            # keep a copy in the user's "Saved Messages"
            client.send_message("me", saved_messages)
            sleep(1)
            print(generated)
            break
    elif select == "p":
        print("\nPyrogram selected!\nRunning script...")
        sleep(1)
        with Client("UserBot", api_id=API_ID, api_hash=API_HASH) as client:
            print("\nGenerating Pyrogram STRING_SESSION...")
            string_session = client.export_session_string()
            saved_messages = template.format("Pyrogram", string_session)
            print("\n" + string_session + "\n")
            client.send_message("me", saved_messages)
            sleep(1)
            print(generated)
            break
    else:
        print("\nPlease only select P or T\n")
        sleep(1.5)
| StarcoderdataPython |
206736 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.form.component
import typing
from abc import abstractproperty
from ..x_reset import XReset as XReset_71670917
from ..x_submit import XSubmit as XSubmit_7b060988
from .form import Form as Form_ca1d0c51
if typing.TYPE_CHECKING:
from ..form_submit_encoding import FormSubmitEncoding as FormSubmitEncoding_fdd50deb
from ..form_submit_method import FormSubmitMethod as FormSubmitMethod_e2a90d25
class HTMLForm(Form_ca1d0c51, XReset_71670917, XSubmit_7b060988):
    """
    Service Class
    This service specifies the special kind of Forms for HTML documents.
    An HTMLForm fulfills the specification of forms in HTML. It supplies the possibility of submitting or resetting the contents of a form. For more information on HTML forms, please see the documentation of HTML.
    See Also:
        `API HTMLForm <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1form_1_1component_1_1HTMLForm.html>`_
    """
    # UNO type metadata used by the generated bindings
    __ooo_ns__: str = 'com.sun.star.form.component'
    __ooo_full_ns__: str = 'com.sun.star.form.component.HTMLForm'
    __ooo_type_name__: str = 'service'
    @abstractproperty
    def SubmitEncoding(self) -> 'FormSubmitEncoding_fdd50deb':
        """
        specifies the kind of encoding for submission.
        """
    @abstractproperty
    def SubmitMethod(self) -> 'FormSubmitMethod_e2a90d25':
        """
        specifies the kind of submission.
        """
    @abstractproperty
    def TargetFrame(self) -> str:
        """
        describes the frame, where to open the document specified by the TargetURL.
        """
    @abstractproperty
    def TargetURL(self) -> str:
        """
        specifies the URL, which should be used for submission.
        """
# Public API of this generated module.
__all__ = ['HTMLForm']
| StarcoderdataPython |
6502594 | <filename>code/default/launcher/bubble.py
# -*- encoding:utf-8 -*-
##############################
#
# 程序名:python桌面托盘气泡
# 文件名:bubble.py
# 功能 :实现桌面托盘气泡提示功能
# modify:by heyongman 2018.7.18
# program:python2.7
# 适用 :windowsXP -windows10
#
##############################
import sys
import os
import struct
import time
import win32con
from win32api import *
# Try and use XP features, so we get alpha-blending etc.
try:
from winxpgui import *
except ImportError:
from win32gui import *
curr_path = os.path.dirname(os.path.abspath(__file__))
icon_path = os.path.join(curr_path, "icon", "favicon.ico")
class MainWindow:
    """Hidden Win32 window that owns a tray icon and shows balloon tooltips."""
    def __init__(self):
        # Initialise state; the window itself is created lazily in creWind()
        self.title = ""
        self.msg = ""
        self.duration = 5 # default delay: 5 seconds
        self.hwnd = None
        self.hinst = None
        self.regOk = False
        # self.creWind()
    def creWind(self):
        """Register the window class and create the (invisible) host window."""
        # Register the Window class.
        wc = WNDCLASS()
        self.hinst = wc.hInstance = GetModuleHandle(None)
        wc.lpszClassName = "PythonTaskbarDemo"  # any non-empty string works; same for the 3 strings below
        wc.lpfnWndProc = {win32con.WM_DESTROY: self.OnDestroy}  # could also specify a wndproc.
        classAtom = RegisterClass(wc)
        # Create the Window.
        style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
        self.hwnd = CreateWindow(classAtom, "Taskbar Demo", style,
                                 0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT,
                                 0, 0, self.hinst, None
                                 )
        UpdateWindow(self.hwnd)
    # start bubble
    def startBubble(self, title, msg, duration=3, close=True):
        """Show a balloon tooltip with `title`/`msg`; when `close` is True the
        host window is destroyed after `duration` seconds."""
        if (self.hwnd == None):
            self.creWind()
        self.title = title
        self.msg = msg
        self.duration = duration
        iconPathName = os.path.abspath(os.path.join(curr_path, "icon", "favicon.ico"))
        icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
        try:
            hicon = LoadImage(self.hinst, iconPathName, win32con.IMAGE_ICON, 0, 0, icon_flags)
        except:
            # fall back to the stock application icon when favicon.ico is missing
            hicon = LoadIcon(0, win32con.IDI_APPLICATION)
        flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
        nid = (self.hwnd, 0, flags, win32con.WM_USER + 20, hicon, "Balloon  tooltip demo")
        try:
            Shell_NotifyIcon(NIM_ADD, nid)
        except:
            # the icon may already exist from a previous call; ignore
            pass
        # self.hwnd == None
        # self.show_balloon(self.title, self.msg)
        # nid = (self.hwnd, 0, flags, win32con.WM_USER + 20, hicon, "tooltip")
        # Shell_NotifyIcon(NIM_ADD, nid)
        Shell_NotifyIcon(NIM_MODIFY,
                         (self.hwnd, 0, NIF_INFO, win32con.WM_USER + 20,
                          hicon, "Balloon  tooltip", msg, 200, title))
        if close:
            time.sleep(self.duration)
            # ReleaseDC(self.hwnd,wc)
            # DeleteDC(wc)
            try:
                DestroyWindow(self.hwnd)
                # self.hwnd == None
            except:
                return None
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        """WM_DESTROY handler: remove the tray icon and quit the message loop."""
        nid = (self.hwnd, 0)
        Shell_NotifyIcon(NIM_DELETE, nid)
        PostQuitMessage(0)  # Terminate the app.
# Module-level singleton so importers can simply call bubble.startBubble(...)
bubble = MainWindow()
if __name__ == '__main__':
    # Manual smoke test: show a balloon with a sample title and body.
    msgTitle = u"您有一条短消息"
    msgContent = "hello python"
    bubble = MainWindow()
    bubble.startBubble(msgTitle, msgContent)
| StarcoderdataPython |
6672210 | from .SentenceEvaluator import SentenceEvaluator
from .SimilarityFunction import SimilarityFunction
from .BinaryEmbeddingSimilarityEvaluator import BinaryEmbeddingSimilarityEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .TripletEvaluator import TripletEvaluator
from .MSEEvaluator import MSEEvaluator
from .AnalogyEvaluator import AnalogyEvaluator
from .BDIEvaluator import BDIEvaluator
from .PAFitEvaluator import PAFitEvaluator | StarcoderdataPython |
11215663 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import sys
import pathlib
def path_to_model_save_path(path):
    """Compute the model-save directory from a mutation module's path.

    The result is <path>/../../../newest_model_saved/<module basename
    without extension>.
    """
    stem = os.path.basename(path).split('.')[0]
    return os.path.join(path, '../../..', 'newest_model_saved', stem)
def path_to_module(path, dir_level):
    """Convert a '/'-separated file path into a dotted module string that can
    be passed to __import__.

    `dir_level` is the number of directory levels above the module file where
    the package root sits; those directory names form the module prefix, and
    the file's basename (without extension) is the final component.
    """
    parts = path.split(r'/')
    pieces = [parts[i] for i in range(len(parts) - dir_level - 1, len(parts) - 1)]
    pieces.append(parts[-1].split('.')[0])
    return '.'.join(pieces)
def dynamic_import_from_abs_path(mutation_path):
    """Dynamically import a mutation module given its absolute file path."""
    cwd_path = pathlib.Path(mutation_path)
    cwd_path=os.path.join(cwd_path.parent, "../../../..") # the project root is always four levels above the mutation file
    sys.path.append(cwd_path)
    mutation_path=mutation_path.replace(r'//',r'/') # normalise accidental double slashes from path joins
    converted_str=path_to_module(mutation_path,3)
    #converted_str=r'models.baseline_fbank.mutation.baseline_k_001_larger_hl_adam_deeper_nar_deeper'
    imported_module=__import__(converted_str,fromlist=True) # dynamic import by dotted name
    sys.path.remove(cwd_path) # undo the sys.path change so later imports are unaffected
    return imported_module
if __name__ == '__main__':
    # Manual smoke test of the path helpers (the commented lines show a
    # previous experiment with dynamic_import_from_abs_path).
    # vc_model_mutation_path = '$root_path/tf_project/a2m_VC/models/single_spk/mutation/baseline_with_emb.py'
    # asr_model_mutation_path = '$root_path/tf_project/tensorflow_ASR/models/ASR_mel80/mutation/baseline_with_emb.py'
    # sv_model_mutation_path = '$root_path/tf_project/tensorflow_ASR/models/SV_mel80/mutation/baseline_with_emb.py'
    #
    #
    # asr_model=dynamic_import_from_abs_path(r'E:/python_project/tensorflow_ASR/models/SV_mel80/mutation/baseline.py')
    print(path_to_model_save_path('./models/VCTK_mt/mutation/baseline_mvemb_larger_larger2.py'))
| StarcoderdataPython |
5072691 | from typing import Any, Dict, List, Set, Tuple, TypedDict
from permifrost.core.logger import GLOBAL_LOGGER as logger
from permifrost.core.permissions.types import PermifrostSpecSchema
from permifrost.core.permissions.utils.error import SpecLoadingError
class EntitySchema(TypedDict):
    """Registry of entities defined or referenced in a Permifrost spec.

    Plain sets hold names explicitly defined in the spec; the ``*_refs``
    sets hold names referenced by permissions (grants, ownership, etc.).
    """
    databases: Set[str]
    database_refs: Set[str]
    shared_databases: Set[str]
    schema_refs: Set[str]
    table_refs: Set[str]
    roles: Set[str]
    role_refs: Set[str]
    users: Set[str]
    warehouses: Set[str]
    warehouse_refs: Set[str]
    require_owner: bool
class EntityGenerator:
    def __init__(self, spec: PermifrostSpecSchema):
        """Capture the parsed spec and start with an empty entity registry."""
        self.spec = spec
        # Populated by generate(); see EntitySchema for the meaning of each set.
        self.entities: EntitySchema = {
            "databases": set(),
            "database_refs": set(),
            "shared_databases": set(),
            "schema_refs": set(),
            "table_refs": set(),
            "roles": set(),
            "role_refs": set(),
            "users": set(),
            "warehouses": set(),
            "warehouse_refs": set(),
            "require_owner": False,
        }
        # Validation errors accumulated across the ensure_* checks.
        self.error_messages: List[str] = []
    def inspect_entities(self) -> EntitySchema:
        """
        Inspect a valid spec and make sure that no logic errors exist.
        e.g. a role granted to a user not defined in roles
        or a user given access to a database not defined in databases

        Raises SpecLoadingError with all collected messages if any
        validation check fails.
        """
        self.generate()
        self.error_messages.extend(self.ensure_valid_entity_names(self.entities))
        self.error_messages.extend(
            self.ensure_valid_spec_for_conditional_settings(self.entities)
        )
        self.error_messages.extend(self.ensure_valid_references(self.entities))
        if self.error_messages:
            raise SpecLoadingError("\n".join(self.error_messages))
        return self.entities
    @staticmethod
    def filter_grouped_entities_by_type(grouped_entities: List, type: str) -> List:
        """
        Takes a list of grouped entities and filters them for a particular entity_type
        For example:
            filter_grouped_entities_by_type(grouped_entities, 'role') ->
                {'accountadmin', 'demo', 'securityadmin', 'sysadmin', 'useradmin'}

        Returns the entries of the first (and only expected) matching group,
        or an empty list when no group matches.
        """
        filtered_entities = [
            entries for entity_type, entries in grouped_entities if entity_type == type
        ]
        # Avoid returning a nested list if there are entities
        if not filtered_entities:
            return filtered_entities
        else:
            return filtered_entities[0]
    @staticmethod
    def group_spec_by_type(spec: PermifrostSpecSchema) -> List[Tuple[str, Any]]:
        """
        Takes as input the Permifrost Spec and converts it to a grouped list
        For example:
        [('databases',
            [{'demo': {'shared': False}}]),
        ('roles',
            [
                {'accountadmin':
                    {'warehouses': ['loading'],
                    'member_of': ['sysadmin', 'securityadmin']}},
                {'securityadmin':
                    {'warehouses': ['loading'],
                    'member_of': ['useradmin']}},
            ]
        ...
        ]

        The 'version' key and any empty sections are excluded.
        """
        entities_by_type = [
            (entity_type, entry)
            for entity_type, entry in spec.items()
            if entry and entity_type != "version"
        ]
        return entities_by_type
    def generate(self) -> EntitySchema:
        """
        Generate and return a dictionary with all the entities defined or
        referenced in the permissions specification file.
        The xxx_refs entities are referenced by various permissions.
        For example:
        'roles' --> All the roles defined in the spec
        'role_refs' --> All the roles referenced in a member_of permission
        'table_refs' --> All the tables referenced in read/write privileges
         or in owns entries

        Returns the populated self.entities dict. (Validation errors are
        collected separately by inspect_entities().)
        """
        entities_by_type = self.group_spec_by_type(self.spec)
        # Dispatch each spec section to its dedicated generator.
        self.generate_roles(
            self.filter_grouped_entities_by_type(entities_by_type, "roles")
        )
        self.generate_databases(
            self.filter_grouped_entities_by_type(entities_by_type, "databases")
        )
        self.generate_warehouses(
            self.filter_grouped_entities_by_type(entities_by_type, "warehouses")
        )
        self.generate_users(
            self.filter_grouped_entities_by_type(entities_by_type, "users")
        )
        # Filter the owner requirement and set it to True or False
        require_owner = [
            entry
            for entity_type, entry in entities_by_type
            if entity_type == "require-owner"
        ]
        self.entities["require_owner"] = require_owner == [True]
        # Add implicit references to DBs and Schemas.
        # e.g. RAW.MYSCHEMA.TABLE references also DB RAW and Schema MYSCHEMA
        self.generate_implicit_refs_from_schemas()
        self.generate_implicit_refs_from_tables()
        return self.entities
    def generate_implicit_refs_from_schemas(self):
        """Adds implicit database refs from schemas (DB.SCHEMA implies DB)."""
        for schema in self.entities["schema_refs"]:
            name_parts = schema.split(".")
            # '*' is a wildcard, not a concrete database name
            if name_parts[0] != "*":
                self.entities["database_refs"].add(name_parts[0])
    def generate_implicit_refs_from_tables(self):
        """Adds implicit db/schema refs from tables (DB.SCHEMA.TABLE implies both)."""
        for table in self.entities["table_refs"]:
            name_parts = table.split(".")
            # '*' segments are wildcards and never name a concrete entity
            if name_parts[0] != "*":
                self.entities["database_refs"].add(name_parts[0])
                if name_parts[1] != "*":
                    self.entities["schema_refs"].add(f"{name_parts[0]}.{name_parts[1]}")
    def ensure_valid_entity_names(self, entities: EntitySchema) -> List[str]:
        """
        Check that all entity names are valid.
        Databases must be unqualified (DB), schemas two-part (DB.SCHEMA or
        DB.*), tables three-part (DB.SCHEMA.TABLE with '*' wildcards allowed
        only from the schema level down).
        Returns a list with all the errors found.
        """
        error_messages = []
        for db in entities["databases"].union(entities["database_refs"]):
            name_parts = db.split(".")
            if not len(name_parts) == 1:
                error_messages.append(
                    f"Name error: Not a valid database name: {db}"
                    " (Proper definition: DB)"
                )
        for schema in entities["schema_refs"]:
            name_parts = schema.split(".")
            # the database part may never be a wildcard
            if (not len(name_parts) == 2) or (name_parts[0] == "*"):
                error_messages.append(
                    f"Name error: Not a valid schema name: {schema}"
                    " (Proper definition: DB.[SCHEMA | *])"
                )
        for table in entities["table_refs"]:
            name_parts = table.split(".")
            if (not len(name_parts) == 3) or (name_parts[0] == "*"):
                error_messages.append(
                    f"Name error: Not a valid table name: {table}"
                    " (Proper definition: DB.[SCHEMA | *].[TABLE | *])"
                )
            # a wildcard schema must be followed by a wildcard table
            elif name_parts[1] == "*" and name_parts[2] != "*":
                error_messages.append(
                    f"Name error: Not a valid table name: {table}"
                    " (Can't have a Table name after selecting all schemas"
                    " with *: DB.SCHEMA.[TABLE | *])"
                )
        return error_messages
    def ensure_valid_references(self, entities: EntitySchema) -> List[str]:
        """
        Make sure that all references are well defined, i.e. every referenced
        database/role/warehouse is also explicitly defined in the spec.
        Returns a list with all the errors found.
        """
        error_messages = []
        # Check that all the referenced entities are also defined
        for database in entities["database_refs"]:
            if database not in entities["databases"]:
                error_messages.append(
                    f"Reference error: Database {database} is referenced "
                    "in the spec but not defined"
                )
        for role in entities["role_refs"]:
            # '*' is a wildcard reference and needs no definition
            if role not in entities["roles"] and role != "*":
                error_messages.append(
                    f"Reference error: Role {role} is referenced in the "
                    "spec but not defined"
                )
        for warehouse in entities["warehouse_refs"]:
            if warehouse not in entities["warehouses"]:
                error_messages.append(
                    f"Reference error: Warehouse {warehouse} is referenced "
                    "in the spec but not defined"
                )
        return error_messages
def ensure_valid_spec_for_conditional_settings(
    self, entities: EntitySchema
) -> List[str]:
    """
    Make sure that the spec is valid based on conditional settings such as
    require-owner.
    """
    if not entities["require_owner"]:
        return []
    return list(self.check_entities_define_owner())
def check_entities_define_owner(self) -> List[str]:
    """Return one error per databases/roles/users/warehouses entry that
    lacks an `owner` key (used when require-owner is set)."""
    errors = []
    ownable = ("databases", "roles", "users", "warehouses")
    for entity_type, entry in self.spec.items():
        if not entry or entity_type not in ownable:
            continue
        for entity_dict in entry:  # type: ignore
            for entity_name, config in entity_dict.items():
                if "owner" not in config:
                    errors.append(
                        f"Spec Error: Owner not defined for {entity_type} {entity_name} and require-owner is set!"
                    )
    return errors
def generate_warehouses(self, warehouse_list: List[Dict[str, Dict]]) -> None:
    """Register every warehouse name found in the spec's warehouse entries."""
    for warehouse_entry in warehouse_list:
        self.entities["warehouses"].update(warehouse_entry.keys())
def generate_databases(self, db_list: List[Dict[str, Dict]]) -> None:
    """Register database names, tracking which ones are marked as shared.

    A database entry may carry a boolean ``shared`` flag; non-boolean values
    are ignored with a debug message (matching the original permissive
    behavior) rather than raising.
    """
    for db_entry in db_list:
        for db_name, config in db_entry.items():
            self.entities["databases"].add(db_name)
            if "shared" not in config:
                continue
            # Idiom fix: isinstance instead of `type(...) == bool`.
            if not isinstance(config["shared"], bool):
                logger.debug(
                    "`shared` for database {} must be boolean, skipping Role Reference generation.".format(
                        db_name
                    )
                )
            elif config["shared"]:
                self.entities["shared_databases"].add(db_name)
def generate_member_of_roles(self, config, role_name):
    """Collect role names referenced by a role's `member_of` setting.

    `member_of` may be a plain list of roles or a dict with `include` /
    `exclude` lists; any other shape is silently ignored.
    """
    try:
        member_of = config["member_of"]
    except KeyError:
        logger.debug(
            "`member_of` not found for role {}, skipping Role Reference generation.".format(
                role_name
            )
        )
        return
    if isinstance(member_of, dict):
        referenced = member_of.get("include", []) + member_of.get("exclude", [])
    elif isinstance(member_of, list):
        referenced = member_of
    else:
        referenced = []
    self.entities["roles"].update(referenced)
def generate_warehouse_roles(self, config, role_name):
    """Collect warehouse names referenced by a role definition."""
    if "warehouses" in config:
        self.entities["warehouse_refs"].update(config["warehouses"])
    else:
        logger.debug(
            "`warehouses` not found for role {}, skipping Warehouse Reference generation.".format(
                role_name
            )
        )
def generate_database_roles(self, config, role_name):
    """Collect database names referenced in a role's read/write database
    privileges."""
    for access in ("read", "write"):
        try:
            databases = config["privileges"]["databases"][access]
        except KeyError:
            logger.debug(
                "`privileges.databases.{}` not found for role {}, skipping Database Reference generation.".format(
                    access, role_name
                )
            )
            continue
        self.entities["database_refs"].update(databases)
def generate_read_write_database_names(self, config):
    """Return ([read databases], [write databases]) from a role's privileges,
    defaulting to empty lists when the keys are absent."""
    privileges = config.get("privileges", {}).get("databases", {})
    return (privileges.get("read", []), privileges.get("write", []))
def generate_schema_roles(self, config, role_name):
    """Collect schema refs from read/write schema privileges.

    Also verifies that each referenced schema's database appears in the
    role's database privileges (writes may be backed by either read or
    write database access).
    """
    read_databases, write_databases = self.generate_read_write_database_names(config)
    allowed = {
        "read": read_databases,
        "write": write_databases + read_databases,
    }
    for access in ("read", "write"):
        try:
            schemas = config["privileges"]["schemas"][access]
        except KeyError:
            logger.debug(
                "`privileges.schemas.{}` not found for role {}, skipping Schema Reference generation.".format(
                    access, role_name
                )
            )
            continue
        for schema in schemas:
            self.entities["schema_refs"].add(schema)
            schema_db = schema.split(".")[0]
            if schema_db not in allowed[access]:
                self.error_messages.append(
                    f"Privilege Error: Database {schema_db} referenced in "
                    f"schema {access} privileges but not in database privileges "
                    f"for role {role_name}"
                )
def generate_table_roles(self, config, role_name):
    """Collect table refs from read/write table privileges.

    Also verifies that each referenced table's database appears in the
    role's database privileges (writes may be backed by either read or
    write database access).
    """
    read_databases, write_databases = self.generate_read_write_database_names(config)
    allowed = {
        "read": read_databases,
        "write": write_databases + read_databases,
    }
    for access in ("read", "write"):
        try:
            tables = config["privileges"]["tables"][access]
        except KeyError:
            logger.debug(
                "`privileges.tables.{}` not found for role {}, skipping Table Reference generation.".format(
                    access, role_name
                )
            )
            continue
        for table in tables:
            self.entities["table_refs"].add(table)
            table_db = table.split(".")[0]
            if table_db not in allowed[access]:
                self.error_messages.append(
                    f"Privilege Error: Database {table_db} referenced in "
                    f"table {access} privileges but not in database privileges "
                    f"for role {role_name}"
                )
def generate_ownership_roles(self, config, role_name):
    """Collect database/schema/table refs for entities this role owns."""
    targets = (
        ("databases", "database_refs", "Database"),
        ("schemas", "schema_refs", "Schema"),
        ("tables", "table_refs", "Table"),
    )
    for owned_key, ref_key, label in targets:
        try:
            owned = config["owns"][owned_key]
        except KeyError:
            logger.debug(
                "`owns.{}` not found for role {}, skipping {} Reference generation.".format(
                    owned_key, role_name, label
                )
            )
            continue
        self.entities[ref_key].update(owned)
def generate_roles(self, role_list):  # noqa
    """
    Generate all of the role entities.

    Also populates role_refs, database_refs, schema_refs, table_refs and
    warehouse_refs by delegating to the per-category helpers.
    """
    for role_entry in role_list:
        for role_name, config in role_entry.items():
            self.entities["roles"].add(role_name)
            collectors = (
                self.generate_member_of_roles,
                self.generate_warehouse_roles,
                self.generate_database_roles,
                self.generate_schema_roles,
                self.generate_table_roles,
                self.generate_ownership_roles,
            )
            for collect in collectors:
                collect(config, role_name)
def generate_user_fn(self, config, key, ref, user_name):
    """Add every value of config[key] to the entity set named by `ref`,
    logging (not failing) when the key is absent."""
    if key not in config:
        logger.debug(
            "`{}` not found for user {}, skipping Role Reference generation.".format(
                key, user_name
            )
        )
        return
    self.entities[ref].update(config[key])
def generate_users(self, user_list):
    """
    Generate all of the user entities.

    Also populates role_refs, database_refs, schema_refs and table_refs
    from each user's `member_of` and `owns` settings.
    """
    for user_entry in user_list:
        for user_name, config in user_entry.items():
            self.entities["users"].add(user_name)
            self.generate_user_fn(config, "member_of", "role_refs", user_name)
            owns = config.get("owns", {})
            # NOTE(review): "database" (singular) mirrors the original spec
            # key; verify that is the intended key name in user specs.
            self.generate_user_fn(owns, "database", "database_refs", user_name)
            self.generate_user_fn(owns, "schemas", "schema_refs", user_name)
            self.generate_user_fn(owns, "tables", "table_refs", user_name)
| StarcoderdataPython |
1750158 | """Version information."""
__version__ = "1.30.0"
| StarcoderdataPython |
6504444 | <gh_stars>10-100
"""Sphinx configuration."""
import re
project = "grpc-interceptor"
author = "<NAME>"
copyright = f"2020, {author}"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
def setup(app):
    """Sphinx setup."""
    # Hook the namedtuple-docstring filter (skip_member, below) into
    # autodoc's member-skipping decision.
    app.connect("autodoc-skip-member", skip_member)
def skip_member(app, what, name, obj, skip, options):
    """Skip members whose docstring is namedtuple's auto-generated
    "Alias for field number N" text; otherwise defer to autodoc's verdict."""
    docstring = getattr(obj, "__doc__", None) or ""  # __doc__ may be None
    if re.fullmatch("Alias for field number [0-9]+", docstring):
        return True
    return skip
| StarcoderdataPython |
9645920 | # Copyright (c) 2012, Calxeda Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Calxeda Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""FRU related commands"""
from .. import Command
from pyipmi.fru import *
from pyipmi.tools.responseparser import ResponseParserMixIn
class FRUPrintCommand(Command, ResponseParserMixIn):
    """Describes the FRU get inventory area info IPMI command

    This is "fru print" to ipmitool
    """
    name = "FRU Print"
    result_type = FRUPrintResult

    # Field labels expected in ipmitool's "fru print" output; empty dicts
    # mean ResponseParserMixIn's default parsing is used for each field.
    response_fields = {
        'FRU Device Description' : {},
        'Board Mfg Date' : {},
        'Board Mfg' : {},
        'Board Product' : {},
        'Board Serial' : {},
        'Board Part Number' : {},
        'Product Manufacturer' : {},
        'Product Name' : {},
        'Product Part Number' : {},
        'Product Serial' : {}
    }

    ipmitool_args = ["fru", "print"]
class FRUReadCommand(Command, ResponseParserMixIn):
    """Describes the FRU read IPMI command

    This is "fru read" to ipmitool
    """
    name = "FRU Read"
    result_type = FRUReadResult
    response_fields = {
    }

    @property
    def ipmitool_args(self):
        # Reads FRU data for `fru_id` into the local file `filename`.
        return ["fru", "read", self._params['fru_id'],
                self._params['filename']]
class FRUWriteCommand(Command, ResponseParserMixIn):
    """Describes the FRU write IPMI command

    This is "fru write" to ipmitool
    """
    name = "FRU Write"
    result_type = FRUWriteResult
    response_fields = {
    }

    @property
    def ipmitool_args(self):
        # Bug fix: this previously issued "fru read" (copy-paste from
        # FRUReadCommand), so FRU writes silently performed a read instead
        # of writing `filename` to the FRU.
        return ["fru", "write", self._params['fru_id'],
                self._params['filename']]
class FRUUpgEKeyCommand(Command, ResponseParserMixIn):
    """Describes the FRU upgEKey ipmitool command"""
    name = "FRU UpgEkey"
    result_type = FRUUpgEKeyResult
    response_fields = {
    }

    @property
    def ipmitool_args(self):
        # "upgEkey" feeds `filename` to the FRU for key upgrade — see
        # ipmitool's fru upgEkey documentation for exact semantics.
        return ["fru", "upgEkey", self._params['fru_id'],
                self._params['filename']]
class FRUShowCommand(Command, ResponseParserMixIn):
    """Describes the ekanalyzer frushow ipmitool command"""
    name = "FRU Show"
    result_type = FRUShowResult
    response_fields = {
    }

    @property
    def ipmitool_args(self):
        # Runs ipmitool's ekanalyzer over a saved FRU binary file.
        return ["ekanalyzer", 'frushow',
                'oc=%s' % self._params['filename']]
# Registry mapping command keys to their Command classes.
# NOTE(review): FRUShowCommand is not registered here — presumably because
# it is an ekanalyzer command rather than a "fru" one; confirm intent.
fru_commands = {
    'fru_print' : FRUPrintCommand,
    'fru_read' : FRUReadCommand,
    'fru_write' : FRUWriteCommand,
    'fru_upg_e_key' : FRUUpgEKeyCommand
}
| StarcoderdataPython |
390530 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint:disable=redefined-outer-name,logging-format-interpolation
import logging
import argparse
import numpy as np
import onnx
logger = logging.getLogger(__name__)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.WARN)
if __name__ == "__main__":
logger.info("Evaluating ONNXRuntime full precision accuracy and performance:")
parser = argparse.ArgumentParser(
description="SSD_Mobilenet_v1 fine-tune examples for image classification tasks.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model_path',
type=str,
help="Pre-trained ssd_mobilenet_v1 model on onnx file"
)
parser.add_argument(
'--benchmark',
action='store_true', \
default=False
)
parser.add_argument(
'--tune',
action='store_true', \
default=False,
help="whether quantize the model"
)
parser.add_argument(
'--config',
type=str,
help="config yaml path"
)
parser.add_argument(
'--output_model',
type=str,
help="output model path"
)
parser.add_argument(
'--mode',
type=str,
help="benchmark mode of performance or accuracy"
)
args = parser.parse_args()
model = onnx.load(args.model_path)
if args.benchmark:
from lpot.experimental import Benchmark, common
evaluator = Benchmark(args.config)
evaluator.model = common.Model(model)
evaluator(args.mode)
if args.tune:
from lpot.experimental import Quantization, common
quantize = Quantization(args.config)
quantize.model = common.Model(model)
q_model = quantize()
q_model.save(args.output_model)
if args.benchmark:
from lpot.experimental import Benchmark
evaluator = Benchmark(args.config)
evaluator.model = common.Model(q_model)
evaluator(args.mode)
| StarcoderdataPython |
6464953 | <filename>AnuOyeboade/phase1/BASIC/DAY3/Q22.py
"""
Write a Python program to count the number 4 in a given list.
"""
nums = str(input("Enter a list of comma seperated numbers"))
numbs = nums.split(",")
def list_4(numbs):
    """Count how many entries of `numbs` are the number 4.

    `numbs` is the list of strings produced by splitting the comma-separated
    input. Bug fix: the original compared each string against the int 4,
    which never matched, so the count was always 0.
    """
    count = 0
    for num in numbs:
        if num.strip() == "4":
            count = count + 1
    return count
print(list_4(numbs)) | StarcoderdataPython |
54811 | <reponame>nel215/lightgbm-mean-teacher
from chainer import reporter as reporter_module
from chainer.dataset import convert
from chainer.training.extensions import Evaluator
from chainer.backends import cuda
from sklearn.metrics import roc_auc_score
class AUCEvaluator(Evaluator):
    """Chainer Evaluator extension that reports ROC AUC.

    Iterates the 'main' iterator once, collects the 'main' target's
    predictions for every batch, and reports a single `<name>/main/auc`
    scalar computed by scikit-learn.
    """

    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if self.eval_hook:
            self.eval_hook(self)

        # Rewind so repeated evaluations see the whole dataset.
        iterator.reset()
        it = iterator

        summary = reporter_module.DictSummary()
        true_y = []
        pred_y = []
        for batch in it:
            in_arrays = convert._call_converter(
                self.converter, batch, self.device)
            assert isinstance(in_arrays, tuple)
            x, y = in_arrays
            true_y.append(y)
            pred_y.append(target.predict(x).data)
        # Concatenate on-device, then move to CPU for scikit-learn.
        auc = roc_auc_score(
            cuda.to_cpu(target.xp.concatenate(true_y, axis=0)),
            cuda.to_cpu(target.xp.concatenate(pred_y, axis=0)),
        )
        summary.add({f'{self.name}/main/auc': auc})
        return summary.compute_mean()
| StarcoderdataPython |
3566437 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["autocorr_function", "autocorr_integrated_time", "thermodynamic_integration_log_evidence"]
import numpy as np
def autocorr_function(x, axis=0, fast=False):
    """
    Estimate the autocorrelation function of a time series using the FFT.

    :param x:
        The time series. If multidimensional, set the time axis using the
        ``axis`` keyword argument and the function will be computed for every
        other axis.

    :param axis: (optional)
        The time axis of ``x``. Assumed to be the first axis if not specified.

    :param fast: (optional)
        If ``True``, only use the largest ``2^n`` entries for efficiency.
        (default: False)
    """
    x = np.atleast_1d(x)
    m = [slice(None), ] * len(x.shape)

    # For computational efficiency, crop the chain to the largest power of
    # two if requested.
    if fast:
        n = int(2**np.floor(np.log2(x.shape[axis])))
        m[axis] = slice(0, n)
        # Bug fix: the cropped view was previously discarded (`x = x`),
        # so `fast` silently had no effect.
        x = x[tuple(m)]
    else:
        n = x.shape[axis]

    # Compute the FFT and then (from that) the auto-correlation function.
    # keepdims makes the mean subtraction broadcast correctly for any axis.
    f = np.fft.fft(x - np.mean(x, axis=axis, keepdims=True), n=2 * n, axis=axis)
    m[axis] = slice(0, n)
    # Index with tuples: indexing an ndarray with a plain list of slices
    # is an error on modern NumPy.
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    # Normalize by the zero-lag value so acf[0] == 1 along the time axis.
    return acf / acf[tuple(m)]


def autocorr_integrated_time(x, axis=0, window=50, fast=False):
    """
    Estimate the integrated autocorrelation time of a time series.

    See `Sokal's notes <http://www.stat.unc.edu/faculty/cji/Sokal.pdf>`_ on
    MCMC and sample estimators for autocorrelation times.

    :param x:
        The time series. If multidimensional, set the time axis using the
        ``axis`` keyword argument and the function will be computed for every
        other axis.

    :param axis: (optional)
        The time axis of ``x``. Assumed to be the first axis if not specified.

    :param window: (optional)
        The size of the window to use. (default: 50)

    :param fast: (optional)
        If ``True``, only use the largest ``2^n`` entries for efficiency.
        (default: False)
    """
    # Compute the autocorrelation function.
    f = autocorr_function(x, axis=axis, fast=fast)

    # Special case 1D for simplicity.
    if len(f.shape) == 1:
        return 1 + 2 * np.sum(f[1:window])

    # N-dimensional case: sum lags 1..window-1 along the time axis.
    m = [slice(None), ] * len(f.shape)
    m[axis] = slice(1, window)
    # Bug fix: tuple indexing (list-of-slices indexing is an error on
    # modern NumPy).
    tau = 1 + 2 * np.sum(f[tuple(m)], axis=axis)
    return tau
def thermodynamic_integration_log_evidence(betas, logls):
    """
    Thermodynamic integration estimate of the log-evidence.

    Integrates the mean log-likelihood over inverse temperature,
    log Z = \\int_0^1 <log l>_beta dbeta, with the trapezoid rule. If the
    coldest supplied temperature is not beta = 0, the quadrature is extended
    there by duplicating the hottest chain's mean log-likelihood. The error
    estimate is the difference between the full-resolution quadrature and one
    built from every other temperature.

    :param betas: The inverse temperatures to use for the quadrature.

    :param logls: The mean log-likelihoods corresponding to ``betas``.

    :return ``(logZ, dlogZ)``: An estimate of the log-evidence and the error
        associated with the finite number of temperatures at which the
        posterior has been sampled.
    """
    if len(betas) != len(logls):
        raise ValueError('Need the same number of log(L) values as temperatures.')

    # Robustness: accept plain sequences, not just ndarrays (fancy indexing
    # below requires arrays).
    betas = np.asarray(betas, dtype=float)
    logls = np.asarray(logls, dtype=float)

    # Sort so betas run from hottest-in-likelihood (beta = 1) down to 0.
    order = np.argsort(betas)[::-1]
    betas = betas[order]
    logls = logls[order]

    betas0 = np.copy(betas)
    if betas[-1] != 0:
        betas = np.concatenate((betas0, [0]))
        betas2 = np.concatenate((betas0[::2], [0]))

        # Duplicate mean log-likelihood of hottest chain as a best guess for
        # beta = 0.
        logls2 = np.concatenate((logls[::2], [logls[-1]]))
        logls = np.concatenate((logls, [logls[-1]]))
    else:
        betas2 = np.concatenate((betas0[:-1:2], [0]))
        logls2 = np.concatenate((logls[:-1:2], [logls[-1]]))

    # NumPy 2.0 renamed trapz -> trapezoid; support both.
    try:
        _trapz = np.trapezoid
    except AttributeError:
        _trapz = np.trapz

    # Negative sign because betas decrease from 1 to 0.
    logZ = -_trapz(logls, betas)
    logZ2 = -_trapz(logls2, betas2)
    return logZ, np.abs(logZ - logZ2)
| StarcoderdataPython |
3275419 | """
Notes in the following:
- `self` is an object
- Words beginning with uppercase are classes except
True and False which are booleans
- WORDS that are all uppercase are constants
- `a,b` = local variables
- `sd` = standard deviation
- `r()` is a random number between 0 and 1
- `x,y` = decision, objective
- `xs,ys` = decisions, objectives
- `Eg` = the example class and an example is a pair xs,ys
- `eg` = an instance of class Eg
- `egs` = a list of examples
"""
# Todo
# - jc's repair heuristics?
# - todo. needs lists of facts and guesses
# = min change heuristic from jc
# - sample heuristic from vivek
# - is this de?
# - MOEA/D?
# - the de trick incremental domination within the archive
# - what about the moea/d trick?
# - the surroage trick: eval as few times as possible
#
# -----
# ## Constants
BATCH = True # if false, mutate archive as we go
LIVES = 9 # like a cat
COHEN = 0.5 # not other when it < standardDev*cohen
SOME = 100 # size of pop to explore
NEAR = SOME / 10 # size of local neighborhood in pop
FF = 0.5 # mutate 150% toward envy
CR = 1 # mutate all attributes towards the envy point
KISS = True # Keep It Simple
# -----
# ## Eg
# A thing that stores a list of x and y values.
class Eg:
    """An example: paired decision values `xs` and objective values `ys`,
    each instance stamped with a unique, monotonically increasing `id`."""
    id = 0  # class-level counter handing out instance ids

    def __init__(self, xs=None, ys=None):
        Eg.id = self.id = Eg.id + 1
        # Bug fix: the original used mutable default arguments ([], []),
        # which are shared across every Eg() created with no arguments.
        self.xs = [] if xs is None else xs
        self.ys = [] if ys is None else ys

    def __hash__(self):
        return self.id

    def gap(self, other, stats):
        # Decision-space distance; `euclidian` is a module-level helper
        # defined elsewhere in this sketch.
        return euclidian(self.xs, other.xs, stats)

    def dominate(self, other, stats):
        """Continuous (indicator-style) domination: True when self's
        objectives are preferable to other's under the per-objective
        weights/normalization supplied by `stats.ys`."""
        n = len(self.ys)
        s1 = s2 = 0
        for a, b, stat in zip(self.ys, other.ys, stats.ys):
            w = stat.w
            a = stat.norm(a)
            b = stat.norm(b)
            s1 -= 10**(w * (a - b) / n)
            s2 -= 10**(w * (b - a) / n)
        return s1 < s2

    def dominates(self, lst, stats):
        """Fraction of `lst` dominated by self (0..1)."""
        a = 0
        for eg in lst:  # bug fix: the original was missing the ":" here
            if self.dominate(eg, stats):
                a += 1 / len(lst)
        return a
# -----
# ## Main Loop
# de
# ga
# NOTE(review): sketch pseudocode — this does not parse as Python
# (`while LIVES > 0` and `if BATCH` are missing ":", `eval(Eg())` appears
# to mean "evaluate the candidate", and `better(...)` is undefined here).
# Documented as-is pending a real implementation.
def main():
    egs = [eval(Eg()) for _ in range(SOME)]
    b4 = None
    while LIVES > 0
        stats = Stats(egs)
        if BATCH
            for a in len(egs):
                egs[a] = mutate(egs[a], egs, stats)
        else:
            egs = [mutate(eg, egs, stats) for eg in egs]
        # here ?? reevaluate egs
        LIVES -= 1
        if b4:
            if better(stats, b4): LIVES += 1
        b4 = Stats(egs)
# -----
# ## Main Mutation Function
# NOTE(review): sketch pseudocode — does not run as written (`mutant` is
# used before being created, `near`, `dominate` and `r` resolution are
# unclear, lists have no `.sorted`, and the final `return ... and` spans a
# line break without parentheses).  Documented as-is.
def mutate(old, egs, stats):
    def meanSlope(envy, lst):
        deltas = [0 for _ in lst[0].ys]  # local slopes
        for eg in lst:
            step = eg.gap(envy)
            out = [delta + (y1 - y2) / step / len(lst)
                   for y1, y2, delta in zip(envy.ys, eg.ys, deltas)]
        return deltas
    ignore = lambda: r() > SOME / len(egs)
    want = lambda eg: not stats.same(old, eg) and dominate(eg, old)
    best = lambda eg1,eg2: eg1 if eg1.dominate(eg2,stats) else eg2
    better = [eg for eg in egs if not ignore() and want(eg)]
    better = better.sorted(key=lambda eg: old.gap(eg,stats))
    envy = reduce(best, better[:NEAR])
    slope = meanSlope(envy, near)
    mutant.xs = [x if r() > CR else x + FF * (a - x)
                 for x, a in zip(old.xs, envy.xs)]
    dist = old.gap(mutant, stats)  # how far to push
    mutant.ys = [y + m * dist
                 for y, m in zip(old.ys, slope)]
    return mutant if mutant.dominate(old) and
        not stats.same(old, mutant) else old
# Maybe a bad idea. Ignore?
#
# bests=elite(egs, 0.5)
# best1=mid(bests) # thing near mid of non-dominated egs
# egs=[eg for eg in bests # closer to heaven than me
# if best1.gap(eg) < old.gap(best1)]
# envy=mid(egs).nearest(egs) # mid of the most heavenly
#
# def nearest(self, lst)
# out, best = lst[0], 10**10
# for a in lst:
# tmp = self.gap(a)
# if tmp < best:
# out, best = a, tmp
# return out
#
# ------
# ## Note
# The rest is "just" details.
# -----
# ## Misc Support Functions
def interpolate(x, xy):
    """Piecewise-linear interpolation over sorted (x, y) pairs, clamping to
    the first y below the range and to the last y above it."""
    if x < xy[0][0]:
        return xy[0][1]
    for (x1, y1), (x2, y2) in zip(xy, xy[1:]):
        if x1 <= x < x2:
            return y1 + (y2 - y1) * (x - x1) / (x2 - x1)
    return xy[-1][1]
def distfun(stats,cols=decs)
cache={}
def worker(eg1,eg2):
if k in cache: return cache[k]
out = cache[k] = eg.dist(eg2.cols,stats)
return out
retrun worker
def cache2(f): # XXX to be added below
cache = {}
def worker(a, b)
k = (a.id, b.id) if a.id <= b.id else (b.id, a.id)
if k in cache: return cache[k]
out = cache[k] = f(a, b)
return out
return worker
def cache(f): # XXX to be added below
cache = {}
def worker(a)
k = a.id
if k in cache: return cache[k]
out = cache[k] = f(a)
return out
return worker
def mid(lst):
    """Return a synthetic Eg at the centroid of `lst` (mean xs and mean ys)."""
    n = len(lst)
    out = Eg(xs=[0 for _ in lst[0].xs], ys=[0 for _ in lst[0].ys])
    for a in lst:
        out.xs = [b + x / n for b, x in zip(out.xs, a.xs)]
        # Bug fix: the original zipped against out.xs here, so the ys
        # accumulator was never initialized and averaged the wrong values.
        out.ys = [b + y / n for b, y in zip(out.ys, a.ys)]
    return out
def elite(lst, most=0):
    # NOTE(review): sketch code — `upper` and `dom` are undefined, `m` is
    # computed but unused, and `a.dominates(lst)` is missing its `stats`
    # argument; this cannot run as written.  Intent appears to be: rank by
    # domination count and drop the first `most` entries.
    n = len(lst)
    m = n * upper
    for a in lst: a.dominates(lst)
    return sorted(lst, key=dom)[most:]
def clone(x):
    """Return a new, empty instance of x's class."""
    return x.__class__()


def seeBelow():
    # Bug fix: the original `seeBelow() = assert 0, "..."` was not valid
    # Python; abstract placeholders now raise instead.
    raise NotImplementedError("implemented by subclass")
# -----
# ## Stat
# Track inforamtion about examples
class Stat(object):
    """Abstract base for incremental statistics (see Sym and Num below).

    Subclasses must define `add`, `same`, `sames` and `gap`, and must
    initialize an integer counter `self.n`.
    """
    missing = "?"  # marker used for unknown values

    def add(self, a):
        raise NotImplementedError("implemented by subclass")

    def same(self, a, b):
        raise NotImplementedError("implemented by subclass")

    def sames(self, a, b):
        raise NotImplementedError("implemented by subclass")

    def gap(self, a, b):
        raise NotImplementedError("implemented by subclass")

    def __add__(self, x):
        # Bug fix: the original mixed `i` and `self` as the receiver name.
        self.n += 1
        self.add(x)
        return self

    @classmethod
    def ako(cls, pat):
        """Return the first (sub)class whose name matches `pat`, skipping
        the abstract Stat/Stats names themselves."""
        me, no = cls.__name__, r"Stat(s)?"
        if re.match(pat, me) and not re.match(no, me):
            return cls
        for sub in cls.__subclasses__():
            return sub.ako(pat)
# ### Stats
# Composites of Stat
import maths
def atLeast4(n) : return max(4, round(math.log(n,2)))
class Pole:
def __init__(i,dist,,eg1,eg2);
i.dist = dist
i.lo, i.hi, i.c = eg1, eg2, dist(eg1, eg2)
i.stat = Num()
def __add__(i,eg):
a = i.dist(i.lo, eg)
b = i.dist(i.hi, eg)
c = i.c
x = (a**2 + c**2 - b**2) / (2*c)
i.stat + x
return x, x > i.stat.mu
any = random.choice
class Poles:
def __init__(i,stats=stats,dist=dist,enough=4):
i.enough = enough
i.poles, i.pool = [], []
#i.dist = lambda eg1,eg2 : eg1.gap(eg2,stats)
#i.all = lambda : stats.all
#i.n = lambda : stats.n
i.bins = {}
def add(i, eg,n):
if i.poles
m = 0
for n,pole in enumerate(i.poles):
x,half = pole.add(eg)
m = 2**n + half
i.bins[m] = i.bins.get(m,[]) + [eg]
else:
i.pool += [eg]
if len(i.pool) == enough:
dist = lambda eg1,eg2: eg1.gap(eg2,stats)
i.poles += [Pole(dist,any(i.pool), any(i.pool)]
class Stats(Stat):
def __init__(self, eg0, egs=[]):
i.n = 0
i.all=[]
self.ys = [clone(y) for y in eg0.ys]
self.xs = [clone(y) for y in eg0.xs]
[self + eg for eg in egs]
def add(self, eg):
"visitor over composite: recurse on parts"
i.n += 1
i.all += [eg]
[stat + a for a, stat in zip(eg.xs, self.xs)]
[stat + a for a, stat in zip(eg.ys, self.ys)]
def same(self, eg1, eg2):
"visitor over composite: false if any part different"
for a, b, stat in zip(eg1.ys, eg2.ys, self.ys):
if not stat.same(a, b):
return False
return True
def gap(self, eg1, eg2):
"visitor over composite: sum gaps in parts"
sum = 0
for a, b, stat in zip(eg1.xs, eg2.xs, self.xs):
sum += stat.gap(a, b)
return (sum / len(eg1.xs)**0.5
def gaps(self)
"Caching trick to reduce #distance calcs"
return cahced(self.gap)
def sames(self, a, b):
assert 0, "not defined for sets of stats"
# ### Sym
# Track info on symbols
class Sym(Stat):
    """Incremental statistics over symbolic (categorical) values."""

    def __init__(self):
        # Bug fix: the original line `self.w.=1; self.counts={}, self.n=0`
        # was a syntax error; this is the evident intent.
        self.w = 1
        self.counts = {}
        self.n = 0

    def add(self, a):
        self.counts[a] = self.counts.get(a, 0) + 1

    def same(self, a, b):
        return a is b

    def better(self, other):
        # Symbols have no order, so one Sym never "beats" another.
        return False

    def gap(self, a, b):
        # Bug fix: the original referenced an undefined bare name `missing`;
        # missing values are maximally distant.
        if a == Stat.missing or b == Stat.missing:
            return 1
        return 0 if a is b else 1

    def sames(self, other):
        """Chi-squared test (95% confidence) that two count dicts agree."""
        def countsequal(obs, xpect):
            x2, df = 0, -1
            for k in xpect:
                df += 1
                e = xpect[k]
                o = obs.get(k, 0)
                x2 += (o - e)**2 / e
            critical = interpolate(df, [  # 95% confidence
                (1, 3.841), (5, 11.070),
                (10, 18.307), (15, 24.996), (20, 31.410),
                (30, 43.66), (60, 79.08)])
            return x2 <= critical
        return countsequal(self.counts, other.counts)
# ### Num
# Track info on numbers
class Num(Stat):
    """Incremental (Welford-style) statistics over numeric values."""

    def __init__(self):
        self.n, self.mu, self.sd, self.m2 = 0, 0, 0, 0
        self.lo, self.hi, self.w = 10**32, -10**32, 0

    def same(self, a, b):
        # "Same" under a Cohen's-d style threshold on the observed spread.
        return abs(a - b) <= self.sd * COHEN

    def better(self, them):
        # Bug fix: the original referenced an undefined name `i`; the sign
        # of self.w decides whether smaller or larger means better.
        return not self.sames(them) and \
            (self.mu < them.mu if self.w < 0 else self.mu > them.mu)

    def sames(self, them):
        """True when the two Nums are statistically indistinguishable
        (small Hedges' g effect size OR a t-test at 95% confidence)."""
        small = 0.38

        def ttest():
            df = min(self.n - 1, them.n - 1)
            critical = interpolate(df, [
                (1, 6.314), (5, 2.015), (10, 1.812), (15, 1.753),
                (20, 1.725), (30, 1.697), (60, 1.671)])
            # Bug fix: the original referenced undefined names `i` and `j`.
            return (abs(self.mu - them.mu) /
                    ((self.sd / self.n + them.sd / them.n)**0.5)) < critical

        def hedges():
            num = (self.n - 1) * self.sd**2 + (them.n - 1) * them.sd**2
            denom = (self.n - 1) + (them.n - 1)
            sp = (num / denom)**0.5
            delta = abs(self.mu - them.mu) / sp
            correction = 1 - 3.0 / (4 * (self.n + them.n - 2) - 1)
            return delta * correction < small

        return hedges() or ttest()

    def norm(self, a):
        """Map `a` onto 0..1 using the observed lo..hi range.
        (Hoisted out of gap(); Eg.dominate calls stat.norm directly.)"""
        if a == Stat.missing:
            return a
        return (a - self.lo) / (self.hi - self.lo + 10**-32)

    def gap(self, a, b):
        # Missing values: assume maximal distance from the known value.
        if a == Stat.missing and b == Stat.missing:
            return 1
        a = self.norm(a)
        b = self.norm(b)
        if a == Stat.missing:
            a = 0 if b > 0.5 else 1
        if b == Stat.missing:
            b = 0 if a > 0.5 else 1
        return (a - b)**2

    def add(self, a):
        # Welford's online update; `self.n` is incremented by Stat.__add__.
        d = a - self.mu
        self.mu = self.mu + d / self.n
        self.m2 = self.m2 + d * (a - self.mu)
        self.sd = (self.m2 / (self.n - 1 + 0.0001))**0.5
        if a < self.lo:
            self.lo = a
        if a > self.hi:
            self.hi = a  # bug fix: the original set self.hi = 1
| StarcoderdataPython |
11283717 | <filename>cutout/util.py
#!/usr/bin/python
#-*- coding:utf8 -*-
import sys,re
import urllib.parse as urlparse
## 补全不足
# @side 填充位置 left
def fillside(stuff, width=None, fill=' ', side='left'):
    """Pad `stuff` (stringified) with `fill` characters up to `width`.

    `side` selects where the padding goes ('left' or 'right'); any other
    side, a missing/non-int width, or input already at least `width` wide
    returns the value unpadded.
    """
    if not width or not isinstance(width, int):
        return stuff
    stuff = str(stuff)
    w = len(stuff)
    if w > width:
        # Bug fix: the original returned the undefined name `num` here.
        return stuff
    fillstr = fill * (width - w)
    if side == 'left':
        return fillstr + stuff
    elif side == 'right':
        return stuff + fillstr
    else:
        return stuff
## 限定数值范围
def rangable(num, low=None, top=None):
    """Clamp `num` into [low, top]; either bound may be None (unbounded).

    Bug fix: bounds are compared with `is not None`, so a bound of 0
    (previously falsy, hence silently ignored) now clamps correctly.
    """
    if low is not None and num < low:
        return low
    if top is not None and num > top:
        return top
    return num
## 解析命令行参数
# @kr 去掉key里的 “-” 符号
def parse_argv(argv, kr='-'):
    """Parse a flat list of command-line tokens into a {key: value} dict.

    Tokens are consumed in (key, value) pairs; `kr` (default "-") is
    stripped from the keys.  A trailing key with no value maps to "".
    """
    result = {}
    total = len(argv)
    for pos in range(0, total, 2):
        key = argv[pos].replace(kr, '')
        value = argv[pos + 1] if pos + 1 < total else ''
        result[key] = value
    return result
## 将计时器"时:分:秒"字符串转换为秒数间隔
def time2sec(sTime):
    """Convert an "H:MM:SS" (or "MM:SS") string into a number of seconds.

    Mirrors the original contract: on bad input it returns an error string
    instead of raising.
    """
    if len(sTime) <= 5:
        sTime = '0:' + sTime  # pad in a missing hour field
    pattern = re.compile(r"^([0-9]+):([0-5][0-9]):([0-5][0-9])$")
    try:
        matched = pattern.match(sTime)
    except TypeError:
        return "[InModuleError]:time2sec(sTime) invalid argument type"
    if not matched:
        return "[InModuleError]:time2sec(sTime) invalid argument value"
    hours, minutes, seconds = (int(g) for g in matched.group(1, 2, 3))
    return 3600 * hours + 60 * minutes + seconds
## 将秒数间隔转换为计时器"时:分:秒"字符串
# @fillzero 是否补全0位
# @fillhour 是否补全小时位
def sec2time(iItv, fillzero=True, fillhour=False):
    """Convert a number of seconds into an "H:MM:SS" / "MM:SS" string.

    The hour field is included only when non-zero or `fillhour` is set;
    `fillzero` zero-pads every field (hours included).  Mirrors the original
    contract of returning an error string for non-int input.
    """
    if type(iItv) is not int:
        return "[InModuleError]:sec2time(iItv) invalid argument type"
    # Same truncating arithmetic as the original (int() truncates toward 0).
    hours = int(iItv / 3600)
    rem = iItv - 3600 * hours
    minutes = int(rem / 60)
    seconds = int(rem - 60 * minutes)
    parts = (hours, minutes, seconds) if hours > 0 or fillhour else (minutes, seconds)
    if fillzero:
        fmt = lambda v: '0' + str(v) if v < 10 else str(v)
    else:
        fmt = str
    return ":".join(fmt(v) for v in parts)
## url编码
def urlencode(stuff):
    """URL-encode a dict (as a query string) or a plain string.

    Any other input type falls through and returns None, as before.
    """
    if isinstance(stuff, dict):
        return urlparse.urlencode(stuff)
    if isinstance(stuff, str):
        return urlparse.quote(stuff)
## url解码
def urldecode(str):
    """Decode %xx escapes in a URL-encoded string."""
    # NOTE(review): the parameter name shadows the builtin `str`; kept to
    # preserve the public signature.
    return urlparse.unquote(str)
| StarcoderdataPython |
231910 | <filename>translation/test_service.py
import sys
import grpc
# import the generated classes
import services.service_spec.translate_pb2_grpc as grpc_bt_grpc
import services.service_spec.translate_pb2 as grpc_bt_pb2
from services import registry
with open("example_de_article.txt", "r") as f:
TEST_TEXT = f.read()
if __name__ == "__main__":
try:
test_flag = False
if len(sys.argv) == 2:
if sys.argv[1] == "auto":
test_flag = True
endpoint = input("Endpoint (localhost:{}): ".format(registry["translate_server"]["grpc"])) if not test_flag else ""
if endpoint == "":
endpoint = "localhost:{}".format(registry["translate_server"]["grpc"])
grpc_method = input("Method (translate): ") if not test_flag else "translate"
text_content = input("Text: ") if not test_flag else TEST_TEXT
# open a gRPC channel
channel = grpc.insecure_channel("{}".format(endpoint))
request = grpc_bt_pb2.Request(
text=text_content,
source_language="de",
target_language="en"
)
stub = grpc_bt_grpc.TranslationStub(channel)
if grpc_method == "translate":
response = stub.translate(request)
print("Translation:", response.translation)
if len(response.translation) < 1:
exit(1)
else:
print("Invalid method!")
exit(1)
except Exception as e:
print(e)
exit(1)
| StarcoderdataPython |
1669125 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2013 <NAME> ( <EMAIL> )
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
'''
For input list of sequence compute distance between them by similar kmers.
Parameters:
k - kmer length
'''
import os
from trseeker.seqio.tab_file import sc_iter_tab_file
from trseeker.models.trf_model import TRModel
from trseeker.tools.ngrams_tools import *
from trseeker.settings import NGRAM_N, NGRAM_LENGTH
from collections import defaultdict
from trseeker.seqio.tr_file import get_all_trf_objs
class KmerBasedDistance(object):
SKIP_N = True
verbose = False
def __init__(self, data, k=NGRAM_LENGTH, verbose=False):
self.data = data
self.kmers = []
self.kmer2p = {}
self.pointer = -1
self.k = k
self.tf = defaultdict(int)
self.df = defaultdict(int)
self.p2docids = defaultdict(list)
self.p2doctfs = defaultdict(list)
self.p2docnfs = defaultdict(list)
self.N = len(self.data)
self.D = defaultdict(float)
if self.N>100 or verbose:
self.verbose = True
def count_kmers(self, docid):
''' Update tf and df data with k-mers from given sequence.
'''
# import cProfile, pstats, io
# pr = cProfile.Profile()
# pr.enable()
sequence = self.data[docid]
_local_tf = defaultdict(int)
self.local_nf = {}
self.local_tf = defaultdict(int)
local_seen = {}
n = float(len(sequence))
for i in range(0, len(sequence) - self.k + 1):
kmer = sequence[i:i + self.k]
if self.SKIP_N and 'n' in kmer:
continue
_local_tf[kmer] += 1
for kmer in _local_tf:
if not kmer in self.kmers:
self.pointer += 1
self.kmer2p[kmer] = self.pointer
self.kmers.append(kmer)
p = self.kmer2p[kmer]
if not p in local_seen:
self.df[p] += 1
local_seen[p] = True
self.tf[p] += _local_tf[kmer]
self.local_tf[p] = _local_tf[kmer]
for p in self.local_tf:
self.local_nf[p] = self.local_tf[p]/n
# pr.disable()
# pr.print_stats()
def count_tf_df_for_data(self):
'''
'''
print "Count tf/df for data"
for docid in xrange(self.N):
import cProfile, pstats, io
pr = cProfile.Profile(timeunit=1)
pr.enable()
#self.count_kmers(docid)
sequence = self.data[docid]
if self.verbose:
print "Process tf/df: ", docid, self.N, len(sequence)
_local_tf = defaultdict(int)
local_nf = {}
local_tf = defaultdict(int)
local_seen = {}
n = float(len(sequence))
for i in xrange(0, len(sequence) - self.k + 1):
kmer = sequence[i:i + self.k]
if self.SKIP_N and 'n' in kmer:
continue
_local_tf[kmer] += 1
for kmer in _local_tf:
if not kmer in self.kmers:
self.pointer += 1
self.kmer2p[kmer] = self.pointer
self.kmers.append(kmer)
p = self.kmer2p[kmer]
if not p in local_seen:
self.df[p] += 1
local_seen[p] = True
self.tf[p] += _local_tf[kmer]
local_tf[p] = _local_tf[kmer]
for p in local_tf:
local_nf[p] = local_tf[p]/n
for p in local_tf:
self.p2docids[p].append(docid)
self.p2doctfs[p].append(local_tf[p])
self.p2docnfs[p].append(local_nf[p])
pr.disable()
pr.print_stats()
print
def compute_distances(self):
'''
'''
print "Compute distances"
kmer_n = len(self.df)
for m, p in enumerate(self.df):
print m, kmer_n, "\r",
if self.df[p] == 1:
continue
docids = self.p2docids[p]
for i, docid_a in enumerate(docids):
for j, docid_b in enumerate(docids[i+1:]):
if docid_a < docid_b:
key = "%s\t%s" % (docid_a, docid_b)
else:
key = "%s\t%s" % (docid_b, docid_a)
self.D[key] += min(
self.p2docnfs[p][i],
self.p2docnfs[p][j]
)
print
def save_index_to_file(self, file_name):
'''
'''
print "Save index data"
with open(file_name, "w") as fh:
for p, kmer in enumerate(self.kmers):
d = (kmer,
get_revcomp(kmer),
self.tf[p],
self.df[p],
",".join(map(str, self.p2docids[p])),
",".join(map(str, self.p2doctfs[p])),
",".join(map(str, self.p2docnfs[p])),
)
d = "\t".join(map(str, d))
fh.write("%s\n" % d)
def save_distances_to_file(self, file_name):
'''
'''
print "Save distance data"
with open(file_name, "w") as fh:
for key, value in self.D.items():
fh.write("%s\t%s\n" % (key, value))
def compute_kmer_profiles_for_trs(trf_large_file, output_folder, k):
'''
'''
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
for i, trf_obj in enumerate(sc_iter_tab_file(trf_large_file, TRModel)):
print "Compute for", i, "\r",
file_name = os.path.join(output_folder, "%s.ngram" % str(trf_obj.trf_id))
ngrams = get_ngrams_freq(trf_obj.trf_array, m=NGRAM_N, n=k)
with open(file_name, "w") as fh:
for (ngram, tf, nf) in ngrams:
rngram = get_revcomp(ngram)
if ngram < rngram:
data = "%s\t%s\t%s\t%s\n" % (ngram, rngram, tf, nf)
else:
data = "%s\t%s\t%s\t%s\n" % (rngram, ngram, tf, nf)
fh.write(data)
print
def get_dust_score(sequence, k=4):
    ''' Return the DUST low-complexity score for the sequence.

    The score is sum(c * (c - 1) / 2) over the counts c of each distinct
    k-mer, divided by the number of k-mer windows.  Returns 0.0 when the
    sequence is shorter than k (no windows).

    BUGFIX: the previous version did ``total += score`` on every loop
    iteration, accumulating running prefix sums of the score instead of
    the score itself, which inflated the result for multi-kmer sequences.
    '''
    n_windows = len(sequence) - k + 1
    if n_windows <= 0:
        # Too short for a single window; also avoids ZeroDivisionError.
        return 0.0
    d = defaultdict(int)
    for i in range(0, n_windows):
        d[sequence[i:i + k]] += 1
    score = 0.
    for v in d.values():
        score += v * (v - 1) / 2.
    return score / n_windows
def compile_ngrams(trf_large_file, ngram_index_file, k=NGRAM_LENGTH, cutoff=None, dust=False):
""" Compile ngrams collection for given project."""
data = []
trf_index = []
print "Read arrays..."
trf_index = get_all_trf_objs(trf_large_file)
trf_index = [trf_obj for trf_obj in trf_index if trf_obj.trf_array_length >= k]
data = [trf_obj.trf_array for trf_obj in trf_index]
print "Skipped %s arrays as short" % (len(trf_index) - len(data))
print "Process kmers..."
index_data = process_list_to_kmer_index(data, k, docids=True, cutoff=cutoff)
_process_index_data_to_file(ngram_index_file, index_data, k, dust=dust, trf_index=trf_index)
def _process_index_data_to_file(ngram_index_file, index_data, k, dust=False, trf_index=None):
print "Sort data..."
result = []
skipped_by_dust = 0
for i, (key, revkey, tf, df, docids, freqs) in enumerate(index_data):
if dust:
if get_dust_score(key) > dust:
skipped_by_dust += 1
continue
new_doc_ids = []
for j in docids:
new_doc_ids.append(trf_index[j].trf_id)
# sort docsis by freqs
items = []
for pos in range(len(new_doc_ids)):
all_items = trf_index[pos].trf_array_length - k + 1
if all_items <= 0:
continue
items.append((
freqs[pos] * 1. / all_items,
new_doc_ids[pos]
))
items.sort()
new_doc_ids = [str(x[1]) for x in items]
freqs = [str(x[0]) for x in items]
data = [key, revkey, tf, df, ",".join(new_doc_ids), ",".join(freqs)]
data = "%s\n" % "\t".join(map(str, data))
result.append(data)
if dust:
print "Skipped by dust:", skipped_by_dust
print "Save data to %s..." % ngram_index_file
with open(ngram_index_file, "w") as fh:
fh.writelines(result)
def compile_kmer_index_from_arrays(arrays, ngram_index_file, k=NGRAM_LENGTH):
""" Compile ngrams collection for given project."""
print "Process kmers..."
index_data = process_list_to_kmer_index(arrays, k, docids=True)
print "Sort data..."
result = []
for i, (key, revkey, tf, df, docids, freqs) in enumerate(index_data):
new_doc_ids = []
for j in docids:
new_doc_ids.append(j)
data = [key, revkey, tf, df, ",".join(map(str, new_doc_ids)), ",".join(map(str, freqs))]
data = "%s\n" % "\t".join(map(str, data))
result.append(data)
print "Save data..."
with open(ngram_index_file, "w") as fh:
fh.writelines(result)
def compile_kmer_index_from_kmer_profiles():
    """Not implemented: build a k-mer index from per-TR .ngram profile files.

    TODO: placeholder stub -- no behavior yet.
    """
    pass
def compute_distances(index_objs, id2length, index_function=None):
''' Compute distance between kmer profiles.
'''
print "Compute distances..."
n = len(index_objs)
D = {}
KEYS_CUTOFF = 1000000
chunk = 0
for k, kmer_index in enumerate(index_objs):
ids = kmer_index.docs
print "Current size", len(ids)
for i, trid_a in enumerate(ids):
print k, n, i, len(ids), "\r",
for j, trid_b in enumerate(ids[i+1:]):
if trid_a < trid_b:
key = "%s\t%s" % (trid_a, trid_b)
else:
key = "%s\t%s" % (trid_b, trid_a)
D.setdefault(key, 0.0)
D[key] += min(
kmer_index.freqs[i]*1./id2length[trid_a],
kmer_index.freqs[j]*1./id2length[trid_b]
)
if len(D) > KEYS_CUTOFF:
print "save D for chunk", chunk
file_name = "chunk%s.dat" % chunk
chunk += 1
with open(file_name, "w") as fh:
for key in D:
s = "%s\t%s\n" % (key, D[key])
fh.write(s)
D = {}
index_objs[k] = None
return D
def compute_distances_for_index(index, id2length, index_function=None):
''' Compute distance between kmer profiles.
'''
print "Compute distances..."
n = len(index)
D = {}
for k, kmer_index in enumerate(index):
ids = kmer_index[4]
for i, trid_a in enumerate(ids):
print k, n, i, len(ids), "\r",
for j, trid_b in enumerate(ids[i+1:]):
if trid_a < trid_b:
key = "%s\t%s" % (trid_a, trid_b)
else:
key = "%s\t%s" % (trid_b, trid_a)
D.setdefault(key, 0.0)
D[key] += min(
kmer_index[5][i]*1./id2length[trid_a],
kmer_index[5][j]*1./id2length[trid_b]
)
index[k] = None
return D
def compute_distances_for_index_by_raw_kmers(index, id2length, index_function=None):
''' Compute distance between kmer profiles.
'''
print "Compute distances..."
n = len(index)
D = {}
for k, kmer_index in enumerate(index):
ids = kmer_index[4]
for i, trid_a in enumerate(ids):
print k, n, i, len(ids), "\r",
for j, trid_b in enumerate(ids[i+1:]):
if trid_a < trid_b:
key = "%s\t%s" % (trid_a, trid_b)
else:
key = "%s\t%s" % (trid_b, trid_a)
D.setdefault(key, 0.0)
D[key] += min(
kmer_index[5][i],
kmer_index[5][j]
)
index[k] = None
return D | StarcoderdataPython |
3384822 | <reponame>softester-git/pytraining_v001<filename>model/group.py<gh_stars>0
from sys import maxsize
class Group:
    """Value object describing an address-book group for the tests.

    Equality treats a missing ``group_id`` on either side as a wildcard
    and otherwise compares id and name.
    """

    def __init__(self, group_name=None, group_header=None, group_footer=None, group_id=None, contacts=None):
        self.group_name = group_name
        self.group_header = group_header
        self.group_footer = group_footer
        self.group_id = group_id
        self.contacts = contacts

    def __repr__(self):
        return "{}:{}:{}:{}".format(self.group_name, self.group_header,
                                    self.group_footer, self.group_id)

    def __eq__(self, other):
        ids_match = (self.group_id is None
                     or other.group_id is None
                     or self.group_id == other.group_id)
        return ids_match and self.group_name == other.group_name

    def id_or_max(self):
        # Groups without an id sort after every real id.
        return int(self.group_id) if self.group_id else maxsize
| StarcoderdataPython |
8132847 | <filename>run/predist.py<gh_stars>1-10
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
"""
Module creates input for dist to use it. It also creates instances file that contains the mapping for dists
results and the instances.
"""
import sys
from itertools import chain
import gzip
# Open the gzipped instance-name file given as the first CLI argument.
instance_f = gzip.open(sys.argv[1], 'w')
for line in sys.stdin:
    # Input row layout: column 0 is the instance name, column 1 is skipped,
    # columns 2.. are numeric values (whitespace separated) -- TODO confirm
    # the meaning of column 1 against the upstream producer.
    line = line.split()
    m = len(line)
    # Record the instance name so dist's output rows can be mapped back.
    print >> instance_f, line[0]
    # Pair each value with its 0-based column index, then sort by value,
    # largest first.
    t = zip(range(m-2), map(float, line[2:]))
    values = sorted(t, key=lambda x: x[1], reverse=True)
    # Emit: number of following fields, then the flattened index/value pairs
    # (the trailing comma keeps both print statements on one output line).
    print (m-2) * 2,
    print " ".join(map(str, chain.from_iterable(values)))
instance_f.close()
| StarcoderdataPython |
6572255 | <reponame>crazy-zxx/3d-lmnet-update
import os
# Run `cd ..` and the training script as two shell commands (the embedded
# newline separates them), so train_lm.sh executes from the repository root.
# NOTE(review): subprocess.run([...], cwd="..") would be the safer idiom.
os.system('cd .. \n bash ./scripts/train_lm.sh')
1749490 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# NOTE(review): database credentials are hard-coded in source; move them to
# environment variables / configuration before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:postgres@localhost:5432/python1_ass4'
# Disable SQLAlchemy's modification-tracking signals (not used here).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Tablecoin(db.Model):
    """ORM model holding one news item per row for a named coin.

    NOTE(review): the table is named 'NEWS' although the class is named
    Tablecoin -- confirm the mismatch is intentional.
    """
    __tablename__ = 'NEWS'
    id = db.Column(db.Integer, primary_key=True)
    name_of_coin = db.Column( db.VARCHAR(255))
    news = db.Column( db.VARCHAR(1000))
    # NOTE(review): the `id` parameter shadows the builtin; callers pass it
    # positionally, so it is harmless but worth renaming eventually.
    def __init__(self,id,name_of_coin, news):
        self.id = id
        self.name_of_coin = name_of_coin
        self.news = news
5151455 | <gh_stars>100-1000
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cross section table for depletion"""
import unittest
from armi.nuclearDataIO.cccc import isotxs
from armi.physics.neutronics.isotopicDepletion import (
crossSectionTable,
isotopicDepletionInterface as idi,
)
from armi.physics.neutronics.latticePhysics import ORDER
from armi.reactor.flags import Flags
from armi.reactor.tests.test_blocks import loadTestBlock
from armi.reactor.tests.test_reactors import loadTestReactor
from armi.settings import Settings
from armi.tests import ISOAA_PATH
class TestCrossSectionTable(unittest.TestCase):
    """Tests for the depletion cross-section (reaction-rate) table."""

    def test_makeTable(self):
        """Build a reaction-rate table for a test block and check its contents."""
        obj = loadTestBlock()
        obj.p.mgFlux = range(33)
        core = obj.getAncestorWithFlags(Flags.CORE)
        core.lib = isotxs.readBinary(ISOAA_PATH)
        table = crossSectionTable.makeReactionRateTable(obj)
        # One table entry per nuclide; the table is named after the block.
        self.assertEqual(len(obj.getNuclides()), len(table))
        self.assertEqual(obj.getName(), "B0001-000")
        self.assertEqual(table.getName(), "B0001-000")
        self.assertTrue(table.hasValues())
        xSecTable = table.getXsecTable()
        self.assertEqual(len(xSecTable), 11)
        self.assertIn("xsecs", xSecTable[0])
        self.assertIn("mcnpId", xSecTable[-1])
    def test_isotopicDepletionInterface(self):
        """A freshly built abstract depleter has no burn time and nothing to deplete."""
        o, r = loadTestReactor()
        cs = Settings()
        aid = idi.AbstractIsotopicDepleter(r, cs)
        self.assertIsNone(aid.efpdToBurn)
        self.assertEqual(len(aid._depleteByName), 0)
        self.assertEqual(len(aid.getToDeplete()), 0)
        self.assertEqual(ORDER, 5.0)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3563530 | from typing import Union, NamedTuple, Dict, Set, Iterable, Optional
from collections import OrderedDict
from conllu import TokenList
# Constants
# Name of the CoNLL-U-Plus column that carries MWE annotations (.cupt).
MWE_FIELD = "parseme:mwe"
# Field value meaning "this token belongs to no MWE".
MWE_NONE = "*"
# Field value meaning "annotation unknown / not annotated".
# (The identifier spelling "UNKOWN" is kept: it is referenced elsewhere.)
MWE_UNKOWN = "_"
# Token ID is normally a single number (1, 2, ...), but it can be also
# a three-element tuple in special situations, for instance:
# * "1.1" => (1, '.', 1)
# * "1-2" => (1, '-', 2)
TokID = Union[int, tuple]
# MWE identifier (has the scope of the corresponding sentence)
MweID = int
# MWE category label (a string; e.g. PARSEME categories -- see format spec)
MweCat = str
class MWE(NamedTuple):
    """One multiword-expression annotation within a single sentence.

    ``cat`` is the category label, or None when the annotation carries no
    category (a bare numeric ID in the .cupt field); ``span`` is the set
    of IDs of the tokens this MWE covers.
    """
    cat: Optional[MweCat]
    span: Set[TokID]
def _join_mwes(x: MWE, y: MWE) -> MWE:
    """Merge two MWE fragments into a single annotation.

    Both fragments must agree on the category (a None category defers to
    the other fragment's); a category clash indicates an annotation error
    in the source .cupt file and raises an exception.
    """
    if x.cat and y.cat and x.cat != y.cat:
        raise Exception("cannot join MWEs with different categories")
    return MWE(x.cat or y.cat, x.span | y.span)
def _update_dict_with(d: Dict[MweID, MWE], new: Dict[MweID, MWE]):
    """Merge the MWE fragments of *new* into *d*, joining on shared IDs."""
    for ix, fragment in new.items():
        if ix in d:
            d[ix] = _join_mwes(d[ix], fragment)
        else:
            d[ix] = fragment
def _mwes_in_tok(tok: OrderedDict) -> Dict[MweID, MWE]:
    """Extract the MWE fragments annotated on a single token.

    The annotation field holds ";"-separated entries of the form "ID:CAT"
    (first token of an MWE) or a bare "ID" (continuation token).  Returns
    a map from sentence-local MWE ID to a single-token MWE fragment.

    CONSISTENCY FIX: the field is read through the MWE_FIELD constant
    instead of repeating the literal "parseme:mwe" (same value).
    """
    mwe_anno = tok[MWE_FIELD]
    if mwe_anno in [MWE_NONE, MWE_UNKOWN]:
        return dict()
    result = dict()
    # NB: all fragments of this token share one span set; _join_mwes builds
    # new sets on merge, so the sharing is safe as long as spans are not
    # mutated in place.
    span = set([tok["id"]])
    for mwe_raw in mwe_anno.split(';'):
        mwe_info = mwe_raw.split(':')
        if len(mwe_info) == 2:
            (ix, cat) = mwe_info
        else:
            (ix,), cat = mwe_info, None
        result[int(ix)] = MWE(cat, span)
    return result
def retrieve_mwes(sent: TokenList) -> Dict[MweID, MWE]:
    """Collect all MWE annotations of a sentence, keyed by sentence-local ID."""
    mwes: Dict[MweID, MWE] = dict()
    for tok in sent:
        _update_dict_with(mwes, _mwes_in_tok(tok))
    return mwes
def clear_mwes(sent: TokenList, value=MWE_NONE):
    """Overwrite the MWE annotation of every token in *sent* with *value*."""
    for token in sent:
        token[MWE_FIELD] = value
def add_mwe(sent: TokenList, mwe_id: MweID, mwe: MWE):
    """Annotate *sent* with *mwe* under the identifier *mwe_id*.

    The first (lowest-ID) token of the span receives "ID:CAT" (or a bare
    "ID" when the MWE has no category); every other span token receives
    the bare "ID".  Existing annotations are extended with ";".  No check
    is made for an already-used ID or a duplicate MWE -- use with caution.
    """
    # Span tokens in ascending ID order; sanity-check the invariants.
    span = sorted(mwe.span)
    assert len(span) >= 1
    assert span[0] == min(span)
    # Map token IDs to the token dicts of this sentence.
    tok_map = {tok['id']: tok for tok in sent}

    def mark(tok_id, mwe_str):
        # Append the annotation, replacing a "no MWE"/"unknown" marker.
        tok = tok_map[tok_id]
        if tok[MWE_FIELD] in [MWE_NONE, MWE_UNKOWN]:
            tok[MWE_FIELD] = mwe_str
        else:
            tok[MWE_FIELD] += ";" + mwe_str

    head_str = ":".join([str(mwe_id), mwe.cat]) if mwe.cat else str(mwe_id)
    mark(span[0], head_str)
    for tok_id in span[1:]:
        mark(tok_id, str(mwe_id))
def replace_mwes(sent: TokenList, mwes: Iterable[MWE]):
    """Discard the sentence's MWE annotations and annotate *mwes* instead,
    numbering them 1, 2, ... in iteration order."""
    clear_mwes(sent)
    for fresh_id, mwe in enumerate(mwes, start=1):
        add_mwe(sent, fresh_id, mwe)
| StarcoderdataPython |
1783719 | import dgl
import dgl.function as Fn
from dgl.ops import edge_softmax
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleHGNConv(nn.Module):
    """Simple-HGN convolution layer: multi-head, edge-type-aware attention (DGL).

    GAT-style attention in which the unnormalised logit of an edge is the
    sum of a source-node term, a destination-node term, and a term derived
    from a learned per-edge-type embedding.  Optionally mixes this layer's
    attention with the previous layer's (residual attention, weight
    ``beta``) and adds a linear node-feature residual.
    """
    def __init__(self,
                 edge_feats,
                 in_features,
                 out_features,
                 num_heads,
                 num_etype,
                 feat_drop=0.0,
                 attn_drop=0.5,
                 negative_slope=0.2,
                 residual=True,
                 activation=F.elu,
                 beta=0.0
                 ):
        """
        :param edge_feats: size of the learned edge-type embedding
        :param in_features: input node-feature size
        :param out_features: output node-feature size per head
        :param num_heads: number of attention heads
        :param num_etype: number of edge types in the graph
        :param feat_drop: dropout rate applied to the input node features
        :param attn_drop: dropout rate for attention weights.
            NOTE(review): ``self.dropout`` is constructed below but never
            applied in forward() -- confirm whether that is intended.
        :param negative_slope: LeakyReLU slope for the attention logits
        :param residual: if True, add a linear residual on node features
        :param activation: activation applied to the output features
        :param beta: weight of the previous layer's attention (0 disables)
        """
        super(SimpleHGNConv, self).__init__()
        self.edge_feats = edge_feats
        self.in_features = in_features
        self.out_features = out_features
        self.num_heads = num_heads
        self.num_etype = num_etype
        # One learned embedding row per edge type.
        self.edge_emb = nn.Parameter(torch.zeros(size=(num_etype, edge_feats)))
        # Node projection (all heads fused) and edge-embedding projection.
        self.W = nn.Parameter(torch.FloatTensor(
            in_features, out_features * num_heads))
        self.W_e = nn.Parameter(torch.FloatTensor(
            edge_feats, edge_feats * num_heads))
        # Per-head attention vectors for source node, destination node, edge.
        self.a_l = nn.Parameter(torch.empty(size=(1, num_heads, out_features)))
        self.a_r = nn.Parameter(torch.empty(size=(1, num_heads, out_features)))
        self.a_e = nn.Parameter(torch.empty(size=(1, num_heads, edge_feats)))
        nn.init.xavier_uniform_(self.edge_emb, gain=1.414)
        nn.init.xavier_uniform_(self.W, gain=1.414)
        nn.init.xavier_uniform_(self.W_e, gain=1.414)
        nn.init.xavier_uniform_(self.a_l.data, gain=1.414)
        nn.init.xavier_uniform_(self.a_r.data, gain=1.414)
        nn.init.xavier_uniform_(self.a_e.data, gain=1.414)
        self.feat_drop = nn.Dropout(feat_drop)
        self.dropout = nn.Dropout(attn_drop)
        self.leakyrelu = nn.LeakyReLU(negative_slope)
        self.activation = activation
        if residual:
            self.residual = nn.Linear(in_features, out_features * num_heads)
        else:
            # Registered as a None buffer so self.residual always exists.
            self.register_buffer("residual", None)
        self.beta = beta
    def forward(self, g, h):
        """Run one propagation step.

        :param g: DGLGraph with integer edge types in ``g.edata['_TYPE']``;
            if ``g.edata['alpha']`` is present it is treated as the previous
            layer's attention for the residual-attention mix.
        :param h: node features -- assumed shape (num_nodes, in_features);
            TODO confirm against callers.
        :return: node features of shape (num_nodes, num_heads * out_features);
            also stores this layer's attention in ``g.edata['alpha']``.
        """
        emb = self.feat_drop(h)
        # Project and split into heads: (num_nodes, num_heads, out_features).
        emb = torch.matmul(emb, self.W).view(-1, self.num_heads, self.out_features)
        # Replace any NaNs in the projected features with zero.
        emb[torch.isnan(emb)] = 0.0
        # Per-head projected embedding for every edge *type*.
        e = torch.matmul(self.edge_emb, self.W_e).view(-1,
                                                       self.num_heads, self.edge_feats)
        row = g.edges()[0]
        col = g.edges()[1]
        tp = g.edata['_TYPE']
        # tp = g.edge_type
        # Attention-logit components: source node, destination node, edge type.
        h_l = (self.a_l * emb).sum(dim=-1)[row]
        h_r = (self.a_r * emb).sum(dim=-1)[col]
        h_e = (self.a_e * e).sum(dim=-1)[tp]
        edge_attention = self.leakyrelu(h_l + h_r + h_e)
        edge_attention = edge_softmax(g, edge_attention)
        # Residual attention: blend with the previous layer's weights.
        if 'alpha' in g.edata.keys():
            res_attn = g.edata['alpha']
            edge_attention = edge_attention * \
                (1 - self.beta) + res_attn * self.beta
        with g.local_scope():
            h_prime = []
            emb = emb.permute(1, 0, 2).contiguous()
            # Aggregate neighbour features head by head, weighted by attention.
            for i in range(self.num_heads):
                g.edata['alpha'] = edge_attention[:, i]
                g.srcdata.update({'emb': emb[i]})
                g.update_all(Fn.u_mul_e('emb', 'alpha', 'm'),
                             Fn.sum('m', 'emb'))
                h_prime.append(g.ndata['emb'])
            h_output = torch.cat(h_prime, dim=1)
        # Expose this layer's attention to the next layer (residual attention).
        g.edata['alpha'] = edge_attention
        if self.residual:
            res = self.residual(h)
            h_output += res
        h_output = self.activation(h_output)
        return h_output
| StarcoderdataPython |
8140440 | <reponame>jdschleicher/CumulusCI
import responses
import pytest
from cumulusci.core.exceptions import CumulusCIException
from cumulusci.tasks.salesforce.users.permsets import (
AssignPermissionSets,
AssignPermissionSetLicenses,
AssignPermissionSetGroups,
)
from cumulusci.tasks.salesforce.tests.util import create_task
class TestCreatePermissionSet:
    """Tests for the AssignPermissionSets task.

    Each test stubs the Salesforce REST API with `responses`: the first GET
    resolves the target user (with its current assignments), the second GET
    resolves the requested permission set names to Ids, and any POSTs are
    the new assignments for sets the user does not yet hold.
    """
    @responses.activate
    def test_create_permset(self):
        """Assigns only the missing PermSet2; PermSet1 is already held."""
        task = create_task(
            AssignPermissionSets,
            {
                "api_names": "PermSet1,PermSet2",
            },
        )
        # Call 1: user lookup by username, including current assignments.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "005000000000000",
                        "PermissionSetAssignments": {
                            "done": True,
                            "totalSize": 1,
                            "records": [{"PermissionSetId": "0PS000000000000"}],
                        },
                    }
                ],
            },
        )
        # Call 2: resolve both requested permission set names to Ids.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CName+FROM+PermissionSet+WHERE+Name+IN+%28%27PermSet1%27%2C+%27PermSet2%27%29",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "0PS000000000000",
                        "Name": "PermSet1",
                    },
                    {
                        "Id": "0PS000000000001",
                        "Name": "PermSet2",
                    },
                ],
            },
        )
        # Call 3: the single POSTed assignment (for PermSet2).
        responses.add(
            method="POST",
            url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetAssignment/",
            status=200,
            json={"id": "0Pa000000000001", "success": True, "errors": []},
        )
        task()
        assert len(responses.calls) == 3
        assert "0PS000000000001" in responses.calls[2].request.body
    @responses.activate
    def test_create_permset__alias(self):
        """Same as test_create_permset, but the user is looked up by alias."""
        task = create_task(
            AssignPermissionSets,
            {
                "api_names": "PermSet1,PermSet2",
                "user_alias": "test",
            },
        )
        # Call 1: user lookup by alias instead of username.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "005000000000000",
                        "PermissionSetAssignments": {
                            "done": True,
                            "totalSize": 1,
                            "records": [{"PermissionSetId": "0PS000000000000"}],
                        },
                    }
                ],
            },
        )
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CName+FROM+PermissionSet+WHERE+Name+IN+%28%27PermSet1%27%2C+%27PermSet2%27%29",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "0PS000000000000",
                        "Name": "PermSet1",
                    },
                    {
                        "Id": "0PS000000000001",
                        "Name": "PermSet2",
                    },
                ],
            },
        )
        responses.add(
            method="POST",
            url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetAssignment/",
            status=200,
            json={"id": "0Pa000000000001", "success": True, "errors": []},
        )
        task()
        assert len(responses.calls) == 3
        assert "0PS000000000001" in responses.calls[2].request.body
    @responses.activate
    def test_create_permset__alias_raises(self):
        """Raises CumulusCIException when no user matches the given alias."""
        task = create_task(
            AssignPermissionSets,
            {
                "api_names": "PermSet1,PermSet2",
                "user_alias": "test",
            },
        )
        # The alias query returns no records, so the task must fail.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
            status=200,
            json={
                "done": True,
                "totalSize": 0,
                "records": [],
            },
        )
        with pytest.raises(CumulusCIException):
            task()
    @responses.activate
    def test_create_permset_raises(self):
        """Raises when a requested name (PermSet3) cannot be resolved to an Id."""
        task = create_task(
            AssignPermissionSets,
            {
                "api_names": "PermSet1,PermSet2,PermSet3",
            },
        )
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "005000000000000",
                        "PermissionSetAssignments": {
                            "done": True,
                            "totalSize": 1,
                            "records": [{"PermissionSetId": "0PS000000000000"}],
                        },
                    }
                ],
            },
        )
        # Only two of the three requested names resolve -> task must raise.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CName+FROM+PermissionSet+WHERE+Name+IN+%28%27PermSet1%27%2C+%27PermSet2%27%2C+%27PermSet3%27%29",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "0PS000000000000",
                        "Name": "PermSet1",
                    },
                    {
                        "Id": "0PS000000000001",
                        "Name": "PermSet2",
                    },
                ],
            },
        )
        with pytest.raises(CumulusCIException):
            task()
class TestCreatePermissionSetLicense:
    """Tests for the AssignPermissionSetLicenses task.

    Mirrors TestCreatePermissionSet, but against PermissionSetLicense
    records (resolved by DeveloperName) and PermissionSetLicenseAssign
    POSTs.
    """
    @responses.activate
    def test_create_permsetlicense(self):
        """Assigns only the missing PermSetLicense2; PermSetLicense1 is held."""
        task = create_task(
            AssignPermissionSetLicenses,
            {
                "api_names": "PermSetLicense1,PermSetLicense2",
            },
        )
        # Call 1: user lookup by username, including current license assignments.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "005000000000000",
                        "PermissionSetLicenseAssignments": {
                            "done": True,
                            "totalSize": 1,
                            "records": [{"PermissionSetLicenseId": "0PL000000000000"}],
                        },
                    }
                ],
            },
        )
        # Call 2: resolve the requested license developer names to Ids.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetLicense+WHERE+DeveloperName+IN+%28%27PermSetLicense1%27%2C+%27PermSetLicense2%27%29",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "0PL000000000000",
                        "DeveloperName": "PermSetLicense1",
                    },
                    {
                        "Id": "0PL000000000001",
                        "DeveloperName": "PermSetLicense2",
                    },
                ],
            },
        )
        # Call 3: the single POSTed license assignment.
        responses.add(
            method="POST",
            url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetLicenseAssign/",
            status=200,
            json={"id": "0Pa000000000001", "success": True, "errors": []},
        )
        task()
        assert len(responses.calls) == 3
        assert "0PL000000000001" in responses.calls[2].request.body
    @responses.activate
    def test_create_permsetlicense__no_assignments(self):
        """Both licenses are assigned when the user currently holds none."""
        task = create_task(
            AssignPermissionSetLicenses,
            {
                "api_names": "PermSetLicense1,PermSetLicense2",
            },
        )
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "005000000000000",
                        # This seems like a bug: the PermissionSetLicenseAssignments sub-query returns None if no PSLs are already assigned instead of returning an "empty list".
                        "PermissionSetLicenseAssignments": None,
                    }
                ],
            },
        )
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetLicense+WHERE+DeveloperName+IN+%28%27PermSetLicense1%27%2C+%27PermSetLicense2%27%29",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "0PL000000000000",
                        "DeveloperName": "PermSetLicense1",
                    },
                    {
                        "Id": "0PL000000000001",
                        "DeveloperName": "PermSetLicense2",
                    },
                ],
            },
        )
        # Two POSTs are expected: one per newly assigned license.
        responses.add(
            method="POST",
            url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetLicenseAssign/",
            status=200,
            json={"id": "0Pa000000000000", "success": True, "errors": []},
        )
        responses.add(
            method="POST",
            url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetLicenseAssign/",
            status=200,
            json={"id": "0Pa000000000001", "success": True, "errors": []},
        )
        task()
        assert len(responses.calls) == 4
        assert "0PL000000000000" in responses.calls[2].request.body
        assert "0PL000000000001" in responses.calls[3].request.body
    @responses.activate
    def test_create_permsetlicense__alias(self):
        """Same as test_create_permsetlicense, but the user is found by alias."""
        task = create_task(
            AssignPermissionSetLicenses,
            {
                "api_names": "PermSetLicense1,PermSetLicense2",
                "user_alias": "test",
            },
        )
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "005000000000000",
                        "PermissionSetLicenseAssignments": {
                            "done": True,
                            "totalSize": 1,
                            "records": [{"PermissionSetLicenseId": "0PL000000000000"}],
                        },
                    }
                ],
            },
        )
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetLicense+WHERE+DeveloperName+IN+%28%27PermSetLicense1%27%2C+%27PermSetLicense2%27%29",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "0PL000000000000",
                        "DeveloperName": "PermSetLicense1",
                    },
                    {
                        "Id": "0PL000000000001",
                        "DeveloperName": "PermSetLicense2",
                    },
                ],
            },
        )
        responses.add(
            method="POST",
            url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetLicenseAssign/",
            status=200,
            json={"id": "0Pa000000000001", "success": True, "errors": []},
        )
        task()
        assert len(responses.calls) == 3
        assert "0PL000000000001" in responses.calls[2].request.body
    @responses.activate
    def test_create_permsetlicense__alias_raises(self):
        """Raises CumulusCIException when no user matches the given alias."""
        task = create_task(
            AssignPermissionSetLicenses,
            {
                "api_names": "PermSetLicense1,PermSetLicense2",
                "user_alias": "test",
            },
        )
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
            status=200,
            json={
                "done": True,
                "totalSize": 0,
                "records": [],
            },
        )
        with pytest.raises(CumulusCIException):
            task()
    @responses.activate
    def test_create_permsetlicense_raises(self):
        """Raises when a requested developer name cannot be resolved to an Id."""
        task = create_task(
            AssignPermissionSetLicenses,
            {
                "api_names": "PermSetLicense1,PermSetLicense2,PermSetLicense3",
            },
        )
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetLicenseId+FROM+PermissionSetLicenseAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "005000000000000",
                        "PermissionSetLicenseAssignments": {
                            "done": True,
                            "totalSize": 1,
                            "records": [{"PermissionSetLicenseId": "0PL000000000000"}],
                        },
                    }
                ],
            },
        )
        # Only two of the three requested names resolve -> task must raise.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetLicense+WHERE+DeveloperName+IN+%28%27PermSetLicense1%27%2C+%27PermSetLicense2%27%2C+%27PermSetLicense3%27%29",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "0PL000000000000",
                        "DeveloperName": "PermSetLicense1",
                    },
                    {
                        "Id": "0PL000000000001",
                        "DeveloperName": "PermSetLicense2",
                    },
                ],
            },
        )
        with pytest.raises(CumulusCIException):
            task()
class TestCreatePermissionSetGroup:
@responses.activate
def test_create_permsetgroup(self):
task = create_task(
AssignPermissionSetGroups,
{
"api_names": "PermSetGroup1,PermSetGroup2",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetGroupId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetGroupId": "0PG000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetGroup+WHERE+DeveloperName+IN+%28%27PermSetGroup1%27%2C+%27PermSetGroup2%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PG000000000000",
"DeveloperName": "PermSetGroup1",
},
{
"Id": "0PG000000000001",
"DeveloperName": "PermSetGroup2",
},
],
},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetAssignment/",
status=200,
json={"id": "0Pa000000000001", "success": True, "errors": []},
)
task()
assert len(responses.calls) == 3
assert "0PG000000000001" in responses.calls[2].request.body
@responses.activate
def test_create_permsetgroup__alias(self):
task = create_task(
AssignPermissionSetGroups,
{
"api_names": "PermSetGroup1,PermSetGroup2",
"user_alias": "test",
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetGroupId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "005000000000000",
"PermissionSetAssignments": {
"done": True,
"totalSize": 1,
"records": [{"PermissionSetGroupId": "0PG000000000000"}],
},
}
],
},
)
responses.add(
method="GET",
url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetGroup+WHERE+DeveloperName+IN+%28%27PermSetGroup1%27%2C+%27PermSetGroup2%27%29",
status=200,
json={
"done": True,
"totalSize": 1,
"records": [
{
"Id": "0PG000000000000",
"DeveloperName": "PermSetGroup1",
},
{
"Id": "0PG000000000001",
"DeveloperName": "PermSetGroup2",
},
],
},
)
responses.add(
method="POST",
url=f"{task.org_config.instance_url}/services/data/v50.0/sobjects/PermissionSetAssignment/",
status=200,
json={"id": "0Pa000000000001", "success": True, "errors": []},
)
task()
assert len(responses.calls) == 3
assert "0PG000000000001" in responses.calls[2].request.body
    @responses.activate
    def test_create_permsetgroup__alias_raises(self):
        """Raise when no user matches the requested alias."""
        task = create_task(
            AssignPermissionSetGroups,
            {
                "api_names": "PermSetGroup1,PermSetGroup2",
                "user_alias": "test",
            },
        )
        # The alias query returns zero records, so the task cannot proceed.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetGroupId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Alias+%3D+%27test%27",
            status=200,
            json={
                "done": True,
                "totalSize": 0,
                "records": [],
            },
        )
        with pytest.raises(CumulusCIException):
            task()
    @responses.activate
    def test_create_permsetgroup_raises(self):
        """Raise when a requested group API name does not exist in the org."""
        task = create_task(
            AssignPermissionSetGroups,
            {
                "api_names": "PermSetGroup1,PermSetGroup2,PermSetGroup3",
            },
        )
        # Mock 1: default user lookup by Username, with one existing assignment.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2C%28SELECT+PermissionSetGroupId+FROM+PermissionSetAssignments%29+FROM+User+WHERE+Username+%3D+%27test-cci%40example.com%27",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "005000000000000",
                        "PermissionSetAssignments": {
                            "done": True,
                            "totalSize": 1,
                            "records": [{"PermissionSetGroupId": "0PG000000000000"}],
                        },
                    }
                ],
            },
        )
        # Mock 2: only two of the three requested groups resolve;
        # PermSetGroup3 is missing, so the task must raise.
        responses.add(
            method="GET",
            url=f"{task.org_config.instance_url}/services/data/v50.0/query/?q=SELECT+Id%2CDeveloperName+FROM+PermissionSetGroup+WHERE+DeveloperName+IN+%28%27PermSetGroup1%27%2C+%27PermSetGroup2%27%2C+%27PermSetGroup3%27%29",
            status=200,
            json={
                "done": True,
                "totalSize": 1,
                "records": [
                    {
                        "Id": "0PG000000000000",
                        "DeveloperName": "PermSetGroup1",
                    },
                    {
                        "Id": "0PG000000000001",
                        "DeveloperName": "PermSetGroup2",
                    },
                ],
            },
        )
        with pytest.raises(CumulusCIException):
            task()
| StarcoderdataPython |
class CursorAutoMover:
    """Bounces a target's cursor position around a rectangular region.

    Each update step advances the target along (xvel, yvel) in pixels per
    second and reflects the corresponding velocity component whenever the
    position crosses the xrange / yrange bounds.
    """

    def __init__(self, target, xrange=(0, 5000), yrange=(0, 3000), xvel=100, yvel=100):
        # target must expose getPos() -> (x, y) and setPos(x, y).
        self.target = target
        self.xvel = xvel
        self.yvel = yvel
        self.xrange = xrange
        self.yrange = yrange
        self.visible = False

    def draw(self):
        # Nothing to render; this object only moves the target.
        pass

    def update(self, secs, app):
        """Advance the target by *secs* seconds, bouncing off the bounds."""
        posTuple = self.target.getPos()
        pos = Vec2(posTuple[0], posTuple[1])
        pos.x = pos.x + self.xvel * secs
        pos.y = pos.y + self.yvel * secs
        if pos.x > self.xrange[1]:
            self.xvel = -abs(self.xvel)
        if pos.y > self.yrange[1]:
            self.yvel = -abs(self.yvel)
        if pos.x < self.xrange[0]:
            self.xvel = abs(self.xvel)
        # BUG FIX: this lower-bound check previously compared pos.y against
        # self.xrange[0], so the y velocity never reflected when xrange and
        # yrange had different lower bounds.
        if pos.y < self.yrange[0]:
            self.yvel = abs(self.yvel)
        self.target.setPos(pos.x, pos.y)
| StarcoderdataPython |
# Keep only the numbers in my_list that are divisible by 13.
my_list = [12, 65, 54, 39, 102, 339, 221]
result = [number for number in my_list if number % 13 == 0]
print("Numbers divided to 13 are ", result)
| StarcoderdataPython |
9698025 | <filename>kokemomo/plugins/engine/utils/km_logging.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
from logging.handlers import RotatingFileHandler, HTTPHandler
from kokemomo.settings import SETTINGS
__author__ = 'hiroki'
class KMLoggingHandlerList(object):
    """Singleton registry of logging handlers built from SETTINGS.

    The first instantiation constructs a RotatingFileHandler and an
    HTTPHandler from SETTINGS.LOGGER and quiets the noisy sqlalchemy
    loggers; every later instantiation returns the same object and reuses
    the same handlers.
    """
    __instance = None
    handlers = {}

    def __new__(cls, *args, **keys):
        # Classic singleton: build the handler table exactly once.
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
            cls.__initHandlers()
        return cls.__instance

    @classmethod
    def __initHandlers(cls):
        # Use RotatingFileHandler / HTTPHandler imported at module top
        # directly; the previous __import__()/getattr() indirection added
        # nothing but opacity.
        file_conf = SETTINGS.LOGGER['RotatingFileHandler']
        file_handler = RotatingFileHandler(
            filename=file_conf['filename'],
            maxBytes=file_conf['maxBytes'],
            backupCount=file_conf['backupCount'])
        file_handler.setFormatter(logging.Formatter(file_conf['format']))
        cls.handlers['RotatingFileHandler'] = file_handler

        http_conf = SETTINGS.LOGGER['HTTPHandler']
        cls.handlers['HTTPHandler'] = HTTPHandler(
            host=http_conf['host'],
            url=http_conf['url'],
            method=http_conf['method'])

        # Route sqlalchemy chatter to the rotating file and suppress
        # everything below CRITICAL (same four loggers as before, deduped).
        for logger_name in ('sqlalchemy.pool',
                            'sqlalchemy.engine',
                            'sqlalchemy.orm.unitofwork',
                            'sqlalchemy.engine.base.Engine'):
            sqllogger = logging.getLogger(logger_name)
            sqllogger.addHandler(cls.handlers['RotatingFileHandler'])
            sqllogger.setLevel(logging.CRITICAL)

    def get_handler(self, name):
        """Return the shared handler registered under *name*."""
        return KMLoggingHandlerList.handlers[name]
class KMLogger(object):
    """Thin wrapper that wires a plugin's named logger to its configured
    handler and level (both taken from SETTINGS.PLUGINS[name])."""

    def __init__(self, name):
        plugin_conf = SETTINGS.PLUGINS[name]
        self.logger = logging.getLogger(name)
        self.logger.setLevel(self.__get_level(plugin_conf['level']))
        # The handler list is a singleton, so this reuses shared handlers.
        self.logger.addHandler(
            KMLoggingHandlerList().get_handler(plugin_conf['logger']))

    def debug(self, msg, *args, **kwargs):
        """Log *msg* at DEBUG level."""
        self.logger.debug(msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """Log *msg* at INFO level."""
        self.logger.info(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """Log *msg* at ERROR level."""
        self.logger.error(msg, *args, **kwargs)

    def __get_level(self, level):
        # Translate the configured level name into a logging constant.
        # Unknown names yield None, matching the original fall-through.
        level_map = {
            'DEBUG': logging.DEBUG,
            'INFO': logging.INFO,
            'ERROR': logging.ERROR,
            'WARNING': logging.WARNING,
            'CRITICAL': logging.CRITICAL,
        }
        return level_map.get(level)
| StarcoderdataPython |
11266393 | #==============================================================================
#
# This code was developed as part of the Astronomy Data and Computing Services
# (ADACS; https://adacs.org.au) 2017B Software Support program.
#
# Written by: <NAME>, <NAME>, <NAME>
# Date: December 2017
#
# It is distributed under the MIT (Expat) License (see https://opensource.org/):
#
# Copyright (c) 2017 Astronomy Data and Computing Services (ADACS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#==============================================================================
# Email template sent when a new account must verify its email address.
# The {{...}} placeholders are substituted by the mailer before sending.
VERIFY_EMAIL_ADDRESS = {
    'subject': '[GBKFIT-Cloud] Please verify your email address',
    # NOTE: adjacent string literals concatenate with no separator; a space
    # was previously missing between "this" and "email", producing the
    # user-visible text "from thisemail address".  Fixed below.
    'message': (
        '<p>Dear {{title}} {{first_name}} {{last_name}}</p>'
        '<p>We have received a new account request with our GBKFIT-Cloud system from this '
        'email address. Please verify your email address by clicking on the following '
        '<a href="{{link}}" target="_blank">link</a>:</p>'
        '<p><a href="{{link}}" target="_blank">{{link}}</a></p>'
        '<p>If you believe that the email has been sent by mistake or you have not '
        'requested for an account please <strong>do not</strong> click on the link.</p>'
        '<p>Alternatively you can report this incident to <a '
        'href="mailto:<EMAIL>" target="_top"><EMAIL></a> for '
        'investigation.</p>'
        '<p> </p>'
        '<p>Regards,</p>'
        '<p>GBKFIT-Cloud Team</p>'
    ),
}
| StarcoderdataPython |
45153 | <reponame>alex/django-old
from django.test import TestCase
from models import Simple
class InitialSQLTests(TestCase):
    """Verify that the app's initial SQL fixture populates the Simple table."""

    def test_initial_sql(self):
        # The initial-data SQL is expected to insert exactly seven rows.
        row_count = Simple.objects.count()
        self.assertEqual(row_count, 7)
| StarcoderdataPython |
3485640 | #!/usr/bin/env python
# coding=utf8
"""
Created on 2014年2月15日
@author: liaoqiqi
"""
myfile = open("url_resources.txt")
lines = myfile.readlines()
print "use disconf;"
print 'delete from role_resource;'
for line in lines:
line = line.strip('\n')
if not line:
continue
if line[0] == '#':
continue
data = line.split()
if len(data) != 5 and len(data) != 3:
# print "cannot process this: " + line
continue
url = data[0]
desc = data[1]
if len(data) == 3:
role1_mask = data[2]
role2_mask = role1_mask
role3_mask = role1_mask
else:
role1_mask = data[2]
role2_mask = data[3]
role3_mask = data[4]
URL_PREFIX = "/api"
print "INSERT INTO `role_resource` (`role_id`, `url_pattern`, `url_description`, `method_mask`) VALUES"
print "(1,'" + URL_PREFIX + url + "' , '" + desc + "' , '" + role1_mask + "'),"
print "(2,'" + URL_PREFIX + url + "' , '" + desc + "' , '" + role2_mask + "'),"
print "(3,'" + URL_PREFIX + url + "' , '" + desc + "' , '" + role3_mask + "');"
| StarcoderdataPython |
1878505 | from typing import Type
from datatype import YLObject
class RawModule(YLObject):
    """Marker base class for raw (not yet processed) modules."""
    pass
class Module(RawModule):
    """Standard module type; see the yl() decorator for name binding."""
    pass
def yl(cls: Type[YLObject]):
    """Class decorator: bind qualified names onto YLObject class attributes.

    Every YLObject-valued attribute of *cls* gets a ``bindname`` of the form
    ``"<ClassName>_<attr>"``.  If the class defines an ``io`` member, it is
    decorated recursively.  Returns *cls* unchanged.
    """
    # name binding
    for k, v in cls.__dict__.items():
        if isinstance(v, YLObject):
            v.bindname = "{}_{}".format(cls.__name__, k)
    # Recurse into the nested io definition when present.  The original code
    # assigned through cls.__dict__ (a read-only mappingproxy), which always
    # raised TypeError and was silenced by a bare "except: pass"; the
    # recursive side effects still happened only because yl() mutates its
    # argument in place.  setattr() makes the rebinding explicit and drops
    # the bare except.
    io = cls.__dict__.get('io')
    if io is not None:
        setattr(cls, 'io', yl(io))
    return cls
| StarcoderdataPython |
1717103 | <filename>src/simmate/toolkit/transformations/coordinate_perturation_ordered.py
# -*- coding: utf-8 -*-
from simmate.toolkit.transformations.base import Transformation
class CoordinateOrderedPerturbation(Transformation):
    """Order-dependent coordinate mutation (stub, not yet implemented).

    Site coordinates are perturbed, with lower-order sites preferred for
    mutation, following the USPEX "coordinate mutation" scheme.
    """
    # known as "coordinate mutation" in USPEX
    # site locations are mutated where sites with lower order have higher preference for mutation
    # https://uspex-team.org/static/file/USPEX-LargeComplexSystems-2010.pdf
    # ""Coordinate mutation was found [2] to be ineffective, because “blind” displacement of the
    # atoms is much more likely to decrease the quality of a structure than to increase it.""
    #!!! because of the quote above, the coordinate displacement is order-dependent
    # one input structure maps to one output structure
    io_scale = "one_to_one"
    ninput = 1
    use_multiprocessing = False
    def __init__(
        self,
    ):
        # no configuration parameters yet
        pass
    def apply_transformation(self, structure, max_attempts=100):
        """Not implemented: currently returns None."""
        return
| StarcoderdataPython |
4830042 | from django.db import models
class ActivityRateIssue(models.Model):
    """Issue activity metrics for a single repository (one row per repo)."""
    # repository owner login (user or organization)
    owner = models.CharField(max_length=150)
    # repository name
    repo = models.CharField(max_length=150)
    # NOTE(review): rate semantics (units, how max vs. current are computed)
    # are not visible here — confirm against the code that writes these.
    activity_max_rate = models.DecimalField(max_digits=5, decimal_places=2,
                                            default=0.00)
    activity_rate = models.DecimalField(max_digits=5, decimal_places=2,
                                        default=0.00)
    # issue counts classified as active vs. dead
    active_issues = models.IntegerField(default=0)
    dead_issues = models.IntegerField(default=0)
    class Meta:
        # enforce a single row per (owner, repo) pair
        unique_together = (('owner', 'repo'),)
| StarcoderdataPython |
9794178 | <reponame>xiaomaiAI/pyansys
from sys import platform
import os
import pytest
import numpy as np
from vtk import (VTK_TETRA, VTK_QUADRATIC_TETRA, VTK_PYRAMID,
VTK_QUADRATIC_PYRAMID, VTK_WEDGE,
VTK_QUADRATIC_WEDGE, VTK_HEXAHEDRON,
VTK_QUADRATIC_HEXAHEDRON)
from pyvista import examples as pyvista_examples
import pyvista as pv
import pyansys
from pyansys import examples
# Some tests behave differently on macOS (see test_missing_midside).
IS_MAC = platform == 'darwin'
# VTK ids of the four linear solid cell types exercised by the write tests.
LINEAR_CELL_TYPES = [VTK_TETRA,
                     VTK_PYRAMID,
                     VTK_WEDGE,
                     VTK_HEXAHEDRON]
# Directory holding the bundled test CDB files.
test_path = os.path.dirname(os.path.abspath(__file__))
testfiles_path = os.path.join(test_path, 'test_data')
@pytest.fixture(scope='module')
def hex_archive():
    """Module-scoped Archive loaded from the bundled hexahedral example."""
    return pyansys.Archive(examples.hexarchivefile)
@pytest.fixture(scope='module')
def all_solid_cells_archive():
    """Module-scoped Archive containing every solid cell type."""
    return pyansys.Archive(os.path.join(testfiles_path, 'all_solid_cells.cdb'))
@pytest.fixture(scope='module')
def all_solid_cells_archive_linear():
    """Same archive, but with quadratic cells forced to linear on load."""
    return pyansys.Archive(os.path.join(testfiles_path, 'all_solid_cells.cdb'),
                           force_linear=True)
def test_read_mesh200():
    """MESH200 elements are read into the grid (1000 cells expected)."""
    archive = pyansys.Archive(os.path.join(testfiles_path, 'mesh200.cdb'))
    assert archive.grid.n_cells == 1000
def test_archive_init(hex_archive):
    """Archive exposes the raw parsed dict and a pyvista UnstructuredGrid."""
    assert isinstance(hex_archive._raw, dict)
    assert isinstance(hex_archive.grid, pv.UnstructuredGrid)
def test_parse_vtk(hex_archive):
    """Parsing populates points/cells and rejects non-iterable allowable_types."""
    grid = hex_archive.grid
    assert grid.points.size
    assert grid.cells.size
    assert 'ansys_node_num' in grid.point_arrays
    assert np.all(hex_archive.quality > 0)
    # scalar allowable_types values must raise TypeError
    with pytest.raises(TypeError):
        hex_archive._parse_vtk(allowable_types=-1)
    with pytest.raises(TypeError):
        hex_archive._parse_vtk(allowable_types=3.0)
def test_invalid_archive(tmpdir, hex_archive):
    """An archive containing only a node block has no grid to build."""
    nblock_filename = str(tmpdir.mkdir("tmpdir").join('nblock.cdb'))
    # write only the node block (no element block)
    pyansys.write_nblock(nblock_filename, hex_archive.nnum,
                         hex_archive.nodes)
    archive = pyansys.Archive(nblock_filename)
    with pytest.raises(AttributeError):
        archive.grid
def test_write_angle(tmpdir, hex_archive):
    """Node coordinates survive a node-block write/read round trip with angles."""
    nblock_filename = str(tmpdir.mkdir("tmpdir").join('nblock.cdb'))
    pyansys.write_nblock(nblock_filename, hex_archive.nnum,
                         hex_archive.nodes, hex_archive.node_angles)
    archive = pyansys.Archive(nblock_filename, parse_vtk=False)
    assert np.allclose(archive.nodes, hex_archive.nodes)
@pytest.mark.skipif(IS_MAC, reason="TODO: Unexplained behavior")
def test_missing_midside():
    """Elements with missing midside nodes still produce valid non-tet cells."""
    allowable_types = [45, 95, 185, 186, 92, 187]
    archive_file = os.path.join(testfiles_path, 'mixed_missing_midside.cdb')
    archive = pyansys.Archive(archive_file, allowable_types=allowable_types)
    assert (archive.quality > 0.0).all()
    assert not np.any(archive.grid.celltypes == VTK_TETRA)
def test_writehex(tmpdir, hex_archive):
    """Hexahedral grid round-trips through save_as_archive unchanged."""
    temp_archive = str(tmpdir.mkdir("tmpdir").join('tmp.cdb'))
    pyansys.save_as_archive(temp_archive, hex_archive.grid)
    archive_new = pyansys.Archive(temp_archive)
    assert np.allclose(hex_archive.grid.points, archive_new.grid.points)
    assert np.allclose(hex_archive.grid.cells, archive_new.grid.cells)
def test_writesector(tmpdir):
    """Cyclic-sector archive round-trips unchanged."""
    archive = pyansys.Archive(examples.sector_archive_file)
    filename = str(tmpdir.mkdir("tmpdir").join('tmp.cdb'))
    pyansys.save_as_archive(filename, archive.grid)
    archive_new = pyansys.Archive(filename)
    assert np.allclose(archive.grid.points, archive_new.grid.points)
    assert np.allclose(archive.grid.cells, archive_new.grid.cells)
def test_writehex_missing_elem_num(tmpdir, hex_archive):
    """Writer tolerates missing/invalid ANSYS element numbers and types."""
    # NOTE(review): mutates the module-scoped hex_archive fixture in place;
    # later tests in this module see these changes — confirm intended.
    grid = hex_archive.grid
    grid.cell_arrays['ansys_elem_num'][:10] = -1
    grid.cell_arrays['ansys_etype'] = np.ones(grid.number_of_cells)*-1
    grid.cell_arrays['ansys_elem_type_num'] = np.ones(grid.number_of_cells)*-1
    filename = str(tmpdir.mkdir("tmpdir").join('tmp.cdb'))
    pyansys.save_as_archive(filename, grid)
    archive_new = pyansys.Archive(filename)
    assert np.allclose(hex_archive.grid.points, archive_new.grid.points)
    assert np.allclose(hex_archive.grid.cells, archive_new.grid.cells)
def test_writehex_missing_node_num(tmpdir, hex_archive):
    """Writer tolerates missing ANSYS node numbers (shapes/sizes compared only)."""
    # NOTE(review): also mutates the shared module-scoped fixture in place.
    hex_archive.grid.point_arrays['ansys_node_num'][:-1] = -1
    temp_archive = str(tmpdir.mkdir("tmpdir").join('tmp.cdb'))
    pyansys.save_as_archive(temp_archive, hex_archive.grid)
    archive_new = pyansys.Archive(temp_archive)
    assert np.allclose(hex_archive.grid.points.shape, archive_new.grid.points.shape)
    assert np.allclose(hex_archive.grid.cells.size, archive_new.grid.cells.size)
def test_write_non_ansys_grid(tmpdir):
    """A plain pyvista grid without ANSYS data arrays can still be saved."""
    grid = pv.UnstructuredGrid(pyvista_examples.hexbeamfile)
    # strip the sample arrays so no ANSYS metadata remains
    del grid.point_arrays['sample_point_scalars']
    del grid.cell_arrays['sample_cell_scalars']
    archive_file = str(tmpdir.mkdir("tmpdir").join('tmp.cdb'))
    pyansys.save_as_archive(archive_file, grid)
def test_read_complex_archive(all_solid_cells_archive):
    """Node coordinates and cell makeup of the mixed-solid archive match the CDB."""
    # reference node coordinates copied verbatim from all_solid_cells.cdb
    nblock_expected = np.array([
        [3.7826539829200E+00, 1.2788958692644E+00, -1.0220880953640E+00],
        [3.7987359490873E+00, 1.2312085780780E+00, -1.0001885444969E+00],
        [3.8138798206653E+00, 1.1833200772896E+00, -9.7805743587145E-01],
        [3.7751258193793E+00, 1.2956563072306E+00, -9.9775569295981E-01],
        [3.7675976558386E+00, 1.3124167451968E+00, -9.7342329055565E-01],
        [3.8071756567432E+00, 1.2018089624856E+00, -9.5159140433025E-01],
        [3.8004714928212E+00, 1.2202978476816E+00, -9.2512537278904E-01],
        [3.7840345743299E+00, 1.2663572964392E+00, -9.4927433167235E-01],
        [3.8682501483615E+00, 1.4211343558710E+00, -9.2956245308371E-01],
        [3.8656154427804E+00, 1.4283573726940E+00, -9.3544082975315E-01],
        [3.8629807371994E+00, 1.4355803895169E+00, -9.4131920642259E-01],
        [3.8698134427618E+00, 1.4168612083433E+00, -9.3457292477788E-01],
        [3.8645201728196E+00, 1.4314324609914E+00, -9.4526873324423E-01],
        [3.8713767371621E+00, 1.4125880608155E+00, -9.3958339647206E-01],
        [3.8687181728010E+00, 1.4199362966407E+00, -9.4440082826897E-01],
        [3.8660596084399E+00, 1.4272845324660E+00, -9.4921826006588E-01],
        [3.7847463501820E+00, 1.2869612289286E+00, -1.0110875234148E+00],
        [3.7882161293470E+00, 1.2952473975570E+00, -1.0006326084202E+00],
        [3.7840036708439E+00, 1.3089808408341E+00, -9.8189659453120E-01],
        [3.7736944340897E+00, 1.3175655146540E+00, -9.6829193559890E-01],
        [3.7797912123408E+00, 1.3227142841112E+00, -9.6316058064216E-01],
        [3.8163322819008E+00, 1.1913589544053E+00, -9.6740419078720E-01],
        [3.8046827481496E+00, 1.2474593204382E+00, -9.7922600135387E-01],
        [3.8202228218151E+00, 1.1995824283636E+00, -9.5733187068101E-01],
        [3.9797161316330E+00, 2.5147820926190E-01, -5.1500799817626E-01],
        [3.9831382922541E+00, 2.0190980565891E-01, -5.0185526897444E-01],
        [3.9810868976408E+00, 2.3910377061737E-01, -5.4962360790281E-01],
        [3.9772930845240E+00, 2.8865001362748E-01, -5.6276585706615E-01],
        [3.9816265976187E+00, 2.1428739259987E-01, -4.6723916677654E-01],
        [3.9839413943097E+00, 1.8949722823843E-01, -5.3648152416530E-01],
        [3.7962006776348E+00, 1.2764624207283E+00, -9.3931008487698E-01],
        [3.8126101429289E+00, 1.2302105573453E+00, -9.1545958911180E-01],
        [3.8065408178751E+00, 1.2252542025135E+00, -9.2029248095042E-01],
        [3.8164164823720E+00, 1.2148964928545E+00, -9.3639572989640E-01],
        [3.8972892823450E+00, 2.7547119775919E-01, -5.6510422311694E-01],
        [3.9015993648189E+00, 2.0235606714652E-01, -4.6987255385930E-01],
        [3.9023812010290E+00, 1.7705558022279E-01, -5.3881795411458E-01],
        [3.9019902829240E+00, 1.8970582368465E-01, -5.0434525398694E-01],
        [3.8998352416870E+00, 2.2626338899099E-01, -5.5196108861576E-01],
        [3.8994443235820E+00, 2.3891363245285E-01, -5.1748838848812E-01],
        [3.9372911834345E+00, 2.8206060569333E-01, -5.6393504009155E-01],
        [3.9416129812188E+00, 2.0832172987319E-01, -4.6855586031792E-01],
        [3.9431612976694E+00, 1.8327640423061E-01, -5.3764973913994E-01],
        [3.8619577233846E+00, 1.4192189812407E+00, -9.2587403626770E-01],
        [3.8507167163959E+00, 1.4238788373222E+00, -9.3661710728291E-01],
        [3.8651039358730E+00, 1.4201766685559E+00, -9.2771824467570E-01],
        [3.8624692302920E+00, 1.4273996853788E+00, -9.3359662134515E-01],
        [3.8610467267790E+00, 1.4182334490688E+00, -9.3810025187748E-01],
        [3.8563372198902E+00, 1.4215489092814E+00, -9.3124557177530E-01],
        [3.8568487267976E+00, 1.4297296134196E+00, -9.3896815685275E-01],
        [3.8583881624179E+00, 1.4255816848941E+00, -9.4291768367439E-01],
        [3.8594834323787E+00, 1.4225065965966E+00, -9.3308978018331E-01]])
    assert np.allclose(nblock_expected, all_solid_cells_archive.nodes)
    grid = all_solid_cells_archive.grid
    # one element of each of four distinct solid types
    assert grid.number_of_cells == 4
    assert np.unique(grid.celltypes).size == 4
    # all cells are quadratic (VTK cell type ids > 20)
    assert np.all(grid.celltypes > 20)
    assert np.all(all_solid_cells_archive.quality > 0.0)
def test_read_complex_archive_linear(all_solid_cells_archive_linear):
    """force_linear converts every quadratic cell to linear (VTK id < 20)."""
    grid = all_solid_cells_archive_linear.grid
    assert np.all(grid.celltypes < 20)
    assert np.all(all_solid_cells_archive_linear.quality > 0.0)
@pytest.mark.parametrize('celltype', [VTK_QUADRATIC_TETRA,
                                      VTK_QUADRATIC_PYRAMID,
                                      VTK_QUADRATIC_WEDGE,
                                      VTK_QUADRATIC_HEXAHEDRON])
def test_write_quad_complex_archive(tmpdir, celltype, all_solid_cells_archive):
    """Round-trip each quadratic solid cell type through an archive file."""
    grid = all_solid_cells_archive.grid
    mask = grid.celltypes == celltype
    assert mask.any()
    grid = grid.extract_cells(mask)
    # pytest hands each parametrized invocation a fresh tmpdir, so the old
    # bare "except:" fallback to /tmp/nblock.cdb only hid real errors; build
    # the path directly like the sibling tests do.
    tmp_archive_file = str(tmpdir.mkdir("tmpdir").join('tmp.cdb'))
    pyansys.save_as_archive(tmp_archive_file, grid)
    new_archive = pyansys.Archive(tmp_archive_file)
    assert np.allclose(grid.cells, new_archive.grid.cells)
    assert np.allclose(grid.points, new_archive.grid.points)
    assert (new_archive.quality > 0.0).all()
@pytest.mark.parametrize('celltype', LINEAR_CELL_TYPES)
def test_write_lin_archive(tmpdir, celltype, all_solid_cells_archive_linear):
    """Round-trip each linear solid cell type through an archive file."""
    linear_grid = all_solid_cells_archive_linear.grid
    mask = linear_grid.celltypes == celltype
    assert mask.any()
    linear_grid = linear_grid.extract_cells(mask)
    tmp_archive_file = str(tmpdir.mkdir("tmpdir").join('tmp.cdb'))
    pyansys.save_as_archive(tmp_archive_file, linear_grid)
    new_archive = pyansys.Archive(tmp_archive_file)
    assert new_archive.quality > 0
    assert np.allclose(linear_grid.celltypes, new_archive.grid.celltypes)
def test_write_component(tmpdir):
    """A node component (CMBLOCK) round-trips with its item list intact."""
    items = np.array([1, 20, 50, 51, 52, 53])
    temp_archive = str(tmpdir.mkdir("tmpdir").join('tmp.cdb'))
    comp_name = 'TEST'
    pyansys.write_cmblock(temp_archive, items, comp_name, 'node')
    archive = pyansys.Archive(temp_archive)
    assert np.allclose(archive.node_components[comp_name], items)
def test_read_parm():
    """Archive parameters are only parsed when read_parameters=True."""
    filename = os.path.join(testfiles_path, 'parm.cdb')
    archive = pyansys.Archive(filename)
    # without read_parameters, accessing .parameters must fail
    with pytest.raises(AttributeError):
        archive.parameters
    archive = pyansys.Archive(filename, read_parameters=True)
    assert len(archive.parameters) == 2
    for parm in archive.parameters:
        assert isinstance(archive.parameters[parm], np.ndarray)
def test_read_wb_nblock():
    """Workbench-exported (v19.3) node blocks parse with zero node angles."""
    expected = np.array([[9.89367578e-02, -8.07092192e-04, 8.53764953e+00],
                         [9.65803244e-02, 2.00906704e-02, 8.53744951e+00],
                         [9.19243555e-02, 3.98781615e-02, 8.53723652e+00]])
    filename = os.path.join(testfiles_path, 'workbench_193.cdb')
    archive = pyansys.Archive(filename)
    assert np.allclose(archive.nodes, expected)
    assert np.allclose(archive.node_angles, 0)
def test_read_hypermesh():
    """HyperMesh-exported CDB files parse (first six nodes checked)."""
    expected = np.array([[-6.01203, 2.98129, 2.38556],
                         [-3.03231, 2.98067, 2.38309],
                         [-0.03485, 2.98004, 2.3805],
                         [2.98794, 2.97941, 2.37773],
                         [5.98956, 2.97878, 2.37488],
                         [5.98956, 5.97878, 2.37488]])
    filename = os.path.join(testfiles_path, 'hypermesh.cdb')
    archive = pyansys.Archive(filename, verbose=True)
    assert np.allclose(archive.nodes[:6], expected)
| StarcoderdataPython |
8061107 | <reponame>LinjianMa/neuralODE-282<filename>multi-output-glucose-forecasting/lib/gru.py
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
from torch.autograd import Variable
from torch.nn import Parameter
from torch import Tensor
import torch.nn.functional as F
import math
class GRUODEfunc(nn.Module):
    """ODE right-hand side for a GRU-ODE cell: computes dh/dt from the
    hidden state alone.

    The input-to-hidden gates are intentionally unused in forward() (the
    original left them commented out): between observations the hidden
    state evolves autonomously.  ``x2h`` is retained so the parameter list
    stays compatible with GRUCell-style checkpoints.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(GRUODEfunc, self).__init__()
        self.bias = bias
        self.x2h = nn.Linear(input_size, 3 * hidden_size, bias=bias)
        self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias)
        # number of function evaluations performed by the ODE solver
        self.nfe = 0

    def forward(self, t, hidden):
        """Return dh/dt at (unused) time *t* for state *hidden*."""
        self.nfe += 1
        gate_h = self.h2h(hidden)
        gate_h = gate_h.squeeze()
        h_r, h_i, h_n = gate_h.chunk(3, 1)
        # torch.sigmoid / torch.tanh replace the deprecated
        # torch.nn.functional.sigmoid / .tanh (same math).
        resetgate = torch.sigmoid(h_r)
        inputgate = torch.sigmoid(h_i)
        newgate = torch.tanh(resetgate * h_n)
        # dh/dt pulls the state toward the candidate activation.
        hy = (1 - inputgate) * (newgate - hidden)
        return hy
class GRUODECell(nn.Module):
    """
    GRU cell whose hidden state evolves as a neural ODE.

    forward() integrates GRUODEfunc over t in [0, 1] with torchdiffeq's
    odeint (the adjoint variant when args.adjoint is set) and returns the
    state at t = 1.  ``args`` must provide: adjoint, tol and method.
    """
    def __init__(self, input_size, hidden_size, bias=True, args=None):
        super(GRUODECell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # the ODE right-hand side dh/dt
        self.odefunc = GRUODEfunc(input_size, hidden_size, bias)
        self.reset_parameters()
        # solver configuration (adjoint flag, tolerance, method name)
        self.args = args
        # each forward pass integrates from t=0 to t=1
        self.integration_time = torch.tensor([0, 1]).float()
    def reset_parameters(self):
        """Uniform init of all parameters in [-1/sqrt(H), 1/sqrt(H)]."""
        std = 1.0 / math.sqrt(self.hidden_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)
    def forward(self, x):
        # flatten the middle (time) axis; x becomes the ODE's initial state
        x = x.view(-1, x.size(2))
        # import here so the adjoint/non-adjoint backend follows args
        if self.args.adjoint:
            from torchdiffeq import odeint_adjoint as odeint
        else:
            from torchdiffeq import odeint
        # keep the time grid on the same device/dtype as the input
        self.integration_time = self.integration_time.type_as(x)
        out = odeint(
            self.odefunc,
            x,
            self.integration_time,
            rtol=self.args.tol,
            atol=self.args.tol,
            method=self.args.method,
        )
        # out stacks the states at t=0 and t=1; return t=1 with a leading axis
        return out[1][None,:]
    @property
    def nfe(self):
        # function-evaluation count, delegated to the underlying odefunc
        return self.odefunc.nfe
    @nfe.setter
    def nfe(self, value):
        self.odefunc.nfe = value
class GRUCell(nn.Module):
    """
    A single-step GRU cell.

    forward() expects x of shape (batch, 1, input_size) — it is flattened
    to 2-D internally — and returns the new hidden state twice, mirroring
    the (output, hidden) convention of recurrent layers.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(GRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.x2h = nn.Linear(input_size, 3 * hidden_size, bias=bias)
        self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init of all parameters in [-1/sqrt(H), 1/sqrt(H)]."""
        std = 1.0 / math.sqrt(self.hidden_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, x, hidden):
        x = x.view(-1, x.size(2))
        gate_x = self.x2h(x)
        gate_h = self.h2h(hidden)
        gate_x = gate_x.squeeze()
        gate_h = gate_h.squeeze()
        i_r, i_i, i_n = gate_x.chunk(3, 1)
        h_r, h_i, h_n = gate_h.chunk(3, 1)
        # torch.sigmoid / torch.tanh replace the deprecated
        # torch.nn.functional.sigmoid / .tanh (same math).
        resetgate = torch.sigmoid(i_r + h_r)
        inputgate = torch.sigmoid(i_i + h_i)
        newgate = torch.tanh(i_n + (resetgate * h_n))
        # convex mix of the previous hidden state and the candidate
        hy = newgate + inputgate * (hidden - newgate)
        return hy, hy
# class GRUModel(nn.Module):
# def __init__(self, input_dim, hidden_dim, layer_dim, output_dim, bias=True):
# super(GRUModel, self).__init__()
# # Hidden dimensions
# self.hidden_dim = hidden_dim
# # Number of hidden layers
# self.layer_dim = layer_dim
# self.gru_cell = GRUCell(input_dim, hidden_dim, layer_dim)
# self.fc = nn.Linear(hidden_dim, output_dim)
# def forward(self, x):
# # Initialize hidden state with zeros
# #######################
# # USE GPU FOR MODEL #
# #######################
# #print(x.shape,"x.shape")100, 28, 28
# if torch.cuda.is_available():
# h0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).cuda())
# else:
# h0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim))
# outs = []
# hn = h0[0,:,:]
# for seq in range(x.size(1)):
# hn = self.gru_cell(x[:,seq,:], hn)
# outs.append(hn)
# out = outs[-1].squeeze()
# out = self.fc(out)
# # out.size() --> 100, 10
# return out
| StarcoderdataPython |
149148 | <filename>utils.py<gh_stars>0
from hashlib import pbkdf2_hmac
from string import punctuation
from secrets import token_bytes
# Default PBKDF2-HMAC iteration count (2**16 = 65536) for key stretching.
HASH_ROUNDS = 2**16
def create_header(algo: str) -> str:
    """Build the banner line that labels a ciphertext with its algorithm."""
    return f"======== {algo} ========\n"
def get_header(cipher: bytes) -> bytes:
    """Extract the algorithm name from a ciphertext's banner (first) line."""
    banner = cipher.split(b"\n")[0]
    # Deleting '=', ' ' and '\n' in one translate pass is equivalent to the
    # original chain of replace(..., b"") calls.
    return banner.translate(None, b"= \n")
def check_password_strength(password, shorten_rockyou=False):
    """Validate password complexity; return True only if every rule passes.

    Rules, checked in order: length >= 10, at least one digit, one
    uppercase, one lowercase and one special character, and — unless
    shorten_rockyou is set — absence from rockyou_shortened.txt.
    """
    rules = [
        (lambda: len(password) >= 10,
         "[!] Password has to be at least 10 characters long."),
        (lambda: any(ch.isdigit() for ch in password),
         "[!] Password has to contain at least one number."),
        (lambda: any(ch.isupper() for ch in password),
         "[!] Password has to contain at least one uppercase character."),
        (lambda: any(ch.islower() for ch in password),
         "[!] Password has to contain at least one lowercase character."),
        (lambda: any(ch in punctuation for ch in password),
         "[!] Password has to contain at least one special character."),
    ]
    for passed, message in rules:
        if not passed():
            print(message)
            return False
    # The wordlist check is skipped while the shortened list itself is
    # being generated ("shorten rockyou" mode).
    if not shorten_rockyou:
        with open("rockyou_shortened.txt", "rb") as wordlist:
            for candidate in wordlist:
                # candidate keeps its trailing newline; strip it for the compare
                if password.encode() == candidate[:-1]:
                    print("[!] Password must not be in 'rockyou.txt'.")
                    return False
    return True
def create_salt() -> bytes:
    """Return 16 cryptographically secure random bytes for use as a salt."""
    return token_bytes(16)
def get_num_from_password(password: str, n_len: int, salt: bytes, rounds=None) -> int:
    """Derive a deterministic (n_len - 1)-bit integer from *password*.

    PBKDF2-HMAC-SHA512 output is extended (hash-of-previous-hash appended)
    until it carries more than n_len bits, then right-shifted so the result
    fits in n_len - 1 bits with its top bit set.

    rounds defaults to HASH_ROUNDS; it is resolved at call time so later
    changes to the module constant take effect.
    """
    if rounds is None:
        rounds = HASH_ROUNDS
    hashed = pbkdf2_hmac("sha512", password.encode(), salt, rounds)
    d_in = int.from_bytes(hashed, "big")
    # Extend the digest until it has more than n_len bits of material,
    # e.g. d4fe -> d4fe36ad -> d4fe36ad04ef -> ...
    while d_in.bit_length() <= n_len:
        hashed += pbkdf2_hmac("sha512", hashed, salt, rounds)
        d_in = int.from_bytes(hashed, "big")
    # The loop guarantees bit_length > n_len, so this branch is always
    # taken (the original's trailing "return 0" was dead code).  Shift the
    # surplus bits away; the result has exactly n_len - 1 bits.
    return d_in >> (d_in.bit_length() - n_len + 1)
| StarcoderdataPython |
268660 | # -*- coding: utf-8 -*-
"""
test_db.py
testing our database code
Copyright 2017 CodeRatchet
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
"""
import logging
import mongoengine
import psutil
from scrapytest.config import config
from scrapytest.types import Article
from pymongo.mongo_client import MongoClient
log = logging.getLogger(__name__)
def test_mongodb_is_up():
    """Fail fast when the local mongod service is not running."""
    # BUG FIX: next() without a default raises StopIteration when no mongod
    # process exists, so the "is None" branch below was unreachable; pass
    # None as the default so the check actually fires.
    mongod_p = next(
        (x for x in psutil.process_iter() if 'mongod' in x.name()), None)
    if mongod_p is None or not mongod_p.is_running():
        assert False, "mongo db is not currently running!"
    try:
        import scrapytest.db
    except ImportError:
        log.error("Could not load the database connection, is the Mongo service running?")
        # NOTE(review): exit() inside a test aborts the whole run; consider
        # pytest.fail() instead — kept to preserve existing behaviour.
        exit(1)
def get_test_db_connection():
    """Return the pymongo MongoClient registered under the 'test_db' alias."""
    return mongoengine.connection.get_connection(alias='test_db')
def test_database_exists():
    """The configured test database must exist and be the connection default."""
    _connection = get_test_db_connection()  # type: MongoClient
    assert config['test_db']['db_name'] in _connection.database_names()
    assert _connection.get_default_database().name == config['test_db']['db_name']
# from now on we can reference the connection
connection = get_test_db_connection()  # type: MongoClient
def test_test_data_exists():
    """The article collection must exist and contain seeded documents."""
    db = connection.get_database(config['test_db']['db_name'])
    # we should have populated the article collection
    assert Article.__name__.lower() in db.collection_names()
    assert db[Article.__name__.lower()].count() > 0
| StarcoderdataPython |
1602982 | <reponame>thezakman/CTF-Toolz
# This file is part of PyBing (http://pybing.googlecode.com).
#
# Copyright (C) 2009 <NAME> http://geewax.org/
# All rights reserved.
#
# This software is licensed as described in the file COPYING.txt,
# which you should have received as part of this distribution.
"""
This module holds the Bing WebQuery class used to do web searches against Bing.
"""
from pybing import constants
from pybing.query import BingQuery, Pagable
class WebQuery(BingQuery, Pagable):
    """Bing query against the web search source type (supports paging)."""
    SOURCE_TYPE = constants.WEB_SOURCE_TYPE
| StarcoderdataPython |
3441090 | <reponame>vipermu/dgl
# -*- coding: utf-8 -*-
# pylint: disable=C0103, E1101, C0111
"""
The implementation of neural network layers used in SchNet and MGCN.
"""
import torch
import torch.nn as nn
from torch.nn import Softplus
import numpy as np
from ... import function as fn
class AtomEmbedding(nn.Module):
    """
    Map atom (node) types to learnable embedding vectors.

    Atoms of the same element share one embedding row; index 0 acts as a
    padding entry.

    Parameters
    ----------
    dim : int
        Size of each embedding vector, default 128.
    type_num : int
        Largest atomic number expected in the dataset, default 100.
    pre_train : None or float32 tensor
        Optional pre-trained embedding table, default None.
    """

    def __init__(self, dim=128, type_num=100, pre_train=None):
        super(AtomEmbedding, self).__init__()
        self._dim = dim
        self._type_num = type_num
        if pre_train is None:
            self.embedding = nn.Embedding(type_num, dim, padding_idx=0)
        else:
            self.embedding = nn.Embedding.from_pretrained(pre_train, padding_idx=0)

    def forward(self, atom_types):
        """
        Parameters
        ----------
        atom_types : int64 tensor of shape (B1)
            Types for all atoms in the batched graph(s).

        Returns
        -------
        float32 tensor of shape (B1, self._dim)
            One embedding row per atom.
        """
        return self.embedding(atom_types)
class EdgeEmbedding(nn.Module):
    """
    Map edges to learnable embeddings; edges linking the same pair of
    elements share one embedding row (direction is ignored: C-O == O-C).

    Parameters
    ----------
    dim : int
        Size of each embedding vector, default 128.
    edge_num : int
        Maximum number of distinct edge types, default 3000.  Must exceed
        the square of the largest atomic number in the dataset.
    pre_train : None or float32 tensor
        Optional pre-trained edge embedding table, default None.
    """

    def __init__(self, dim=128, edge_num=3000, pre_train=None):
        super(EdgeEmbedding, self).__init__()
        self._dim = dim
        self._edge_num = edge_num
        if pre_train is None:
            self.embedding = nn.Embedding(edge_num, dim, padding_idx=0)
        else:
            self.embedding = nn.Embedding.from_pretrained(pre_train, padding_idx=0)

    def generate_edge_type(self, edges):
        """Compute a symmetric edge-type code from the endpoint atom types.

        Uses an unordered pairing function so that (x, y) and (y, x) map to
        the same code; see
        https://math.stackexchange.com/questions/23503/create-unique-number-from-2-numbers

        Parameters
        ----------
        edges : EdgeBatch
            Edge batch carrying src/dst 'ntype' features.

        Returns
        -------
        dict
            Edge types stored under "etype".
        """
        src_type = edges.src['ntype']
        dst_type = edges.dst['ntype']
        pair_code = src_type * dst_type + \
            (torch.abs(src_type - dst_type) - 1) ** 2 / 4
        return {'etype': pair_code}

    def forward(self, g, atom_types):
        """Compute embeddings for every edge of *g*.

        Parameters
        ----------
        g : DGLGraph
            Graph whose edges are embedded.
        atom_types : int64 tensor of shape (B1)
            Atom types for the graph's nodes.

        Returns
        -------
        float32 tensor of shape (B2, self._dim)
            One embedding row per edge.
        """
        g = g.local_var()
        g.ndata['ntype'] = atom_types
        g.apply_edges(self.generate_edge_type)
        return self.embedding(g.edata.pop('etype'))
class ShiftSoftplus(nn.Module):
    """Softplus shifted down by log(shift):

        1/beta * (log(1 + exp(beta * x)) - log(shift))

    Parameters
    ----------
    beta : int
        Softplus beta, default 1.
    shift : int
        Constant whose log is subtracted, default 2.
    threshold : int
        Above this value softplus reverts to linear, default 20.
    """

    def __init__(self, beta=1, shift=2, threshold=20):
        super(ShiftSoftplus, self).__init__()
        self.shift = shift
        self.softplus = Softplus(beta, threshold)

    def forward(self, x):
        """Apply the shifted softplus elementwise.

        With the defaults (beta=1, shift=2) the activation passes through
        zero at x = 0, since softplus(0) = log(2).
        """
        return self.softplus(x) - np.log(float(self.shift))
class RBFLayer(nn.Module):
    """Expand scalar edge distances over a set of radial basis functions:

        e(d) = exp(- gamma * ||d - mu_k||^2)

    With the default parameters this amounts to gamma = 10 and
    0 <= mu_k <= 30 for k = 1..300.

    Parameters
    ----------
    low : int
        Smallest center mu_k, default 0.
    high : int
        Largest center mu_k, default 30.
    gap : float
        Requested spacing between consecutive centers, default 0.1.
    dim : int
        Output size per center, default 1.
    """

    def __init__(self, low=0, high=30, gap=0.1, dim=1):
        super(RBFLayer, self).__init__()
        self._low = low
        self._high = high
        self._dim = dim
        self._n_centers = int(np.ceil((high - low) / gap))
        centers = np.linspace(low, high, self._n_centers)
        # Registered as a frozen Parameter so the centers follow the module
        # across devices/dtypes but are never touched by the optimizer.
        self.centers = nn.Parameter(
            torch.tensor(centers, dtype=torch.float, requires_grad=False),
            requires_grad=False)
        self._fan_out = self._dim * self._n_centers
        # Effective spacing produced by linspace (may differ from `gap`);
        # its reciprocal plays the role of gamma below.
        self._gap = centers[1] - centers[0]

    def forward(self, edge_distances):
        """Compute the RBF expansion of the given distances.

        Parameters
        ----------
        edge_distances : float32 tensor of shape (B, 1)
            Edge distances, B for the number of edges.

        Returns
        -------
        float32 tensor of shape (B, self._fan_out)
            Computed RBF results.
        """
        offsets = edge_distances - self.centers
        gamma = -1 / self._gap
        return torch.exp(gamma * (offsets ** 2))
class CFConv(nn.Module):
    """Continuous-filter convolution layer from SchNet.

    Parameters
    ----------
    rbf_dim : int
        Dimension of the RBF layer output.
    dim : int
        Output dimension, default 64.
    act : activation module or None
        Activation used inside the filter network; defaults to a shifted
        softplus, Softplus(beta=0.5, threshold=14).
    """

    def __init__(self, rbf_dim, dim=64, act=None):
        super(CFConv, self).__init__()
        self._rbf_dim = rbf_dim
        self._dim = dim
        activation = nn.Softplus(beta=0.5, threshold=14) if act is None else act
        # Filter-generating network: maps RBF distance features to per-edge
        # filters of size `dim`.
        self.project = nn.Sequential(
            nn.Linear(self._rbf_dim, self._dim),
            activation,
            nn.Linear(self._dim, self._dim),
        )

    def forward(self, g, node_weight, rbf_out):
        """Run the continuous-filter convolution.

        Parameters
        ----------
        g : DGLGraph
            The graph for performing convolution.
        node_weight : float32 tensor of shape (B1, D1)
            The weight of nodes in message passing, B1 for number of nodes
            and D1 for node weight size.
        rbf_out : float32 tensor of shape (B2, D2)
            The output of RBFLayer, B2 for number of edges and D2 for rbf
            out size.

        Returns
        -------
        float32 tensor of shape (B1, self._dim)
            Nodewise sums of filtered neighbour features.
        """
        g = g.local_var()
        edge_filter = self.project(rbf_out)
        g.ndata['node_weight'] = node_weight
        g.edata['e'] = edge_filter
        # Message m_uv = w_u * e_uv; each node sums its incoming messages.
        g.update_all(fn.u_mul_e('node_weight', 'e', 'm'), fn.sum('m', 'h'))
        return g.ndata.pop('h')
class Interaction(nn.Module):
    """SchNet interaction block: a continuous-filter convolution sandwiched
    between atom-wise linear layers, applied as a residual update.

    Parameters
    ----------
    rbf_dim : int
        Dimension of the RBF layer output.
    dim : int
        Dimension of intermediate node representations.
    """

    def __init__(self, rbf_dim, dim):
        super(Interaction, self).__init__()
        self._dim = dim
        # Atom-wise projection applied before the convolution (no bias).
        self.node_layer1 = nn.Linear(dim, dim, bias=False)
        self.cfconv = CFConv(rbf_dim, dim, Softplus(beta=0.5, threshold=14))
        # Atom-wise output network applied after the convolution.
        self.node_layer2 = nn.Sequential(
            nn.Linear(dim, dim),
            Softplus(beta=0.5, threshold=14),
            nn.Linear(dim, dim),
        )

    def forward(self, g, n_feat, rbf_out):
        """Update node features through one interaction pass.

        Parameters
        ----------
        g : DGLGraph
            The graph for performing convolution.
        n_feat : float32 tensor of shape (B1, D1)
            Node features, B1 for number of nodes and D1 for feature size.
        rbf_out : float32 tensor of shape (B2, D2)
            The output of RBFLayer, B2 for number of edges and D2 for rbf
            out size.

        Returns
        -------
        float32 tensor of shape (B1, D1)
            Updated node representations.
        """
        projected = self.node_layer1(n_feat)
        conv_out = self.cfconv(g, projected, rbf_out)
        # Residual connection: keep the original features flowing through.
        return n_feat + self.node_layer2(conv_out)
class VEConv(nn.Module):
    """Vertex-Edge convolution layer from MGCN, mixing node features, edge
    features and RBF-expanded distances.

    Parameters
    ----------
    rbf_dim : int
        Size of the RBF layer output.
    dim : int
        Size of intermediate representations, default 64.
    update_edge : bool
        Whether to pass edge features through a linear layer, default True.
    """

    def __init__(self, rbf_dim, dim=64, update_edge=True):
        super(VEConv, self).__init__()
        self._rbf_dim = rbf_dim
        self._dim = dim
        self._update_edge = update_edge
        # Filter network over the RBF-expanded distances.
        self.update_rbf = nn.Sequential(
            nn.Linear(self._rbf_dim, self._dim),
            nn.Softplus(beta=0.5, threshold=14),
            nn.Linear(self._dim, self._dim),
        )
        self.update_efeat = nn.Linear(self._dim, self._dim)

    def forward(self, g, n_feat, e_feat, rbf_out):
        """Run the vertex-edge convolution.

        Parameters
        ----------
        g : DGLGraph
            The graph for performing convolution.
        n_feat : float32 tensor of shape (B1, D1)
            Node features, B1 for number of nodes and D1 for feature size.
        e_feat : float32 tensor of shape (B2, D2)
            Edge features, B2 for number of edges and D2 for edge feature
            size.
        rbf_out : float32 tensor of shape (B2, D3)
            The output of RBFLayer, D3 for rbf out size.

        Returns
        -------
        n_feat : float32 tensor
            Updated node features.
        e_feat : float32 tensor
            (Potentially updated) edge features.
        """
        dist_filter = self.update_rbf(rbf_out)
        if self._update_edge:
            e_feat = self.update_efeat(e_feat)
        g = g.local_var()
        g.ndata.update({'n_feat': n_feat})
        g.edata.update({'rbf_out': dist_filter, 'e_feat': e_feat})
        # Two message streams, each summed over incoming edges:
        #   m_0 = source features gated by the distance filter,
        #   m_1 = the edge features themselves.
        g.update_all(message_func=[fn.u_mul_e('n_feat', 'rbf_out', 'm_0'),
                                   fn.copy_e('e_feat', 'm_1')],
                     reduce_func=[fn.sum('m_0', 'n_feat_0'),
                                  fn.sum('m_1', 'n_feat_1')])
        n_feat = g.ndata.pop('n_feat_0') + g.ndata.pop('n_feat_1')
        return n_feat, e_feat
class MultiLevelInteraction(nn.Module):
    """MGCN multilevel interaction block: residually updates node features
    via a vertex-edge convolution and refreshes the edge features.

    Parameters
    ----------
    rbf_dim : int
        Dimension of the RBF layer output.
    dim : int
        Dimension of intermediate representations.
    """

    def __init__(self, rbf_dim, dim):
        super(MultiLevelInteraction, self).__init__()
        self._atom_dim = dim
        self.node_layer1 = nn.Linear(dim, dim, bias=True)
        self.conv_layer = VEConv(rbf_dim, dim)
        self.activation = nn.Softplus(beta=0.5, threshold=14)
        self.edge_layer1 = nn.Linear(dim, dim, bias=True)
        # Atom-wise output network applied after the convolution.
        self.node_out = nn.Sequential(
            nn.Linear(dim, dim),
            nn.Softplus(beta=0.5, threshold=14),
            nn.Linear(dim, dim),
        )

    def forward(self, g, n_feat, e_feat, rbf_out):
        """Update node and edge representations through one pass.

        Parameters
        ----------
        g : DGLGraph
            The graph for performing convolution.
        n_feat : float32 tensor of shape (B1, D1)
            Node features, B1 for number of nodes and D1 for feature size.
        e_feat : float32 tensor of shape (B2, D2)
            Edge features, B2 for number of edges and D2 for edge feature
            size.
        rbf_out : float32 tensor of shape (B2, D3)
            The output of RBFLayer, D3 for rbf out size.

        Returns
        -------
        n_feat : float32 tensor
            Updated node representations.
        e_feat : float32 tensor
            Updated edge representations.
        """
        projected = self.node_layer1(n_feat)
        conv_out, e_feat = self.conv_layer(g, projected, e_feat, rbf_out)
        # Residual node update.
        n_feat = n_feat + self.node_out(conv_out)
        # Edge features get their own linear + softplus refresh.
        e_feat = self.activation(self.edge_layer1(e_feat))
        return n_feat, e_feat
| StarcoderdataPython |
4896789 | from typing import Any, Dict, Optional
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter
import numpy as np
import torch.optim.lr_scheduler
from torch.optim.lr_scheduler import _LRScheduler
from autoPyTorch.pipeline.components.setup.lr_scheduler.base_scheduler import BaseLRComponent
class CosineAnnealingWarmRestarts(BaseLRComponent):
    """
    Set the learning rate of each parameter group using a cosine annealing
    schedule, where eta_max is set to the initial lr, T_cur is the number of
    epochs since the last restart, and T_i is the number of epochs between
    two warm restarts (SGDR).
    Args:
        T_0 (int): Number of iterations for the first restart
        T_mult (int): A factor by which T_i increases after a restart
        random_state (Optional[np.random.RandomState]): random state
    """
    def __init__(
        self,
        T_0: int,
        T_mult: int,
        random_state: Optional[np.random.RandomState] = None
    ):
        super().__init__()
        self.T_0 = T_0
        self.T_mult = T_mult
        self.random_state = random_state
        self.scheduler = None  # type: Optional[_LRScheduler]
    def fit(self, X: Dict[str, Any], y: Any = None) -> BaseLRComponent:
        """
        Fits a component by using an input dictionary with pre-requisites
        Args:
            X (X: Dict[str, Any]): Dependencies needed by current component to perform fit
            y (Any): not used. To comply with sklearn API
        Returns:
            A instance of self
        """
        # Make sure there is an optimizer
        self.check_requirements(X, y)
        # T_0 / T_mult are cast to int because the search space below samples
        # T_mult as a float, while torch's scheduler expects integers.
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer=X['optimizer'],
            T_0=int(self.T_0),
            T_mult=int(self.T_mult),
        )
        return self
    @staticmethod
    def get_properties(dataset_properties: Optional[Dict[str, Any]] = None) -> Dict[str, str]:
        # Static metadata used by the pipeline to identify this component.
        return {
            'shortname': 'CosineAnnealingWarmRestarts',
            'name': 'Cosine Annealing WarmRestarts',
        }
    @staticmethod
    def get_hyperparameter_search_space(dataset_properties: Optional[Dict] = None
                                        ) -> ConfigurationSpace:
        # NOTE(review): T_mult is sampled as a float in [1, 2] and truncated
        # to int in fit(), so effectively only 1 or 2 — confirm intended.
        T_0 = UniformIntegerHyperparameter(
            "T_0", 1, 20, default_value=1)
        T_mult = UniformFloatHyperparameter(
            "T_mult", 1.0, 2.0, default_value=1.0)
        cs = ConfigurationSpace()
        cs.add_hyperparameters([T_0, T_mult])
        return cs
| StarcoderdataPython |
# Prompt for the water mass (kg) and the start/end temperatures, then report
# the energy (J) needed for the temperature change.
# float() replaces the original eval(): eval executes arbitrary user input,
# which is both unsafe and unnecessary for reading a number.
mass = float(input("Enter the amount of water in kilograms: "))
initial_temp = float(input("Enter the initial temperature: "))
final_temp = float(input("Enter the final temperature: "))
# Q = m * c * dT, with c = 4184 J/(kg*degC), the specific heat of water.
energy = mass * (final_temp - initial_temp) * 4184
print("The energy needed is {}".format(energy))
| StarcoderdataPython |
5169012 | <filename>pydmd/dmdc.py
"""
Derived module from dmdbase.py for dmd with control.
Reference:
- <NAME>., <NAME>. and <NAME>., 2016. Dynamic mode decomposition
with control. SIAM Journal on Applied Dynamical Systems, 15(1), pp.142-161.
"""
from past.utils import old_div
import numpy as np
from .dmdbase import DMDBase
from .dmdoperator import DMDOperator
from .utils import compute_tlsq, compute_svd
class DMDControlOperator(DMDOperator):
    """
    DMD with control base operator. This should be subclassed in order to
    implement the appropriate features.
    :param svd_rank: the rank for the truncation; If 0, the method computes the
        optimal rank and uses it for truncation; if positive interger, the
        method uses the argument for the truncation; if float between 0 and 1,
        the rank is the number of the biggest singular values that are needed
        to reach the 'energy' specified by `svd_rank`; if -1, the method does
        not compute truncation.
    :type svd_rank: int or float
    :param svd_rank_omega: the rank for the truncation of the aumented matrix
        omega composed by the left snapshots matrix and the control. Used only
        for the `_fit_B_unknown` method of this class. It should be greater or
        equal than `svd_rank`. For the possible values please refer to the
        `svd_rank` parameter description above.
    :type svd_rank_omega: int or float
    :param int tlsq_rank: rank truncation computing Total Least Square. Default
        is 0, that means no truncation.
    """
    def __init__(self, svd_rank, svd_rank_omega, tlsq_rank):
        # DMDc always uses the exact (high-dimensional) modes; the remaining
        # DMDOperator options are deliberately disabled for the control case.
        super(DMDControlOperator, self).__init__(svd_rank=svd_rank, exact=True,
                                                 rescale_mode=None,
                                                 forward_backward=False,
                                                 sorted_eigs=False,
                                                 tikhonov_regularization=None)
        self._svd_rank_omega = svd_rank_omega
        self._tlsq_rank = tlsq_rank
class DMDBKnownOperator(DMDControlOperator):
    """
    DMD with control base operator when B is given.
    :param svd_rank: the rank for the truncation; If 0, the method computes the
        optimal rank and uses it for truncation; if positive interger, the
        method uses the argument for the truncation; if float between 0 and 1,
        the rank is the number of the biggest singular values that are needed
        to reach the 'energy' specified by `svd_rank`; if -1, the method does
        not compute truncation.
    :type svd_rank: int or float
    :param svd_rank_omega: the rank for the truncation of the aumented matrix
        omega composed by the left snapshots matrix and the control. Used only
        for the `_fit_B_unknown` method of this class. It should be greater or
        equal than `svd_rank`. For the possible values please refer to the
        `svd_rank` parameter description above.
    :type svd_rank_omega: int or float
    :param int tlsq_rank: rank truncation computing Total Least Square. Default
        is 0, that means no truncation.
    """
    def compute_operator(self, X, Y, B, controlin):
        """
        Compute the low-rank operator. This is the standard version of the DMD
        operator, with a correction which depends on B.
        :param numpy.ndarray X: matrix containing the snapshots x0,..x{n-1} by
            column.
        :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by
            column.
        :param numpy.ndarray B: the matrix B.
        :param numpy.ndarray control: the control input.
        :return: the (truncated) left-singular vectors matrix, the (truncated)
            singular values array, the (truncated) right-singular vectors
            matrix of X.
        :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray
        """
        X, Y = compute_tlsq(X, Y, self._tlsq_rank)
        # Subtract the known control contribution (B u_k) from the outputs,
        # reducing the problem to a standard (control-free) DMD fit.
        Y = Y - B.dot(controlin)
        return super(DMDBKnownOperator, self).compute_operator(X, Y)
class DMDBUnknownOperator(DMDControlOperator):
    """
    DMD with control base operator when B is unknown.
    :param svd_rank: the rank for the truncation; If 0, the method computes the
        optimal rank and uses it for truncation; if positive interger, the
        method uses the argument for the truncation; if float between 0 and 1,
        the rank is the number of the biggest singular values that are needed
        to reach the 'energy' specified by `svd_rank`; if -1, the method does
        not compute truncation.
    :type svd_rank: int or float
    :param svd_rank_omega: the rank for the truncation of the aumented matrix
        omega composed by the left snapshots matrix and the control. Used only
        for the `_fit_B_unknown` method of this class. It should be greater or
        equal than `svd_rank`. For the possible values please refer to the
        `svd_rank` parameter description above.
    :type svd_rank_omega: int or float
    :param int tlsq_rank: rank truncation computing Total Least Square. Default
        is 0, that means no truncation.
    """
    def compute_operator(self, X, Y, controlin):
        """
        Compute the low-rank operator.
        :param numpy.ndarray X: matrix containing the snapshots x0,..x{n-1} by
            column.
        :param numpy.ndarray Y: matrix containing the snapshots x1,..x{n} by
            column.
        :param numpy.ndarray control: the control input.
        :return: the (truncated) left-singular vectors matrix of Y, and
            the product between the left-singular vectors of Y and Btilde.
        :rtype: numpy.ndarray, numpy.ndarray
        """
        snapshots_rows = X.shape[0]
        # Augmented data matrix Omega = [X; U] stacking states and controls.
        omega = np.vstack([X, controlin])
        Up, sp, Vp = compute_svd(omega, self._svd_rank_omega)
        # Split the left-singular vectors of Omega into the state part (Up1)
        # and the control part (Up2).
        Up1 = Up[:snapshots_rows, :]
        Up2 = Up[snapshots_rows:, :]
        Ur, _, _ = compute_svd(Y, self._svd_rank)
        # Reduced operator Atilde = Ur* Y Vp Sp^-1 Up1* Ur (Proctor et al. 2016).
        self._Atilde = np.linalg.multi_dot([Ur.T.conj(), Y, Vp,
                                            np.diag(np.reciprocal(sp)),
                                            Up1.T.conj(), Ur])
        self._compute_eigenquantities()
        self._compute_modes(Y, sp, Vp, Up1, Ur)
        # Reduced control operator Btilde = Ur* Y Vp Sp^-1 Up2*.
        Btilde = np.linalg.multi_dot([Ur.T.conj(), Y, Vp,
                                      np.diag(np.reciprocal(sp)),
                                      Up2.T.conj()])
        return Ur, Ur.dot(Btilde)
    def _compute_modes(self, Y, sp, Vp, Up1, Ur):
        """
        Private method that computes eigenvalues and eigenvectors of the
        high-dimensional operator (stored in self.modes and self.Lambda).
        """
        # Exact DMD modes: project the reduced eigenvectors back to the
        # high-dimensional space through the data matrices.
        self._modes = np.linalg.multi_dot([Y, Vp, np.diag(np.reciprocal(sp)),
                                           Up1.T.conj(), Ur,
                                           self.eigenvectors])
        self._Lambda = self.eigenvalues
class DMDc(DMDBase):
    """
    Dynamic Mode Decomposition with control.
    This version does not allow to manipulate the temporal window within the
    system is reconstructed.
    :param svd_rank: the rank for the truncation; If 0, the method computes the
        optimal rank and uses it for truncation; if positive interger, the
        method uses the argument for the truncation; if float between 0 and 1,
        the rank is the number of the biggest singular values that are needed
        to reach the 'energy' specified by `svd_rank`; if -1, the method does
        not compute truncation.
    :type svd_rank: int or float
    :param int tlsq_rank: rank truncation computing Total Least Square. Default
        is 0, that means no truncation.
    :param opt: argument to control the computation of DMD modes amplitudes.
        See :class:`DMDBase`. Default is False.
    :type opt: bool or int
    :param svd_rank_omega: the rank for the truncation of the aumented matrix
        omega composed by the left snapshots matrix and the control. Used only
        for the `_fit_B_unknown` method of this class. It should be greater or
        equal than `svd_rank`. For the possible values please refer to the
        `svd_rank` parameter description above.
    :type svd_rank_omega: int or float
    """
    def __init__(self, svd_rank=0, tlsq_rank=0, opt=False, svd_rank_omega=-1):
        # NOTE(review): DMDBase.__init__ is intentionally not called here;
        # the operator is built lazily in fit() once we know whether B is
        # known — confirm against DMDBase's expectations.
        # we're going to initialize Atilde when we know if B is known
        self._Atilde = None
        # remember the arguments for when we'll need them
        self._dmd_operator_kwargs = {
            'svd_rank': svd_rank,
            'svd_rank_omega': svd_rank_omega,
            'tlsq_rank': tlsq_rank
        }
        self._opt = opt
        self._B = None
        self._snapshots_shape = None
        self._controlin = None
        self._controlin_shape = None
        self._basis = None
        self._modes_activation_bitmask_proxy = None
    @property
    def svd_rank_omega(self):
        # Rank used for the truncation of the augmented [X; U] matrix.
        return self.operator._svd_rank_omega
    @property
    def B(self):
        """
        Get the operator B.
        :return: the operator B.
        :rtype: numpy.ndarray
        """
        return self._B
    @property
    def basis(self):
        """
        Get the basis used to reduce the linear operator to the low dimensional
        space.
        :return: the matrix which columns are the basis vectors.
        :rtype: numpy.ndarray
        """
        return self._basis
    def reconstructed_data(self, control_input=None):
        """
        Return the reconstructed data, computed using the `control_input`
        argument. If the `control_input` is not passed, the original input (in
        the `fit` method) is used. The input dimension has to be consistent
        with the dynamics.
        :param numpy.ndarray control_input: the input control matrix.
        :return: the matrix that contains the reconstructed snapshots.
        :rtype: numpy.ndarray
        """
        if control_input is None:
            controlin, controlin_shape = self._controlin, self._controlin_shape
        else:
            controlin, controlin_shape = self._col_major_2darray(control_input)
        # One control sample drives each transition x_k -> x_{k+1}, hence
        # "number of snapshots - 1" inputs are required.
        if controlin.shape[1] != self.dynamics.shape[1] - 1:
            raise RuntimeError(
                'The number of control inputs and the number of snapshots to '
                'reconstruct has to be the same'
            )
        # Rescale the eigenvalues if the reconstruction timestep differs
        # from the original sampling timestep.
        eigs = np.power(self.eigs,
                        old_div(self.dmd_time['dt'], self.original_time['dt']))
        # High-dimensional operator A rebuilt from modes and eigenvalues.
        A = np.linalg.multi_dot([self.modes, np.diag(eigs),
                                 np.linalg.pinv(self.modes)])
        # Propagate forward: x_{k+1} = A x_k + B u_k.
        data = [self._snapshots[:, 0]]
        for i, u in enumerate(controlin.T):
            data.append(A.dot(data[i]) + self._B.dot(u))
        data = np.array(data).T
        return data
    def fit(self, X, I, B=None):
        """
        Compute the Dynamic Modes Decomposition with control given the original
        snapshots and the control input data. The matrix `B` that controls how
        the control input influences the system evolution can be provided by
        the user; otherwise, it is computed by the algorithm.
        :param X: the input snapshots.
        :type X: numpy.ndarray or iterable
        :param I: the control input.
        :type I: numpy.ndarray or iterable
        :param numpy.ndarray B: matrix that controls the control input
            influences the system evolution.
        :type B: numpy.ndarray or iterable
        """
        self._snapshots, self._snapshots_shape = self._col_major_2darray(X)
        self._controlin, self._controlin_shape = self._col_major_2darray(I)
        n_samples = self._snapshots.shape[1]
        # Shifted snapshot pairs: X holds x_0..x_{n-1}, Y holds x_1..x_n.
        X = self._snapshots[:, :-1]
        Y = self._snapshots[:, 1:]
        self._set_initial_time_dictionary(
            {"t0": 0, "tend": n_samples - 1, "dt": 1}
        )
        # Choose the operator flavour depending on whether B is supplied.
        if B is None:
            self._Atilde = DMDBUnknownOperator(**self._dmd_operator_kwargs)
            self._basis, self._B = self.operator.compute_operator(
                X, Y, self._controlin)
        else:
            self._Atilde = DMDBKnownOperator(**self._dmd_operator_kwargs)
            U, _, _ = self.operator.compute_operator(X, Y, B, self._controlin)
            self._basis = U
            self._B = B
        self._b = self._compute_amplitudes()
        return self
| StarcoderdataPython |
9613346 | <gh_stars>1-10
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from aws_lambda_powertools.logging import Logger
from aws_lambda_powertools.tracing import Tracer
from ...data import data
from ...service.exceptions import OpenDataServiceException
# Logging/tracing configuration
logger = Logger(service="Exception handler")
tracer = Tracer(service="Exception handler")
class GenericExceptionHandler(AbstractExceptionHandler):
    """
    Catch All Exception handler.
    This handler catches all kinds of exceptions and prints
    the stack trace on AWS Cloudwatch with the request envelope.
    """
    def can_handle(self, handler_input, exception):
        # type: (HandlerInput, Exception) -> bool
        # Last-resort handler: accepts every exception.
        return True
    def handle(self, handler_input, exception):
        # type: (HandlerInput, Exception) -> Response
        logger.error(exception, exc_info=True)
        # "_" is pulled from request attributes and applied to message keys —
        # presumably a gettext-style localization function; confirm upstream.
        _ = handler_input.attributes_manager.request_attributes["_"]
        session_attributes = handler_input.attributes_manager.session_attributes
        # Stored so a later "repeat" request can replay the same prompt.
        session_attributes["repeat_prompt"] = _(data.GENERIC_ERROR)
        handler_input.response_builder.speak(_(data.GENERIC_ERROR)).ask(
            _(data.GENERIC_ERROR_REPROMPT)
        )
        return handler_input.response_builder.response
class OpenDataAPIExceptionHandler(AbstractExceptionHandler):
    """
    Exception handler for Open Data API exceptions.
    """
    def can_handle(self, handler_input, exception):
        # type: (HandlerInput, Exception) -> bool
        # Only fires for failures raised by the Open Data service layer.
        return isinstance(exception, OpenDataServiceException)
    def handle(self, handler_input, exception):
        # type: (HandlerInput, Exception) -> Response
        logger.error(exception, exc_info=True)
        # "_" is pulled from request attributes and applied to message keys —
        # presumably a gettext-style localization function; confirm upstream.
        _ = handler_input.attributes_manager.request_attributes["_"]
        session_attributes = handler_input.attributes_manager.session_attributes
        # Stored so a later "repeat" request can replay the same prompt.
        session_attributes["repeat_prompt"] = _(data.OPEN_DATA_API_ERROR)
        # Speak the error message itself and reprompt with the reprompt text,
        # mirroring GenericExceptionHandler (the previous code spoke the
        # reprompt text twice, never the error message).
        handler_input.response_builder.speak(_(data.OPEN_DATA_API_ERROR)).ask(
            _(data.OPEN_DATA_API_ERROR_REPROMPT)
        )
        return handler_input.response_builder.response
| StarcoderdataPython |
1718957 | <reponame>kneasle/belltower<filename>examples/rounds.py
# Import the tower class, and 'time.sleep'
import time
from belltower import *
# Number of seconds between each bell stroke
BELL_GAP = 0.3
# Number of strokes that would fit into the handstroke gap
HANDSTROKE_GAP = 1
# Create a new tower, and tell it to join tower ID 765432918
tower = RingingRoomTower(765432918)
# The 'with' block makes sure that 'tower' has a chance to gracefully shut
# down the connection if the program crashes
with tower:
    # Wait until the tower is loaded
    tower.wait_loaded()
    # Set the bells at hand, call look to and wait for the sound to finish
    tower.set_at_hand()
    tower.call_look_to()
    # Give the "Look to" audio time to play before the first row starts.
    time.sleep(3)
    # Keep count of how many rows have been rung
    row_index = 0
    # Ring rounds forever (bells in numerical order, one row per stroke).
    while True:
        # Figure out what stroke we're on
        stroke = Stroke.from_index(row_index)
        # Implement handstroke gap
        if stroke.is_hand():
            time.sleep(BELL_GAP * HANDSTROKE_GAP)
        # Ring the row, expecting the bells to be on the right strokes
        for i in range(tower.number_of_bells):
            tower.ring_bell(Bell.from_index(i), stroke)
            time.sleep(BELL_GAP)
        # Increase the row count
        row_index += 1
9631827 | import os, sys
import bpy
from mathutils import Matrix, Vector
# Create directory (and any missing parents) if it does not already exist
def create_dir(dir):
    """Ensure that the directory *dir* exists, creating parents as needed."""
    # exist_ok avoids the check-then-create race of isdir() + makedirs();
    # like the original, this still raises if *dir* exists as a file.
    os.makedirs(dir, exist_ok=True)
# Compute the intrinsic calibration matrix K of a Blender camera
def get_calibration_matrix_K_from_blender(camd):
    """Build the 3x3 intrinsic matrix K from a Blender camera data block.

    Uses the render resolution, sensor size and focal length to derive the
    focal lengths in pixels (alpha_u, alpha_v) and the principal point
    (u_0, v_0); skew is assumed zero (rectangular pixels).
    """
    f_in_mm = camd.lens
    scene = bpy.context.scene
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camd.sensor_width
    sensor_height_in_mm = camd.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    if (camd.sensor_fit == 'VERTICAL'):
        # Sensor height is fixed; the effective sensor width is changed by the
        # pixel aspect ratio, so the horizontal scale is divided by it.
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else: # 'HORIZONTAL' and 'AUTO'
        # Sensor width is fixed; the effective sensor height is changed by the
        # pixel aspect ratio, so the vertical scale is multiplied by it.
        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
    # Parameters of intrinsic calibration matrix K
    alpha_u = f_in_mm * s_u
    alpha_v = f_in_mm * s_v
    # Principal point at the image centre.
    u_0 = resolution_x_in_px*scale / 2
    v_0 = resolution_y_in_px*scale / 2
    skew = 0 # only use rectangular pixels
    K = Matrix(((alpha_u, skew, u_0),
                (0, alpha_v, v_0),
                (0, 0, 1)))
    return K
# Function to clean the blender workspace
def remove_obj_lamp_and_mesh(context):
    """Delete all mesh/lamp objects and all mesh data blocks from the scene.

    NOTE(review): uses the Blender 2.7x API (scene.objects.unlink) — confirm
    the target Blender version.
    """
    scene = context.scene
    objs = bpy.data.objects
    meshes = bpy.data.meshes
    # Unlink each mesh/lamp object from the scene before removing its data.
    for obj in objs:
        if obj.type == 'MESH' or obj.type == 'LAMP':
            scene.objects.unlink(obj)
            objs.remove(obj)
    # Drop the now-orphaned mesh data blocks.
    for mesh in meshes:
        meshes.remove(mesh)
# Render the current frame with a redirection of the flow in a log file
def render_without_output(use_antialiasing=True):
    """Render the current frame, sending Blender's console spam to a file.

    File descriptor 1 (stdout) is temporarily duplicated and re-pointed at
    'blender_render.log' so the C-level render output is captured, then
    restored afterwards.
    """
    # redirect output to log file
    logfile = 'blender_render.log'
    open(logfile, 'a').close()
    old = os.dup(1)
    sys.stdout.flush()
    os.close(1)
    # os.open reuses the lowest free descriptor, i.e. 1 (stdout).
    os.open(logfile, os.O_WRONLY)
    # Render
    bpy.context.scene.render.use_antialiasing = use_antialiasing
    bpy.ops.render.render(write_still=True)
    # disable output redirection
    os.close(1)
    os.dup(old)
    os.close(old)
# Creating a lamp with an appropriate energy
def make_lamp(rad):
    """Create a point lamp at the origin, scaled to scene radius *rad*.

    Distance and energy are derived from *rad* so illumination stays
    comparable across differently sized scenes.
    """
    # Create new lamp datablock
    lamp_data = bpy.data.lamps.new(name="Lamp", type='POINT')
    lamp_data.distance = rad * 2.5
    lamp_data.energy = rad / 30.0
    # Create new object with our lamp datablock
    lamp_object = bpy.data.objects.new(name="Lamp", object_data=lamp_data)
    # Link lamp object to the scene so it'll appear in this scene
    scene = bpy.context.scene
    scene.objects.link(lamp_object)
    lamp_object.location = (0, 0, 0)
    return lamp_object
# Setup the environment
def setup_env(scene, depth=False, normal=False, height=480, width=640, clip_end=2000):
    """Configure render resolution and optional depth/normal output nodes.

    Returns
    -------
    True when neither depth nor normal output is requested;
    the depth file-output node when only ``depth`` is True;
    a (depth_node, normal_node) pair when both ``depth`` and ``normal``
    are True.
    """
    scene.render.resolution_x = width
    scene.render.resolution_y = height
    scene.render.resolution_percentage = 100
    scene.render.alpha_mode = 'TRANSPARENT'
    bpy.context.scene.camera.data.clip_end = clip_end
    if depth is False:
        return True
    elif normal is False:
        # Set up rendering of depth map:
        scene.use_nodes = True
        tree = scene.node_tree
        links = tree.links
        # clear default nodes
        for n in tree.nodes:
            tree.nodes.remove(n)
        # create input render layer node
        rl = tree.nodes.new('CompositorNodeRLayers')
        map = tree.nodes.new(type="CompositorNodeMapValue")
        links.new(rl.outputs['Z'], map.inputs[0])
        # Invert so nearer surfaces render brighter in the saved image.
        invert = tree.nodes.new(type="CompositorNodeInvert")
        links.new(map.outputs[0], invert.inputs[1])
        # Create a file output node for depth
        depthFileOutput = tree.nodes.new(type="CompositorNodeOutputFile")
        depthFileOutput.label = 'Depth Output'
        links.new(invert.outputs[0], depthFileOutput.inputs[0])
        return depthFileOutput
    else:
        # Set up rendering of depth map:
        scene.use_nodes = True
        tree = scene.node_tree
        links = tree.links
        # Add passes for additionally dumping albed and normals.
        bpy.context.scene.render.layers["RenderLayer"].use_pass_normal = True
        bpy.context.scene.render.layers["RenderLayer"].use_pass_color = True
        # clear default nodes
        for n in tree.nodes:
            tree.nodes.remove(n)
        # create input render layer node
        rl = tree.nodes.new('CompositorNodeRLayers')
        map = tree.nodes.new(type="CompositorNodeMapValue")
        # Size is chosen kind of arbitrarily, try out until you're satisfied with
        # resulting depth map.
        map.offset = [0]
        map.size = [0.4]
        map.use_min = True
        map.min = [0]
        map.use_max = True
        map.max = [255]
        links.new(rl.outputs['Z'], map.inputs[0])
        invert = tree.nodes.new(type="CompositorNodeInvert")
        links.new(map.outputs[0], invert.inputs[1])
        # create a file output node and set the path
        depthFileOutput = tree.nodes.new(type="CompositorNodeOutputFile")
        depthFileOutput.label = 'Depth Output'
        links.new(invert.outputs[0], depthFileOutput.inputs[0])
        # Normals come out in [-1, 1]; scale by 0.5 and add 0.5 to remap
        # them into the [0, 1] range an image file can store.
        scale_normal = tree.nodes.new(type="CompositorNodeMixRGB")
        scale_normal.blend_type = 'MULTIPLY'
        # scale_normal.use_alpha = True
        scale_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 1)
        links.new(rl.outputs['Normal'], scale_normal.inputs[1])
        bias_normal = tree.nodes.new(type="CompositorNodeMixRGB")
        bias_normal.blend_type = 'ADD'
        # bias_normal.use_alpha = True
        bias_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 0)
        links.new(scale_normal.outputs[0], bias_normal.inputs[1])
        normalFileOutput = tree.nodes.new(type="CompositorNodeOutputFile")
        normalFileOutput.label = 'Normal Output'
        links.new(bias_normal.outputs[0], normalFileOutput.inputs[0])
        return depthFileOutput, normalFileOutput
4884140 | <reponame>matthewdgroves/Practice
# Name:
# Date:
# proj01: A Simple Program
# This program asks the user for his/her name and age.
# Then, it prints a sentence that says when the user will turn 100.
# If you complete extensions, describe your extensions here!
| StarcoderdataPython |
5141541 | <gh_stars>0
import rubin_sim.maf.metrics as metrics
import rubin_sim.maf.slicers as slicers
import rubin_sim.maf.metricBundles as mb
import rubin_sim.maf.plots as plots
from .colMapDict import ColMapDict
from .common import filterList
__all__ = ['altazHealpix', 'altazLambert']
def basicSetup(metricName, colmap=None, nside=64):
    """Build the shared (colmap, alt/az healpix slicer, visit-count metric)
    triple used by the alt/az metric batches below."""
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    # Healpix grid over alt/az rather than ra/dec; caching is disabled since
    # alt/az coordinates are time-dependent.
    slicer = slicers.HealpixSlicer(nside=nside, latCol=colmap['alt'], lonCol=colmap['az'],
                                   latLonDeg=colmap['raDecDeg'], useCache=False)
    metric = metrics.CountMetric(colmap['mjd'], metricName=metricName)
    return colmap, slicer, metric
def altazHealpix(colmap=None, runName='opsim', extraSql=None,
                 extraMetadata=None, metricName='NVisits Alt/Az'):
    """Generate a set of metrics measuring the number visits as a function of alt/az
    plotted on a HealpixSkyMap.

    Parameters
    ----------
    colmap : dict, optional
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, optional
        The name of the simulated survey. Default is "opsim".
    extraSql : str, optional
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, optional
        Additional metadata to add before any below (i.e. "WFD"). Default is None.
    metricName : str, optional
        Unique name to assign to metric

    Returns
    -------
    metricBundleDict
    """
    colmap, slicer, metric = basicSetup(metricName=metricName, colmap=colmap)
    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, metadata = filterList(all=True,
                                                           extraSql=extraSql,
                                                           extraMetadata=extraMetadata)
    bundleList = []
    # rot/flip chosen so the zenith sits at the map center (see captions below).
    plotDict = {'rot': (90, 90, 90), 'flip': 'geo'}
    plotFunc = plots.HealpixSkyMap()
    # One bundle per filter, plus one for all observations combined.
    for f in filterlist:
        if f == 'all':
            subgroup = 'All Observations'
        else:
            subgroup = 'Per filter'
        displayDict = {'group': 'Alt/Az', 'order': orders[f], 'subgroup': subgroup,
                       'caption':
                       'Pointing History on the alt-az sky (zenith center) for filter %s' % f}
        bundle = mb.MetricBundle(metric, slicer, sqls[f], plotDict=plotDict,
                                 runName=runName, metadata = metadata[f],
                                 plotFuncs=[plotFunc], displayDict=displayDict)
        bundleList.append(bundle)
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def altazLambert(colmap=None, runName='opsim', extraSql=None,
                 extraMetadata=None, metricName='Nvisits as function of Alt/Az'):
    """Generate a set of metrics measuring the number visits as a function of alt/az
    plotted on a LambertSkyMap.

    Parameters
    ----------
    colmap : dict, optional
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, optional
        The name of the simulated survey. Default is "opsim".
    extraSql : str, optional
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, optional
        Additional metadata to add before any below (i.e. "WFD"). Default is None.
    metricName : str, optional
        Unique name to assign to metric

    Returns
    -------
    metricBundleDict
    """
    colmap, slicer, metric = basicSetup(metricName=metricName, colmap=colmap)
    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, metadata = filterList(all=True,
                                                           extraSql=extraSql,
                                                           extraMetadata=extraMetadata)
    bundleList = []
    # Unlike altazHealpix, the Lambert projection needs no extra plotDict rotation.
    plotFunc = plots.LambertSkyMap()
    for f in filterlist:
        if f == 'all':
            subgroup = 'All Observations'
        else:
            subgroup = 'Per filter'
        displayDict = {'group': 'Alt/Az', 'order': orders[f], 'subgroup': subgroup,
                       'caption':
                       'Alt/Az pointing distribution for filter %s' % f}
        bundle = mb.MetricBundle(metric, slicer, sqls[f],
                                 runName=runName, metadata = metadata[f],
                                 plotFuncs=[plotFunc], displayDict=displayDict)
        bundleList.append(bundle)
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
| StarcoderdataPython |
5100731 | <filename>pypage/_jinja2.py
from pypage._html import Tag, State
def _val_(name):
return '{{ %s }}' % name
def _stmt_(stmt):
return '{% ' + stmt + ' %}'
class pystmt(Tag):
    """A Tag wrapping a raw Jinja2 statement string."""

    def __init__(self, stmt):
        # html=False: presumably tells Tag to emit the text verbatim rather than
        # as an HTML element -- confirm against pypage._html.Tag.
        super().__init__(stmt, html=False)
class _if_(pystmt):
    """Jinja2 '{% if <cond> %}' statement tag."""

    def __init__(self, cond):
        super().__init__('if ' + cond)
class _for_(pystmt):
    """Jinja2 '{% for <cond> %}' statement tag."""

    def __init__(self, cond):
        super().__init__('for ' + cond)
# Public, uppercase aliases for the helpers above.
FOR = _for_
IF = _if_
VAL = _val_
STMT = _stmt_
class Snippet(object):
    """A reusable HTML snippet paired with python logic, making the frontend
    and backend pair more modular.

    Subclasses override :attr:`html` (the pypage Tags) and :meth:`logic`
    (the Jinja2 template parameters), for example::

        class NameSnippet(Snippet):
            @property
            def html(self):
                return Tag('h1', "my name is %s" % self.VAL('name'))

            def logic(self):
                return {self.KEY('name'): 'superjomn'}

    Because every template key is prefixed with the snippet's unique id (see
    :meth:`KEY`), the same snippet can be reused anywhere, any number of
    times, without variable-name collisions; a flask application can then
    render a page from several Snippets at once.
    """

    id_prefix = "snip"
    counter = 0  # class-wide counter used to build unique default ids

    def __init__(self, id=None):
        if id is None:
            id = "%s__%d" % (Snippet.id_prefix, Snippet.counter)
        self.id = id
        # The counter advances even when an explicit id was supplied,
        # preserving the historical numbering behaviour.
        Snippet.counter += 1

    @property
    def html(self):
        """The pypage Tags making up this snippet; must be overridden.

        For example: ``Tag('b', VAL('name'))``.
        """
        raise NotImplementedError

    def logic(self):
        """Return a dict of Jinja2 template parameters; must be overridden."""
        raise NotImplementedError

    def KEY(self, name):
        """Namespace *name* with this snippet's id so it is unique per snippet."""
        return "%s_%s" % (self.id, name)

    def VAL(self, name):
        """Jinja2 '{{ ... }}' expression for the namespaced *name*."""
        return VAL(self.KEY(name))
def merge_logics(*logics):
    """Merge the given dicts into one; asserts no key appears in two dicts."""
    merged = dict()
    for logic in logics:
        for key, value in logic.items():
            assert key not in merged, "duplicate logic keys"
            merged[key] = value
    return merged
if __name__ == '__main__':
    # Demo: build a few conditional/loop constructs and print the compiled template.
    State.switch_gstate(State())
    with _if_('name is not None'):
        Tag('b', 'hello world')
    with _if_('True') as f:
        Tag('b', 'this is true')
        # f.add(..., -1): presumably inserts the '{% else %}' marker before the
        # closing statement -- confirm Tag.add semantics in pypage._html.
        f.add(_stmt_('else'), -1)
        Tag('b', 'this is false')
    with _for_('user in names'):
        Tag('h1', 'user %s sex is %s' % (_val_('user.name'),
                                         _val_('user.sex')))
    print(State.gstate.compile())
| StarcoderdataPython |
from datetime import date


def categoria(idade):
    """Return the competition category name for an athlete aged *idade* years.

    Boundaries (inclusive upper bounds): <=9 MIRIM, <=14 INFANTIL,
    <=19 JÚNIOR, <=25 SENIOR, otherwise MASTER.
    """
    if idade <= 9:
        return 'MIRIM'
    elif idade <= 14:
        return 'INFANTIL'
    elif idade <= 19:
        return 'JÚNIOR'
    elif idade <= 25:
        return 'SENIOR'
    return 'MASTER'


if __name__ == '__main__':
    # Age is computed from the birth year against the current calendar year.
    ano = int(input('Ano de nascimento: '))
    atual = date.today().year
    idade = atual - ano
    print('Atletas nascidos em {} tem {} anos em {}.'.format(ano, idade, atual))
    print('Sua categoria é a {}.'.format(categoria(idade)))
4965498 | from stable_baselines3.ppo_single_level.policies import CnnPolicy, MlpPolicy, MultiInputPolicy
from stable_baselines3.ppo_single_level.ppo_single_level import PPO_SL
| StarcoderdataPython |
11235682 | <reponame>ferrerinicolas/python_samples<filename>6. Functions/6.5 Try - Except/6.5.4 Name and Age.py
"""
This program asks the user for their name and age. It handles the case where the
user fails to enter a valid integer for their age.
"""
# Ask user for name and age.
# Enter default value for age in case they do not enter an integer
name = input("Enter your name: ")
age = -1
try:
age = int(input("Enter your age: "))
except ValueError:
print("That wasn't an integer.")
# Print name and age, using default age if user did not enter an integer
print("Name: " + name)
print("Age: " + str(age)) | StarcoderdataPython |
1669124 | <reponame>rs-ds/cookiecutter-sanic
from sanic import Sanic
from sanic.response import json
from sanic_openapi import swagger_blueprint, doc
from {{cookiecutter.app_name}}.blueprint.health import health
{% if cookiecutter.enable_orm == 'true' -%}
from {{cookiecutter.app_name}}.model import DATABASE
from {{cookiecutter.app_name}}.blueprint.user import user
from {{cookiecutter.app_name}}.util import setup_database_creation_listener
{%- endif %}
{% if cookiecutter.enable_rate_limiter == 'true' -%}
from {{cookiecutter.app_name}}.util import setup_rate_limiter
{%- endif %}
# NOTE: this is a cookiecutter template -- the {% ... %} blocks are rendered
# away at project-generation time depending on the chosen options.
app = Sanic(__name__)
{% if cookiecutter.enable_rate_limiter == 'true' -%}
limiter = setup_rate_limiter(app)
{%- endif %}
# Swagger UI plus the always-enabled health blueprint.
app.blueprint(swagger_blueprint)
app.blueprint(health)
{% if cookiecutter.enable_orm == 'true' -%}
app.blueprint(user)
setup_database_creation_listener(app, DATABASE)
{%- endif %}


@app.route("/")
async def default(request):
    # Simple smoke-test endpoint for the generated project.
    return json({"message": "hello Sanic!"})
5118761 | <gh_stars>0
import numpy as np
import math
def euclidiana(vetor):
    """Euclidean (2-)norm of *vetor*: sqrt(sum of squared absolute components)."""
    soma = sum(math.fabs(componente) ** 2 for componente in vetor)
    return soma ** 0.5
def manhattan(vetor):
    """Manhattan (1-)norm of *vetor*: the sum of absolute components."""
    return sum(math.fabs(componente) for componente in vetor)
def p(vetor, p):
    """p-norm of *vetor*: (sum |v_i|**p) ** (1/p).

    Bug fix: the absolute value is taken *before* exponentiation. The old
    math.pow(vetor[i], p) raised ValueError for negative components with a
    non-integer p; results for previously-working inputs are unchanged.
    """
    n, x = len(vetor), 0
    for i in range(n):
        x += math.fabs(vetor[i]) ** p
    return x ** (1 / p)
def infinita(vetor):
    """Infinity (max-)norm of *vetor*: the largest absolute component.

    Bug fix: the running maximum is seeded with |vetor[0]|. Seeding with the
    raw (possibly negative) first element made e.g. infinita([-5, 2]) return
    2 instead of 5.
    """
    maior = math.fabs(vetor[0])
    for i in range(1, len(vetor)):
        valor = math.fabs(vetor[i])
        if valor > maior:
            maior = valor
    return maior
def frobenius(matriz_a):
    """Frobenius (2-)norm of a square matrix indexed as matriz_a[i, j]."""
    n = len(matriz_a)
    soma = 0.0
    for linha in range(n):
        for coluna in range(n):
            soma += math.fabs(matriz_a[linha, coluna]) ** 2
    return soma ** 0.5
def soma_coluna(matriz_a):
    """Maximum absolute column sum (matrix 1-norm) of square *matriz_a*.

    Bug fix: the column accumulator is reset for every column. Previously it
    kept growing across columns, so the function returned the total sum of
    all |a_ij| instead of the largest single column sum.
    """
    n = len(matriz_a)
    maior = 0
    for j in range(n):
        soma = 0
        for i in range(n):
            soma += math.fabs(matriz_a[i, j])
        if soma > maior:
            maior = soma
    return maior
def soma_linha(matriz_a):
    """Maximum absolute row sum (matrix infinity-norm) of square *matriz_a*.

    Bug fix: the row accumulator is reset for every row. Previously it kept
    growing across rows, so the function returned the total sum of all
    |a_ij| instead of the largest single row sum.
    """
    n = len(matriz_a)
    maior = 0
    for i in range(n):
        soma = 0
        for j in range(n):
            soma += math.fabs(matriz_a[i, j])
        if soma > maior:
            maior = soma
    return maior
def residual(matriz_a, vetor_b, delta_x):
    """Residual check for an approximate solution *delta_x* of A x = b.

    Prints the residual vector r = b - A @ delta_x and the relative residual
    ||r|| / ||b||, then reports whether the solution is accurate by comparing
    the relative error against cond(A) * (||r|| / ||b||).

    NOTE(review): vetor_x = np.ones(n) hard-codes the assumption that the
    exact solution is the all-ones vector -- confirm with the exercise setup.
    """
    n, k = len(matriz_a), np.linalg.cond(matriz_a)  # k: condition number of A
    delta_b = np.matmul(matriz_a, delta_x)
    vetor_r = vetor_b - delta_b
    vetor_x = np.ones(n)
    vetor_x_menos_delta_x = vetor_x - delta_x
    residuo_r_b = euclidiana(vetor_r)/euclidiana(vetor_b)
    print("Vetor residual:\n", vetor_r)
    print("Resíduo da solução x:", residuo_r_b)
    # Error bound: ||x - x~|| / ||x|| <= cond(A) * ||r|| / ||b||
    if euclidiana(vetor_x_menos_delta_x)/euclidiana(vetor_x) <= k*residuo_r_b:
        print("A solução encontrada é precisa.")
    else:
        print("A solução encontrada não é precisa.")
# Group records by date: data[date] = {'name': [unique names], 'value': [all values]}.
source = [{'date': '2017-2-1', 'name': "a", 'value': 1},
          {'date': '2017-2-1', 'name': "c", 'value': 3},
          {'date': '2017-2-1', 'name': "b", 'value': 2},
          {'date': '2017-2-2', 'name': "b", 'value': 1}]

data = dict()
for item in source:
    # Create the per-date bucket on first sight, then fill it in one place
    # (the original duplicated this logic across two branches).
    entry = data.setdefault(item['date'], {'name': [], 'value': []})
    if item['name'] not in entry['name']:
        entry['name'].append(item['name'])
    entry['value'].append(item['value'])

# print(...) works on both Python 2 and 3; the bare `print data` was py2-only.
print(data)
3302008 | """put lrf table back
Revision ID: d10c6bfdd9aa
Revises: 65c5753b57e0
Create Date: 2021-01-11 10:31:39.441091
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mssql
# revision identifiers, used by Alembic.
# Bug fix: `revision` held the placeholder "<KEY>", which breaks the Alembic
# migration chain; the real id is stated in the module docstring above.
revision = "d10c6bfdd9aa"
down_revision = "65c5753b57e0"
branch_labels = None
depends_on = None
def upgrade():
    """Remove the lrf_data table and its postal-code index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index("ix_lrf_data_postal_code", table_name="lrf_data")
    op.drop_table("lrf_data")
    # ### end Alembic commands ###
def downgrade():
    """Recreate the lrf_data table (postal_code plus five webprotege BIT flag
    columns) and its non-unique postal-code index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "lrf_data",
        sa.Column("postal_code", sa.BIGINT(), autoincrement=False, nullable=True),
        # The remaining columns are MSSQL BIT flags keyed by webprotege ontology IRIs.
        sa.Column(
            "http://webprotege.stanford.edu/R9vkBr0EApzeMGfa0rJGo9G",
            mssql.BIT(),
            autoincrement=False,
            nullable=True,
        ),
        sa.Column(
            "http://webprotege.stanford.edu/RJAL6Zu9F3EHB35HCs3cYD",
            mssql.BIT(),
            autoincrement=False,
            nullable=True,
        ),
        sa.Column(
            "http://webprotege.stanford.edu/RcIHdxpjQwjr8EG8yMhEYV",
            mssql.BIT(),
            autoincrement=False,
            nullable=True,
        ),
        sa.Column(
            "http://webprotege.stanford.edu/RDudF9SBo28CKqKpRN9poYL",
            mssql.BIT(),
            autoincrement=False,
            nullable=True,
        ),
        sa.Column(
            "http://webprotege.stanford.edu/RLc1ySxaRs4HWkW4m5w2Me",
            mssql.BIT(),
            autoincrement=False,
            nullable=True,
        ),
    )
    op.create_index(
        "ix_lrf_data_postal_code", "lrf_data", ["postal_code"], unique=False
    )
    # ### end Alembic commands ###
| StarcoderdataPython |
5008322 | import numpy as np
import pytest
from bayesian_testing.metrics.posteriors import (
beta_posteriors_all,
lognormal_posteriors,
dirichlet_posteriors,
)
# Parameter sets for beta_posteriors_all: per-variant totals/successes plus
# the Beta prior hyper-parameters; sim_count is the number of posterior draws.
BETA_POSTERIORS_ALL_INPUTS = [
    {
        "totals": [10, 20, 30],
        "successes": [8, 16, 24],
        "sim_count": 10,
        "a_priors_beta": [0.5, 0.5, 0.5],
        "b_priors_beta": [0.5, 0.5, 0.5],
    },
    {
        "totals": [20, 30],
        "successes": [16, 24],
        "sim_count": 20,
        "a_priors_beta": [0.5, 0.5],
        "b_priors_beta": [0.5, 0.5],
    },
]
# Parameter sets for lognormal_posteriors: totals with sums of logs and of
# squared logs; the last case exercises the empty-data (all-zero) path.
LOGNORMAL_POSTERIORS_INPUTS = [
    {
        "totals": 1580,
        "sum_logs": 3831.806394737816,
        "sum_logs_2": 11029.923165846496,
        "sim_count": 10000,
    },
    {
        "totals": 1580,
        "sum_logs": 4055.965234848171,
        "sum_logs_2": 12357.911862914,
        "sim_count": 100,
    },
    {
        "totals": 0,
        "sum_logs": 0,
        "sum_logs_2": 0,
        "sim_count": 100,
    },
]
# Parameter sets for dirichlet_posteriors: concentration counts and prior.
DIRICHLET_POSTERIORS_INPUTS = [
    {
        "concentration": [1, 2, 3],
        "prior": [1, 1, 1],
        "sim_count": 10000,
    },
    {
        "concentration": [100, 200],
        "prior": [1 / 2, 1 / 2],
        "sim_count": 100,
    },
]
@pytest.mark.parametrize("inp", BETA_POSTERIORS_ALL_INPUTS)
def test_beta_posteriors_all(inp):
    """Sampled posteriors form a (n_variants, sim_count) matrix."""
    all_pos = beta_posteriors_all(
        inp["totals"],
        inp["successes"],
        inp["sim_count"],
        inp["a_priors_beta"],
        inp["b_priors_beta"],
    )
    all_pos_shape = np.array(all_pos).shape
    assert all_pos_shape == (len(inp["totals"]), inp["sim_count"])
@pytest.mark.parametrize("inp", LOGNORMAL_POSTERIORS_INPUTS)
def test_lognormal_posteriors(inp):
    """Exactly sim_count draws are returned (including the zero-data case)."""
    all_pos = lognormal_posteriors(
        inp["totals"],
        inp["sum_logs"],
        inp["sum_logs_2"],
        inp["sim_count"],
    )
    assert len(all_pos) == inp["sim_count"]
@pytest.mark.parametrize("inp", DIRICHLET_POSTERIORS_INPUTS)
def test_dirichlet_posteriors(inp):
    """Sampled posteriors form a (sim_count, n_categories) matrix."""
    all_pos = dirichlet_posteriors(
        inp["concentration"],
        inp["prior"],
        inp["sim_count"],
    )
    assert all_pos.shape == (inp["sim_count"], len(inp["concentration"]))
| StarcoderdataPython |
12817417 | <reponame>MaxOnNet/scopuli-core-web
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright [2017] <NAME> [<EMAIL>]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import request
from flask import g as context
from flask import abort
import Scopuli.Interfaces.MySQL as MySQL
class WebForm:
    """Thin helper around Flask's ``request.form`` with per-type validation hooks."""

    def __init__(self, application):
        self._application = application
        self._config = self._application.config_xml

    def _validate_integer(self, value):
        # NOTE(review): stub -- accepts everything; real integer validation is missing.
        return True

    def _validate_string(self, value):
        # NOTE(review): stub -- accepts everything.
        return True

    def _validate_boolean(self, value):
        # NOTE(review): stub -- accepts everything.
        return True

    def _validate_email(self, value):
        # NOTE(review): stub -- accepts everything.
        return True

    def get(self, attribute_name, attribute_type="integer", requred=True, default=""):
        """Fetch a validated POST form field.

        Returns the raw form value when the request is a POST, the field is
        present, a matching ``_validate_<attribute_type>`` method exists, and
        it returns truthy.  Otherwise returns *default* when the field is
        optional, or aborts with HTTP 400 when it is required.

        NOTE(review): the parameter name 'requred' is a historical typo, kept
        because callers may pass it by keyword.
        """
        if str(request.method).lower() == "post":
            if attribute_name in request.form:
                attribute_value = request.form[attribute_name]
                # Dispatch to the validator matching the requested type, if any.
                fn_validate_name = "_validate_{}".format(attribute_type)
                if hasattr(self, fn_validate_name):
                    fn_validate = getattr(self, fn_validate_name)
                    if fn_validate(attribute_value):
                        return attribute_value
        if not requred:
            return default
        else:
            abort(400)
| StarcoderdataPython |
1793062 | version = '0.19.0.1' | StarcoderdataPython |
3310430 | from datetime import datetime, timedelta
import pytz
from django.conf import settings
from django.db.models.query import QuerySet
from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .models import ExpiringLink, Image
from .serializers import (
ExpiringLinkSerializer,
ImageLinkSerializer,
ImageSerializer,
)
class ImageViewSet(viewsets.ModelViewSet):
    """Viewset for Image model: authenticated users can list, retrieve and
    create their own images (no update/delete -- see http_method_names)."""

    permission_classes = [IsAuthenticated]
    queryset = Image.objects.all()
    http_method_names = ["get", "post", "head"]

    def get_queryset(self) -> QuerySet:
        """Returns Image objects of requested user."""
        return Image.objects.filter(owner=self.request.user)

    def get_serializer_class(self):
        """Returns serializer based on request user account tier settings."""
        # Tiers with fetch_url enabled use ImageLinkSerializer; others the plain one.
        if self.request.user.accountTier.fetch_url:
            return ImageLinkSerializer
        return ImageSerializer
class ExpiringLinkViewSet(viewsets.ViewSet):
    """Viewset for ExpiringLink."""

    def retrieve(self, request, pk: int) -> Response:
        """Checks link expiration time, returns data if it is valid."""
        queryset = ExpiringLink.objects.all()
        expiring_link = get_object_or_404(queryset, pk=pk)
        timezone = pytz.timezone(settings.TIME_ZONE)
        # expiration_time on the model is a number of seconds after creation.
        expiration_time = expiring_link.created_time + timedelta(
            seconds=expiring_link.expiration_time
        )
        if (expiration_time) < datetime.now(tz=timezone):
            # Expired: respond with a placeholder message instead of the data.
            return Response({"url": "This link has expired!"})
        serializer = ExpiringLinkSerializer(expiring_link)
        return Response(serializer.data)
| StarcoderdataPython |
1985384 | import numpy as np
from random import shuffle
from sklearn.utils.class_weight import compute_class_weight as ccw
from GrapHiC.models.GATE_modules import GATE_promoter_module
from GrapHiC.models.lightning_nets import LitClassifierNet
from GrapHiC.Dataset import HiC_Dataset
import torch
from torch import Tensor
from torch.nn import CrossEntropyLoss
from torch.utils.data import random_split
import torch_geometric as tgm
from torch_geometric.data import DataLoader
from torch_geometric.utils import degree
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import LearningRateMonitor
# Default preprocessed dataset (.pt file loaded with torch.load in main()).
DATASET = "Data/dset_15difffeatures_500kb_wayneRNAseq.pt"
# Training defaults; all overridable from the command line (see __main__).
NUMEPOCHS = 5000
NUMCLASSES = 3
BATCHSIZE = 100
LEARNING_RATE = 0.0001
POS_EMBEDDING_DROPOUT = 0.01
FULLY_CONNECTED_DROPOUT = 0.01
EDGE_DROPOUT = 0.1
MANUAL_SEED = 30
'''
MAIN FUNCTION
'''
def main(hparams):
    """Build the dataloaders, construct the GATE promoter classifier from the
    parsed CLI *hparams*, and run PyTorch Lightning training."""
    # The model requires an even hidden size when positional encoding is on.
    if hparams.hiddenchannels%2 !=0 and hparams.positional_encoding:
        print("positional encoding requires an even number of hidden channels, adding one to hidden channels")
        hparams.hiddenchannels += 1
    '''
    CONSTRUCTING THE DATALOADERS
    '''
    print("Loading datasets")
    if hparams.inmemory != 1:
        # On-disk datasets processed by HiC_Dataset.
        train_dset = HiC_Dataset("Data")
        val_dset = HiC_Dataset("Data",train= False)
        print("Calculating class weights")
        vals = np.array([d.y[0,1].item() for d in train_dset])
        nums = [np.sum(np.array(vals)==idx) for idx in [0,1,2]]
        weights = ccw('balanced',
                      np.array([0.0,1.0,2.0]),
                      vals)
        # NOTE(review): in this branch `dset`, `criterion` and `classes` are
        # never defined, so the code below (NUMCHIP/.../Net creation) raises
        # NameError; `weights` and `nums` are also never used afterwards.
        # Confirm whether this branch is still supported.
    else:
        dset = torch.load(hparams.dataset)
        print("Calculating class weights")
        #vals = np.array([d.y.item() for d in dset])
        #nums = [np.sum(np.array(vals)==idx) for idx in [0,1,2]]
        #weights = ccw('balanced',
        #              np.array([0.0,1.0,2.0]),
        #              vals)
        # Build a balanced 3-class subset by relabeling y: 5000 largest
        # y-values -> 'up' (0.0), 5000 smallest -> 'down' (1.0), 5000
        # smallest |y| -> 'nonsig' (2.0).
        vals = np.array([d.y.item() for d in dset])
        idxs = {'up': np.argsort(vals)[-5000:],
                'down': np.argsort(vals)[:5000],
                'nonsig': np.argsort(abs(vals))[:5000]
               }
        newdset = []
        for cls in ['up','down','nonsig']:
            for idx in idxs[cls]:
                d_add = dset[idx]
                if cls == 'up':
                    d_add.y = 0.0
                elif cls == 'down':
                    d_add.y = 1.0
                else:
                    d_add.y = 2.0
                newdset.append(d_add)
        dset = newdset
        shuffle(dset)
        numdatapoints = len(dset)
        trainsize = int(numdatapoints*hparams.trainfraction)
        # Deterministic split so runs are reproducible given MANUAL_SEED.
        train_dset, val_dset = random_split(dset,
                                            [trainsize, numdatapoints-trainsize],
                                            generator=torch.Generator().manual_seed(MANUAL_SEED)
                                           )
        classes = ('up','down','nonsig')
        criterion = CrossEntropyLoss()
        print("Loaded in memory datasets")
    train_loader = DataLoader(train_dset,
                              batch_size=hparams.batchsize,
                              shuffle = True,
                              drop_last=True
                             )
    val_loader = DataLoader(val_dset,
                            batch_size=hparams.batchsize,
                            shuffle = True,
                            drop_last=True
                           )
    '''
    INITIALISING/TRAINING THE MODEL
    '''
    # Infer model dimensions from the first graph: node-feature width,
    # edge-feature width and the (fixed) number of nodes per graph.
    NUMCHIP = dset[0].x.shape[1]
    NUMEDGE = dset[0].edge_attr.shape[1]
    NUMNODESPERGRAPH = dset[0].x.shape[0]
    # A single --dropout value overrides both specific dropout settings.
    if hparams.dropout is not None:
        hparams.pdropout = hparams.dropout
        hparams.fdropout = hparams.dropout
    if hparams.recurrent == 1:
        hparams.recurrent = True
    else:
        hparams.recurrent = False
    if hparams.principal_neighbourhood_aggregation == 1:
        hparams.principal_neighbourhood_aggregation = True
        # Compute in-degree histogram over training data.
        deg = torch.zeros(NUMNODESPERGRAPH+1, dtype=torch.long)
        for data in train_dset:
            d = degree(data.edge_index[1],
                       num_nodes=data.num_nodes,
                       dtype=torch.long)
            deg += torch.bincount(d, minlength=deg.numel())
    else:
        hparams.principal_neighbourhood_aggregation = False
        deg = None
    module = GATE_promoter_module(hidden_channels = hparams.hiddenchannels,
                                  inchannels = NUMCHIP,
                                  edgechannels = NUMEDGE,
                                  embedding_layers = hparams.embeddinglayers,
                                  num_fc = hparams.fullyconnectedlayers,
                                  fc_channels = hparams.fullyconnectedchannels,
                                  num_graph_convs = hparams.graph_convolutions,
                                  positional_encoding = hparams.positional_encoding,
                                  pos_embedding_dropout = hparams.pdropout,
                                  fc_dropout = hparams.fdropout,
                                  edge_dropout = hparams.edropout,
                                  recurrent = hparams.recurrent,
                                  numnodespergraph = NUMNODESPERGRAPH,
                                  principal_neighbourhood_aggregation = hparams.principal_neighbourhood_aggregation,
                                  deg = deg,
                                  aggr = hparams.aggregation,
                                  heads = hparams.heads
                                 )
    Net = LitClassifierNet(module,
                           train_loader,
                           val_loader,
                           criterion = criterion,
                           inputhparams = hparams
                          )
    tb_logger = pl_loggers.TensorBoardLogger(hparams.logdir,
                                             name = hparams.experiment_name,
                                             version = hparams.version
                                            )
    lr_monitor = LearningRateMonitor(logging_interval='step')
    # Three learning-rate modes: manual LR range test with a saved plot,
    # Lightning's automatic tuner, or a fixed CLI-provided learning rate.
    if hparams.plot_lr and hparams.auto_lr_find:
        trainer = pl.Trainer(gpus=hparams.gpus,
                             max_epochs=hparams.epochs,
                             progress_bar_refresh_rate=1,
                             logger=tb_logger,
                             auto_lr_find=False,
                             resume_from_checkpoint=hparams.checkpoint,
                             callbacks=[lr_monitor],
                             stochastic_weight_avg=True
                            )
        lr_finder = trainer.tuner.lr_find(Net)
        # Results can be found in
        lr_finder.results
        # Plot with
        if hparams.plot_lr:
            fig = lr_finder.plot(suggest=True)
            fig.savefig("learning_rate_suggestion.png", format = 'png')
        # Pick point based on plot, or get suggestion
        new_lr = lr_finder.suggestion()
        # update hparams of the model
        Net.hparams.lr = new_lr
    elif hparams.auto_lr_find:
        trainer = pl.Trainer(gpus=hparams.gpus,
                             max_epochs=hparams.epochs,
                             progress_bar_refresh_rate=1,
                             logger=tb_logger,
                             auto_lr_find=True,
                             resume_from_checkpoint=hparams.checkpoint,
                             callbacks=[lr_monitor],
                             stochastic_weight_avg=True
                            )
        trainer.tune(Net)
    else:
        trainer = pl.Trainer(gpus=hparams.gpus,
                             max_epochs=hparams.epochs,
                             progress_bar_refresh_rate=1,
                             logger=tb_logger,
                             auto_lr_find=False,
                             resume_from_checkpoint=hparams.checkpoint,
                             callbacks=[lr_monitor],
                             stochastic_weight_avg=True
                            )
        Net.hparams.lr = hparams.learning_rate
    trainer.fit(Net, train_loader, val_loader)
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Command-line interface; defaults mirror the module-level constants above.
    # NOTE(review): arguments declared with type=bool (--auto_lr_find,
    # --plot_lr, --positional_encoding) treat ANY non-empty string as True,
    # so passing "False" still yields True. Consider action='store_true'.
    parser = ArgumentParser()
    parser.add_argument('-g',
                        '--gpus',
                        type = int,
                        default=1)
    parser.add_argument('-hidden',
                        '--hiddenchannels',
                        type = int,
                        default=10)
    parser.add_argument('-heads',
                        '--heads',
                        type = int,
                        default=4)
    parser.add_argument('-em',
                        '--embeddinglayers',
                        type = int,
                        default=10)
    parser.add_argument('-e',
                        '--epochs',
                        type = int,
                        default=NUMEPOCHS)
    parser.add_argument('-l',
                        '--logdir',
                        type = str,
                        default='runs/')
    parser.add_argument('-b',
                        '--batchsize',
                        type = int,
                        default=BATCHSIZE)
    parser.add_argument('-d',
                        '--dataset',
                        type = str,
                        default=DATASET)
    parser.add_argument('-t',
                        '--trainfraction',
                        type = float,
                        default=0.7)
    parser.add_argument('--learning_rate',
                        type = float,
                        default=LEARNING_RATE)
    parser.add_argument('--pdropout',
                        type = float,
                        default=POS_EMBEDDING_DROPOUT)
    parser.add_argument('--fdropout',
                        type = float,
                        default=FULLY_CONNECTED_DROPOUT)
    parser.add_argument('--edropout',
                        type = float,
                        default =EDGE_DROPOUT)
    parser.add_argument('-c',
                        '--checkpoint',
                        default=None)
    parser.add_argument('-alr',
                        '--auto_lr_find',
                        type = bool,
                        default=False)
    parser.add_argument('-plr',
                        '--plot_lr',
                        type = bool,
                        default=False)
    parser.add_argument('-p',
                        '--positional_encoding',
                        type = bool,
                        default=True)
    parser.add_argument('-pna',
                        '--principal_neighbourhood_aggregation',
                        type = int,
                        default=0)
    parser.add_argument('-m',
                        '--manual_seed',
                        type = int,
                        default=MANUAL_SEED)
    parser.add_argument('-v',
                        '--version',
                        type = int,
                        default = 0)
    parser.add_argument('-en',
                        '--experiment_name',
                        type = str,
                        default = 'default')
    parser.add_argument('-n',
                        '--numsteps',
                        type = int,
                        default = int(1e6))
    parser.add_argument('-gc',
                        '--graph_convolutions',
                        type = int,
                        default = 6)
    parser.add_argument('-fcc',
                        '--fullyconnectedchannels',
                        type = int,
                        default = 10)
    parser.add_argument('-fcl',
                        '--fullyconnectedlayers',
                        type = int,
                        default = 10)
    parser.add_argument('-im',
                        '--inmemory',
                        type = int,
                        default = 1)
    parser.add_argument('-dr',
                        '--dropout',
                        type = float,
                        default = None)
    parser.add_argument('-r',
                        '--recurrent',
                        type = int,
                        default = 1)
    parser.add_argument('-a',
                        '--aggregation',
                        type = str,
                        default = 'add')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
3524391 | # Django core
from django.contrib import admin
# Our apps
from .models import UserSocialAuth
admin.site.register(UserSocialAuth)
| StarcoderdataPython |
17032 | <filename>featureflow/feature_registration.py
class FeatureRegistration:
    """Registration payload for a feature flag: key, failover variant, variants."""

    def __init__(self, key, failoverVariant, variants=None):
        """Store the registration data; *variants* is an optional list of Variant objects."""
        self.key = key
        self.failoverVariant = failoverVariant
        # `variants=None` avoids the mutable-default-argument pitfall of the
        # original `variants=[]`; behaviour for callers is unchanged.
        self.variants = [v.toJSON() for v in (variants or [])]

    def toJSON(self):
        """Return a plain-dict representation suitable for JSON serialisation.

        Bug fix: the original evaluated self.__dict__ without returning it,
        so toJSON() always returned None.
        """
        return self.__dict__
class Variant:
    """A single feature variant identified by key and display name."""

    def __init__(self, key, name):
        self.key = key
        self.name = name

    def toJSON(self):
        """Return a plain-dict representation suitable for JSON serialisation.

        Bug fix: the original evaluated self.__dict__ without returning it,
        so toJSON() always returned None.
        """
        return self.__dict__
| StarcoderdataPython |
9722013 | from dataclasses import dataclass
from paper_trader.utils.dataclasses import primary_key, to_pandas
from paper_trader.utils.pandas import rows_count
from paper_trader.utils.price import Price
@dataclass
class DataclassNoPk:
    # Fixture: plain dataclass without a primary-key decorator.
    a: str
    b: int
@primary_key("c")
@dataclass
class DataclassWithPk:
a: str
b: Price
c: str
def test_to_pandas():
    """to_pandas: None for empty input; fields map to columns; the declared
    primary-key field becomes the DataFrame index."""
    df = to_pandas([])
    assert df is None
    df = to_pandas([DataclassNoPk("str1", 1), DataclassNoPk("str2", 2)])
    assert df is not None
    assert df.size == 4  # 2 rows x 2 cols
    assert rows_count(df.loc[((df["a"] == "str1") & (df["b"] == 1))]) == 1
    assert rows_count(df.loc[((df["a"] == "str2") & (df["b"] == 2))]) == 1
    df = to_pandas(
        [
            DataclassWithPk("abc1", Price("12.34"), "2def"),
            DataclassWithPk("ghi3", Price("43.21"), "jkl4"),
        ]
    )
    assert df is not None
    assert df.size == 4  # 2 rows x 3 cols (-1 index)
    # Price values compare equal to plain floats; "c" values become the index.
    row = df.loc[((df["a"] == "abc1") & (df["b"] == 12.34))]
    assert rows_count(row) == 1
    assert row.index[0] == "2def"
    row = df.loc[((df["a"] == "ghi3") & (df["b"] == 43.21))]
    assert rows_count(row) == 1
    assert row.index[0] == "jkl4"
| StarcoderdataPython |
8081359 | import open3d as o3d
import numpy as np
def voxel_grid_to_pcd(voxel_grid, n_points=50):
    """Convert an open3d VoxelGrid into a single colored PointCloud.

    Each occupied voxel becomes an axis-aligned box mesh of edge
    voxel_grid.voxel_size at the voxel's world position, painted with the
    voxel color and uniformly sampled with *n_points* points; all samples
    are merged into one PointCloud.

    Improvements over the original: the unused point_cloud_np/box_structure
    locals are removed, and all sampled points/colors are concatenated once
    at the end instead of re-allocating the merged arrays inside a loop
    (which was O(n^2) in the number of voxels).
    """
    sampled_points = []
    sampled_colors = []
    for voxel in voxel_grid.get_voxels():
        mesh_box = o3d.geometry.TriangleMesh.create_box(width=voxel_grid.voxel_size,
                                                        height=voxel_grid.voxel_size,
                                                        depth=voxel_grid.voxel_size)
        mesh_box.paint_uniform_color(voxel.color)
        mesh_box.translate(voxel_grid.origin + voxel.grid_index * voxel_grid.voxel_size)
        pcd_node = mesh_box.sample_points_uniformly(number_of_points=n_points)
        sampled_points.append(np.asarray(pcd_node.points))
        sampled_colors.append(np.asarray(pcd_node.colors))
    pcd_map = o3d.geometry.PointCloud()
    if sampled_points:
        pcd_map.points = o3d.utility.Vector3dVector(np.concatenate(sampled_points, axis=0))
        pcd_map.colors = o3d.utility.Vector3dVector(np.concatenate(sampled_colors, axis=0))
    else:
        # Match the original behaviour for an empty grid: empty points, colors unset.
        pcd_map.points = o3d.utility.Vector3dVector([])
    return pcd_map
def pcd_to_voxel_grid(pcd, voxel_size=0.2):
    """Voxelise *pcd* (an open3d PointCloud) into a VoxelGrid with the given voxel edge length."""
    f_voxel = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=voxel_size)
    return f_voxel
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.