hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e098241d28e098853efa7bbedd7a883dd13ccfd5 | 17,454 | py | Python | _notebooks/overview_helpers.py | kant/covid19-dashboard | 5195e3ae4fe08eb9a726f3e2a3124646390ae1a2 | [
"Apache-2.0"
] | null | null | null | _notebooks/overview_helpers.py | kant/covid19-dashboard | 5195e3ae4fe08eb9a726f3e2a3124646390ae1a2 | [
"Apache-2.0"
] | 5 | 2020-04-04T14:24:42.000Z | 2022-02-26T07:02:00.000Z | _notebooks/overview_helpers.py | kant/covid19-dashboard | 5195e3ae4fe08eb9a726f3e2a3124646390ae1a2 | [
"Apache-2.0"
] | null | null | null | import os
from urllib import request
import pandas as pd
# Directory holding the bundled data files. When executed in a context
# without __file__ (e.g. pasted into a notebook cell), fall back to a
# plain relative path.
if '__file__' in locals():
    data_folder = os.path.join(os.path.dirname(__file__), 'data_files')
else:
    data_folder = 'data_files'
class SourceData:
    """Static access to the raw data sources: the local country-name
    mapping CSV, the JHU CSSE COVID-19 time-series CSVs, and the HTML
    report template."""

    # Country-name normalisation table, loaded once at import time from the
    # bundled CSV (NOTE: performs file I/O when this module is imported).
    df_mappings = pd.read_csv(os.path.join(data_folder, 'mapping_countries.csv'))

    # 'replace.country': raw JHU country name -> canonical display name
    # 'map.continent':   canonical display name -> continent
    mappings = {'replace.country': dict(df_mappings.dropna(subset=['Name'])
                                        .set_index('Country')['Name']),
                'map.continent': dict(df_mappings.set_index('Name')['Continent'])
                }

    @classmethod
    def get_overview_template(cls):
        """Return the contents of the overview template file as a string."""
        with open(os.path.join(data_folder, 'overview.tpl')) as f:
            return f.read()

    @classmethod
    def get_covid_dataframe(cls, name):
        """Download one JHU CSSE global time-series CSV ('confirmed',
        'deaths' or 'recovered') and normalise its country names.

        NOTE: performs a network download on every call.
        """
        url = (
            'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/'
            f'csse_covid_19_time_series/time_series_covid19_{name}_global.csv')
        df = pd.read_csv(url)
        # rename countries to the canonical names from the mapping CSV
        df['Country/Region'] = df['Country/Region'].replace(cls.mappings['replace.country'])
        return df

    @staticmethod
    def get_dates(df):
        """Return (LAST_DATE_I, dt_cols) for a JHU time-series frame.

        dt_cols is the list of date columns (everything except the four
        location columns); LAST_DATE_I is the *negative* index of the most
        recent date column that contains any non-zero data, so it can be
        used directly for from-the-end indexing elsewhere.
        """
        dt_cols = df.columns[~df.columns.isin(['Province/State', 'Country/Region', 'Lat', 'Long'])]
        LAST_DATE_I = -1
        # sometimes last column may be empty, then go backwards
        for i in range(-1, -len(dt_cols), -1):
            if not df[dt_cols[i]].fillna(0).eq(0).all():
                LAST_DATE_I = i
                break
        return LAST_DATE_I, dt_cols
class OverviewData:
    """Aggregated views over the JHU CSSE time series.

    NOTE: the class attributes below are computed at import time and
    perform network downloads via SourceData.get_covid_dataframe().
    """
    COL_REGION = 'Country/Region'
    # Absolute-count columns produced by overview_table().
    ABS_COLS = ['Cases', 'Deaths', 'Cases (+)', 'Deaths (+)']

    # Raw per-location time-series frames (one column per date).
    dft_cases = SourceData.get_covid_dataframe('confirmed')
    dft_deaths = SourceData.get_covid_dataframe('deaths')
    dft_recovered = SourceData.get_covid_dataframe('recovered')

    # Negative index of the last date column containing data, plus the
    # list of date columns (see SourceData.get_dates).
    LAST_DATE_I, dt_cols = SourceData.get_dates(dft_cases)

    dt_today = dt_cols[LAST_DATE_I]
    # Per-country totals as of the most recent date with data.
    dfc_cases = dft_cases.groupby(COL_REGION)[dt_today].sum()
    dfc_deaths = dft_deaths.groupby(COL_REGION)[dt_today].sum()

    # Default number of days to look back when computing "new" counts.
    PREV_LAG = 5
    dt_lag = dt_cols[LAST_DATE_I - PREV_LAG]

    @classmethod
    def lagged_cases(cls, lag=PREV_LAG):
        """Per-country case totals as of *lag* days before the last date."""
        return cls.dft_cases.groupby(cls.COL_REGION)[cls.dt_cols[cls.LAST_DATE_I - lag]].sum()

    @classmethod
    def lagged_deaths(cls, lag=PREV_LAG):
        """Per-country death totals as of *lag* days before the last date."""
        return cls.dft_deaths.groupby(cls.COL_REGION)[cls.dt_cols[cls.LAST_DATE_I - lag]].sum()

    @classmethod
    def overview_table(cls):
        """Build the per-country overview table: current totals, new counts
        since PREV_LAG days ago, fatality rate (percent) and continent."""
        df_table = (pd.DataFrame(dict(Cases=cls.dfc_cases,
                                      Deaths=cls.dfc_deaths,
                                      PCases=cls.lagged_cases(),
                                      PDeaths=cls.lagged_deaths()))
                    .sort_values(by=['Cases', 'Deaths'], ascending=[False, False])
                    .reset_index())
        df_table.rename(columns={'index': 'Country/Region'}, inplace=True)
        for c in 'Cases, Deaths'.split(', '):
            # clip(0): totals occasionally decrease in the source data,
            # which would yield negative "new" counts.  # DATA BUG
            df_table[f'{c} (+)'] = (df_table[c] - df_table[f'P{c}']).clip(0)
        df_table['Fatality Rate'] = (100 * df_table['Deaths'] / df_table['Cases']).round(1)
        df_table['Continent'] = df_table['Country/Region'].map(SourceData.mappings['map.continent'])

        # remove problematic countries (inconsistent data in the source)
        df_table = df_table[~df_table['Country/Region'].isin(['Cape Verde', 'Cruise Ship', 'Kosovo'])]
        return df_table

    @classmethod
    def make_summary_dict(cls):
        """Return a flat dict of worldwide totals plus China/US/Europe
        sub-totals and the 'updated'/'since' timestamps, for templating."""
        df_table = cls.overview_table()

        metrics = cls.ABS_COLS
        s_china = df_table[df_table['Country/Region'].eq('China')][metrics].sum().add_prefix('China ')
        s_us = df_table[df_table['Country/Region'].eq('US')][metrics].sum().add_prefix('US ')
        s_eu = df_table[df_table['Continent'].eq('Europe')][metrics].sum().add_prefix('EU ')

        summary = {'updated': pd.to_datetime(cls.dt_today), 'since': pd.to_datetime(cls.dt_lag)}
        summary = {**summary, **df_table[metrics].sum(), **s_china, **s_us, **s_eu}
        return summary

    @classmethod
    def make_new_cases_arrays(cls, n_days=50):
        """Per-country daily *new* case counts for the last *n_days* days
        (first differences of the cumulative series)."""
        dft_ct_cases = cls.dft_cases.groupby(cls.COL_REGION)[cls.dt_cols].sum()
        dft_ct_new_cases = dft_ct_cases.diff(axis=1).fillna(0).astype(int)
        return dft_ct_new_cases.loc[:, cls.dt_cols[cls.LAST_DATE_I - n_days]:cls.dt_cols[cls.LAST_DATE_I]]
class WordPopulation:
    """World population figures scraped from worldometers, cached locally
    as CSV.

    NOTE(review): the name looks like a typo for "WorldPopulation", but it
    is part of the public interface (HostpitalBeds subclasses it and other
    code calls WordPopulation.load()), so it is kept unchanged.
    """
    csv_path = os.path.join(data_folder, 'world_population.csv')
    page = 'https://www.worldometers.info/world-population/population-by-country/'

    @classmethod
    def scrape(cls):
        """Fetch cls.page and return its first HTML table as a DataFrame."""
        # !pip install beautifulsoup4
        # !pip install lxml
        import bs4

        # read html
        source = request.urlopen(cls.page).read()
        soup = bs4.BeautifulSoup(source, 'lxml')

        # get pandas df of the first table on the page
        table = soup.find_all('table')
        return pd.read_html(str(table))[0]

    @classmethod
    def download(cls):
        """Scrape the page, normalise the columns and persist to csv_path."""
        df = cls.scrape()

        # clean up df: keep and rename only the columns we use
        rename_map = {'Country (or dependency)': 'country',
                      'Population (2020)': 'population',
                      'Land Area (Km²)': 'area',
                      'Urban Pop %': 'urban_ratio',
                      }
        df_clean = df.rename(rename_map, axis=1)[rename_map.values()]
        # extract the leading digits of the urban percentage (the column
        # presumably contains values like '83 %' or 'N.A.' -- TODO confirm)
        # and convert to a 0-1 ratio
        df_clean['urban_ratio'] = pd.to_numeric(df_clean['urban_ratio'].str.extract(r'(\d*)')[0]) / 100

        df_clean.to_csv(cls.csv_path, index=None)

    @classmethod
    def load(cls):
        """Return the cached CSV as a DataFrame, downloading it first if
        the cache file does not exist yet."""
        if not os.path.exists(cls.csv_path):
            cls.download()
        return pd.read_csv(cls.csv_path)
class HostpitalBeds(WordPopulation):
    """Hospital/ICU bed statistics scraped from Wikipedia, augmented with
    a manually curated CSV for Asian countries. Inherits scrape()/load()
    from WordPopulation.

    NOTE(review): the class name is a typo for "HospitalBeds" but it is
    referenced elsewhere (e.g. OverviewDataExtras.beds_df), so it stays.
    """
    csv_path = os.path.join(data_folder, 'hospital_beds.csv')
    page = 'https://en.wikipedia.org/wiki/List_of_countries_by_hospital_beds'

    @classmethod
    def download(cls):
        """Scrape the Wikipedia table, clean it, merge in the Asian ICU
        data and persist the result to csv_path."""
        df_wiki = cls.scrape()

        # clean up wiki df: drop the two top levels of the column MultiIndex
        df_wiki = df_wiki.droplevel([0, 1], axis=1)

        # the occupancy column's exact header varies, so find it by prefix
        rename_map = {'Country/territory': 'country',
                      'ICU-CCB beds/100,000 inhabitants': 'icu_per_100k',
                      df_wiki.columns[df_wiki.columns.str.startswith('Occupancy')][0]: 'occupancy',
                      '2017': 'beds_per_1000_2017',
                      }
        df_clean = df_wiki.rename(rename_map, axis=1)[rename_map.values()]
        # strip footnote markers like "[1]" before numeric conversion
        df_clean['icu_per_100k'] = pd.to_numeric(df_clean['icu_per_100k'].str
                                                 .replace(r'\[\d*\]', ''))

        # load df for asian countries
        # file manually created from
        # https://www.researchgate.net/publication/338520008_Critical_Care_Bed_Capacity_in_Asian_Countries_and_Regions
        df_asia = pd.read_csv(os.path.join(data_folder, 'ccb_asian_countries.csv'))
        # only add Asian rows for countries not already in the wiki table
        df_clean = pd.concat([df_clean,
                              df_asia[~df_asia['country'].isin(df_clean['country'])]])

        df_clean.to_csv(cls.csv_path, index=None)
class OverviewDataExtras(OverviewData):
    """Extends OverviewData with per-100k normalisation, testing-bias
    corrected case estimates, ICU capacity data and forward SIR-style
    projections."""

    # raw overview_table() column name -> dotted name used by templates
    ABS_COLS_MAP = {'Cases': 'Cases.total',
                    'Deaths': 'Deaths.total',
                    'Cases (+)': 'Cases.new',
                    'Deaths (+)': 'Deaths.new'}
    ABS_COLS_RENAMED = list(ABS_COLS_MAP.values())
    PER_100K_COLS = [f'{c}.per100k' for c in ABS_COLS_RENAMED]
    # the [::2] stride picks only the "Cases" entries (skipping "Deaths")
    CASES_COLS = ABS_COLS_RENAMED[::2] + PER_100K_COLS[::2]
    EST_COLS = [f'{c}.est' for c in CASES_COLS]

    @classmethod
    def populations_df(cls):
        """World population indexed by country, with country names
        remapped to the labels used by the JHU dataset."""
        df_pop = WordPopulation.load().rename(columns={'country': cls.COL_REGION})
        df_pop[cls.COL_REGION] = df_pop[cls.COL_REGION].map({
            'United States': 'US',
            'Czech Republic (Czechia)': 'Czechia',
            'Taiwan': 'Taiwan*',
            'State of Palestine': 'West Bank and Gaza',
            'Côte d\'Ivoire': 'Cote d\'Ivoire',
        }).fillna(df_pop[cls.COL_REGION])
        return df_pop.set_index(cls.COL_REGION)

    @classmethod
    def beds_df(cls):
        """Hospital/ICU bed statistics indexed by country, remapped to
        the JHU country labels."""
        df_beds = HostpitalBeds.load().rename(columns={'country': cls.COL_REGION})
        df_beds[cls.COL_REGION] = df_beds[cls.COL_REGION].map({
            'United States': 'US',
            'United Kingdom (more)': 'United Kingdom',
            'Czech Republic': 'Czechia',
        }).fillna(df_beds[cls.COL_REGION])
        return df_beds.set_index(cls.COL_REGION)

    @classmethod
    def overview_table_with_per_100k(cls):
        """Overview table with template column names, a 0-1 fatality rate
        and per-100k-population columns. Countries without a known
        population are dropped."""
        df = (cls.overview_table()
              .rename(columns=cls.ABS_COLS_MAP)
              .drop(['PCases', 'PDeaths'], axis=1)
              .set_index(cls.COL_REGION, drop=True)
              .sort_values('Cases.new', ascending=False))
        df['Fatality Rate'] /= 100  # percentage -> 0-1 ratio

        df_pop = cls.populations_df()
        df['population'] = df_pop['population']
        df.dropna(subset=['population'], inplace=True)
        for col, per_100k_col in zip(cls.ABS_COLS_RENAMED, cls.PER_100K_COLS):
            df[per_100k_col] = df[col] * 1e5 / df['population']
        return df

    @classmethod
    def table_with_estimated_cases(cls, death_lag=8):
        """Add a per-country testing bias factor and bias-corrected case
        estimate columns (EST_COLS).

        Assumptions:
            - unbiased (if everyone is tested) mortality rate is
                around 1.5% (from what was found in heavily tested countries)
            - it takes on average 8 days after being reported case (tested
                positive) to die and become reported death.
            - testing ratio / bias (how many are suspected tested) of countries
                didn't change significantly during the last 8 days.
            - Recent new cases can be adjusted using the same testing_ratio bias.
        """
        probable_unbiased_mortality_rate = 0.015  # Diamond Princess / Kuwait / South Korea
        # +1 on both sides: pseudo-counts to avoid division by zero
        lagged_mortality_rate = (cls.dfc_deaths + 1) / (cls.lagged_cases(death_lag) + 1)
        testing_bias = lagged_mortality_rate / probable_unbiased_mortality_rate
        # testing can only miss cases, so the bias factor is at least 1
        testing_bias[testing_bias < 1] = 1

        df = cls.overview_table_with_per_100k()
        df['testing_bias'] = testing_bias
        for col, est_col in zip(cls.CASES_COLS, cls.EST_COLS):
            df[est_col] = df['testing_bias'] * df[col]
        return df.sort_values('Cases.new.est', ascending=False)

    @classmethod
    def smoothed_growth_rates(cls, n_days):
        """Weighted average of the daily case growth rate over the last
        *n_days*, weighting each day by its share of the total cases."""
        recent_dates = cls.dt_cols[-n_days:]

        cases = (cls.dft_cases.groupby(cls.COL_REGION).sum()[recent_dates] + 1)  # with pseudo counts
        diffs = cls.dft_cases.groupby(cls.COL_REGION).sum().diff(axis=1)[recent_dates]

        # dates with larger number of cases have higher sampling accuracy
        # so their measurement deserve more confidence
        sampling_weights = (cases.T / cases.sum(axis=1).T).T

        # daily rate is new / (total - new)
        daily_growth_rates = cases / (cases - diffs)

        weighted_growth_rate = (daily_growth_rates * sampling_weights).sum(axis=1)
        return weighted_growth_rate

    @classmethod
    def table_with_icu_capacities(cls):
        """Add ICU capacity columns (total and estimated spare, per 100k)."""
        df = cls.table_with_estimated_cases()

        df_beds = cls.beds_df()
        df['icu_capacity_per100k'] = df_beds['icu_per_100k']
        # occupancy 66% for us:
        # https://www.sccm.org/Blog/March-2020/United-States-Resource-Availability-for-COVID-19
        # occupancy average 75% for OECD:
        # https://www.oecd-ilibrary.org/social-issues-migration-health/health-at-a-glance-2019_4dd50c09-en
        # -> assume roughly 30% of ICU capacity is spare
        df['icu_spare_capacity_per100k'] = df['icu_capacity_per100k'] * 0.3
        return df

    @classmethod
    def table_with_projections(cls, projection_days=(7, 14, 30, 60, 90), plot_countries=()):
        """Full table including SIR-model projections for the horizons in
        *projection_days*; optionally plots S/I/R curves for the countries
        named in *plot_countries*."""
        df = cls.table_with_icu_capacities()
        df['affected_ratio'] = df['Cases.total'] / df['population']

        past_recovered, past_active, simulation_start_day = (
            cls._calculate_recovered_and_active_until_now(df))

        df, past_recovered, past_active = cls._run_SIR_model_forward(
            df,
            past_recovered=past_recovered,
            past_active=past_active,
            projection_days=projection_days)

        if len(plot_countries):
            cls._plot_SIR_for_countries(plot_countries=plot_countries,
                                        past_recovered=past_recovered,
                                        past_active=past_active,
                                        simulation_start_day=simulation_start_day,
                                        growth_rate=df['growth_rate'])
        return df

    @classmethod
    def _calculate_recovered_and_active_until_now(cls, df, recovery_lagged9_rate=0.07):
        """Walk the historical series and estimate, per day, the recovered
        and active ratios of each country's population.

        Returns (recovered_series_list, active_series_list, last_day_index).
        """
        # estimated daily cases ratio of population
        lagged_cases_ratios = (cls.dft_cases.groupby(cls.COL_REGION).sum()[cls.dt_cols].T *
                               df['testing_bias'].T / df['population'].T).T

        # protect from testing bias over-inflation
        lagged_cases_ratios[lagged_cases_ratios > 1] = 1

        # run through history and estimate recovered and active using:
        # https://covid19dashboards.com/outstanding_cases/#Appendix:-Methodology-of-Predicting-Recovered-Cases
        recs, actives = [], []
        zeros_series = lagged_cases_ratios[cls.dt_cols[0]] * 0  # this is to have consistent types
        day = 0
        for day in range(len(cls.dt_cols)):
            prev_rec = recs[day - 1] if day > 0 else zeros_series
            # BUGFIX: this guard used to be `day >= 8`, which let day 8
            # compute cls.dt_cols[8 - 9] == cls.dt_cols[-1] via Python's
            # negative indexing -- i.e. it read the *most recent* date's
            # totals instead of zero history for "9 days earlier".
            tot_lagged_9 = lagged_cases_ratios[cls.dt_cols[day - 9]] if day >= 9 else zeros_series
            recs.append(prev_rec + (tot_lagged_9 - prev_rec) * recovery_lagged9_rate)
            actives.append(lagged_cases_ratios[cls.dt_cols[day]] - recs[day])

        return recs, actives, day

    @classmethod
    def _run_SIR_model_forward(cls,
                               df,
                               past_recovered,
                               past_active,
                               projection_days,
                               recovery_lagged9_rate=0.07):
        """Simulate the SIR dynamics forward to max(projection_days),
        appending to past_recovered/past_active and writing the projection
        columns into *df* for day 1 and each requested horizon."""
        cur_growth_rate = cls.smoothed_growth_rates(n_days=cls.PREV_LAG)
        df['growth_rate'] = (cur_growth_rate - 1)

        # observed growth = infection - recovery, so back out the infect rate
        cur_recovery_rate = (past_recovered[-1] - past_recovered[-2]) / past_active[-1]
        infect_rate = cur_growth_rate - 1 + cur_recovery_rate

        ICU_ratio = 0.06  # assumed share of active cases needing ICU -- no source given, confirm
        rec_rate_simple = 0.05

        # simulate
        # NOTE(review): 'neek' is a typo for 'need', but the column name is
        # consumed downstream (templates), so it is kept for compatibility.
        df['peak_icu_neek_per100k'] = 0
        for day in range(1, projection_days[-1] + 1):
            # calculate susceptible
            sus = 1 - past_recovered[-1] - past_active[-1]

            # calculate new recovered
            actives_lagged_9 = past_active[-9]
            delta_rec = actives_lagged_9 * recovery_lagged9_rate
            delta_rec_simple = past_active[-1] * rec_rate_simple
            # limit recovery rate to simple SIR model where
            # lagged rate estimation becomes too high (on the downward slopes)
            delta_rec[delta_rec > delta_rec_simple] = delta_rec_simple[delta_rec > delta_rec_simple]
            new_recovered = past_recovered[-1] + delta_rec

            # calculate new active
            delta_infect = past_active[-1] * sus * infect_rate
            new_active = past_active[-1] + delta_infect - delta_rec
            new_active[new_active < 0] = 0

            # update
            past_recovered.append(new_recovered)
            past_active.append(new_active)

            icu_need = past_active[-1] * df['population'] * ICU_ratio / 1e5
            # track the running peak of ICU need over the whole simulation
            df['peak_icu_neek_per100k'] = pd.concat([df['peak_icu_neek_per100k'],
                                                     icu_need], axis=1).max(axis=1)

            if day == 1 or day in projection_days:
                suffix = f'.+{day}d' if day > 1 else ''
                df[f'needICU.per100k{suffix}'] = icu_need
                df[f'affected_ratio.est{suffix}'] = 1 - sus

        return df, past_recovered, past_active

    @classmethod
    def _plot_SIR_for_countries(cls, plot_countries, past_recovered,
                                past_active, simulation_start_day, growth_rate):
        """Plot the simulated S/I/R curves for each requested country."""
        for debug_country in plot_countries:
            debug = [{'day': day - simulation_start_day,
                      'Susceptible': (1 - a - r)[debug_country],
                      'Infected': a[debug_country],
                      'Removed': r[debug_country]}
                     for day, (r, a) in enumerate(zip(past_recovered, past_active))
                     if day > simulation_start_day]
            title = (f"{debug_country}: "
                     f"Growth Rate: {growth_rate[debug_country]:.0%}. "
                     f"S/I/R init: {debug[0]['Susceptible']:.1%},"
                     f"{debug[0]['Infected']:.1%},{debug[0]['Removed']:.1%}")
            pd.DataFrame(debug).set_index('day').plot(title=title)

    @classmethod
    def filter_df(cls, df):
        """Drop countries with 10 or fewer total deaths and return the
        remaining rows with columns in sorted order."""
        return df[df['Deaths.total'] > 10][df.columns.sort_values()]
def pandas_console_options():
    """Widen pandas' console display limits for interactive inspection
    (unlimited rows/columns, wide cells, 1000-char lines)."""
    options = {
        'display.max_colwidth': 300,
        'display.max_rows': None,
        'display.max_columns': None,
        'display.width': 1000,
    }
    for option, value in options.items():
        pd.set_option(option, value)
def overview_html():
    """Render the overview Jinja2 template against the latest data and
    return it wrapped in a <div> for notebook display."""
    template_text = SourceData.get_overview_template()

    import numpy as np
    import pandas as pd
    from jinja2 import Template
    from IPython.display import HTML

    source = OverviewData
    context = dict(D=source.make_summary_dict(),
                   table=source.overview_table(),
                   newcases=source.make_new_cases_arrays(),
                   np=np, pd=pd, enumerate=enumerate)
    rendered = Template(template_text).render(**context)
    return HTML(f'<div>{rendered}</div>')
| 40.875878 | 118 | 0.609259 |
b27801fedad6b0c2dd7b27decd1ddeded2f7dcb7 | 2,154 | py | Python | flod_tilskudd_portal/flod_tilskudd_portal/api/tilskudd_api.py | Trondheim-kommune/Tilskuddsbasen | 4f8ce270ef7296069f8e43bfb4bf6a570a7a35d4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | flod_tilskudd_portal/flod_tilskudd_portal/api/tilskudd_api.py | Trondheim-kommune/Tilskuddsbasen | 4f8ce270ef7296069f8e43bfb4bf6a570a7a35d4 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2021-03-31T18:23:12.000Z | 2021-12-13T19:43:12.000Z | flod_tilskudd_portal/flod_tilskudd_portal/api/tilskudd_api.py | Trondheim-kommune/Tilskuddsbasen | 4f8ce270ef7296069f8e43bfb4bf6a570a7a35d4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import traceback
from flask import current_app, request
from flask.ext.restful import Api, output_json
from flod_common.outputs.output_csv import output_csv
from flod_common.outputs.output_pdf import output_pdf
class TilskuddApi(Api):
def __init__(self, *args, **kwargs):
    """Initialise the flask-restful Api and register the response
    renderers this API can produce (CSV, PDF and JSON)."""
    super(TilskuddApi, self).__init__(*args, **kwargs)
    renderers = {
        'text/csv': output_csv,
        'application/pdf': output_pdf,
        'application/json': output_json,
    }
    self.representations = renderers
def handle_error(self, e):
    """Log every error; answer server errors (500) with a generic JSON
    message and delegate everything else to flask-restful's handling."""
    self.log_error(e)
    status = getattr(e, 'code', 500)
    if status != 500:
        return super(TilskuddApi, self).handle_error(e)
    return self.make_response({'message': self.response_msg(e)}, 500)

# OBS: the @app.errorhandler is not the right way to configure the custom error handlers
# when the endpoint is a flask-restful endpoint instead of a standard flask endpoint
# more about it in flask_restful.__init__.error_router
#
# override handle_error instead!
def response_msg(self, e):
    """Generic, client-facing error message (leaks no internal details;
    the exception itself is only logged, never returned)."""
    return "An error occurred processing {0} {1}".format(request.method,
                                                         request.path)
def log_msg(self, e):
    """Build the detailed multi-line log message for exception *e*.

    Inside a Flask request context, includes the method/path, cookies,
    query args, JSON body and the full traceback; outside a request
    context, only the traceback.

    NOTE: Python 2 idiom -- traceback.format_exc() returns a byte string
    here, hence the .decode('utf-8').
    """
    if request:
        return u"An error occurred processing %s %s\n" \
               u"Cookies: %s\n" \
               u"Args: %s\n" \
               u"Json: %s\n" \
               u"%s" \
               % \
               (request.method,
                request.path,
                request.cookies,
                json.dumps(request.args),
                request.get_json(),
                traceback.format_exc().decode('utf-8'))
    else:
        return u"An error occurred outside request context!\n" \
               u"%s" \
               % \
               (traceback.format_exc().decode('utf-8'))
def log_error(self, e):
try:
current_app.logger.warn(self.log_msg(e))
except:
current_app.logger.critical("Could not log error!") | 33.65625 | 96 | 0.546425 |
38803440f292cc429313a8e8b28009295cfabc4f | 5,550 | py | Python | unchaind/mapper/siggy.py | xnlfgh/unchaind | af59c380c02401a7ac58139d7f82507b9fb59f75 | [
"MIT"
] | 2 | 2019-01-02T20:43:50.000Z | 2019-01-28T10:15:13.000Z | unchaind/mapper/siggy.py | xnlfgh/unchaind | af59c380c02401a7ac58139d7f82507b9fb59f75 | [
"MIT"
] | 40 | 2018-12-26T16:20:57.000Z | 2019-03-31T13:47:32.000Z | unchaind/mapper/siggy.py | xnlfgh/unchaind | af59c380c02401a7ac58139d7f82507b9fb59f75 | [
"MIT"
] | 4 | 2018-12-25T22:53:51.000Z | 2021-02-20T19:54:51.000Z | """Contains pollers for various EVE Online wormhole space mappers. These
pollers provide the means to authenticate and fetch information about
their current state online, then map that state onto an internal
Universe representation.
They also allow for callbacks when changes occur in their internal state."""
import json
import logging
from typing import Dict, Any, Optional
from io import StringIO
from urllib.parse import urlencode
from lxml import etree
from unchaind.universe import Universe, System, Connection, State
from unchaind.http import HTTPSession
log = logging.getLogger(__name__)
class Map:
    """Uses the Transport to read data from Siggy into a universe."""

    # Latest snapshot of the chain map; None until the first update().
    universe: Optional[Universe]

    def __init__(self, transport: "Transport") -> None:
        self.transport = transport
        self.universe = None

    async def update(self) -> Universe:
        """Update our internal Universe with a new Universe.

        Fetches the raw chain map via the transport, builds a fresh
        Universe from it (wormhole connections plus display-name
        aliases), stores it on self.universe and returns it.
        """
        data = await self.transport.update()

        # NOTE(review): this initialisation is shadowed by the fresh
        # `universe` built below and self.universe is overwritten on
        # every return path, so this block only matters if
        # Universe.from_empty() has side effects -- confirm.
        if self.universe is None:
            self.universe: Universe = await Universe.from_empty()

        universe: Universe = await Universe.from_empty()

        chain = data["chainMap"]
        connections = chain["wormholes"]

        if isinstance(connections, list):
            # For some weird reason when there are no connections siggy changes
            # the type of this to a list instead of a dict
            log.debug("update: connections was a list")
            self.universe = universe
            return universe

        for connection in connections.values():
            state = State()
            # "eol" marks a wormhole approaching the end of its lifetime
            state.end_of_life = bool(connection.get("eol", 0))

            await universe.connect(
                Connection(
                    System(connection["from_system_id"]),
                    System(connection["to_system_id"]),
                    state,
                )
            )

        # Collect user-assigned display names that differ from the real
        # system name; these become the universe's alias map.
        aliases: Dict[System, str] = {}
        systems = chain["systems"]
        for system in systems.values():
            if (
                len(system.get("displayName", ""))
                and system["displayName"] != system["name"]
            ):
                aliases[System(system["systemID"])] = system["displayName"]
        universe.aliases = aliases

        self.universe = universe
        return self.universe
class Transport:
    """Represents a Siggy connection to be used to read raw data from Siggy."""

    http: HTTPSession
    config: Dict[str, Any]

    def __init__(self, config: Dict[str, Any]) -> None:
        self.http = HTTPSession()
        self.config = config

    @classmethod
    async def from_config(cls, config: Dict[str, Any]) -> Optional["Transport"]:
        """Create an initial instance of a Siggy class, this logs in with the
        provided username and password and does an initial fill of the
        universe.

        Returns None when the login fails (login() raises ValueError)."""
        instance = cls(config)

        try:
            await instance.login(config["username"], config["password"])
        except ValueError:
            return None

        await instance.update()
        return instance

    async def login(self, username: str, password: str) -> None:
        """Send a login request to the Siggy website. To do so we execute
        a first request to get a valid CSRF token and then a second one
        with the actual login form.

        The login form handler returns us a cookie which we can use for
        future requests and is stored into `self.token`.

        If this method is called multiple times on the same instance the
        old token will be replaced.

        Raises ValueError when the credentials are rejected."""
        csrf_response = await self.http.request(
            url="https://siggy.borkedlabs.com/account/login", method="GET"
        )

        # Scrape the CSRF token out of the login form's hidden input.
        tree = etree.parse(
            StringIO(csrf_response.body.decode("utf8")), etree.HTMLParser()
        )
        csrf_token = tree.xpath("//input[@name='_token']/@value")[0]

        response = await self.http.request(
            url="https://siggy.borkedlabs.com/account/login",
            method="POST",
            follow_redirects=False,
            body=urlencode(
                {
                    "username": username,
                    "password": password,
                    "_token": csrf_token,
                    "remember": 0,
                }
            ),
        )

        # When a login fails for siggy we get put back on the login screen,
        # if the login succeeded we get directed to the home page. This is
        # a quick check to verify if a login succeeded.
        if "login" in response.effective_url:
            # BUGFIX: Logger.warn() is deprecated in favour of warning();
            # also fixed the 'credentails' typo in the log message.
            log.warning("login: redirected back to login, wrong credentials?")
            raise ValueError

    async def update(self) -> Dict[str, Any]:
        """Update our internal Universe from siggy.

        Returns the decoded JSON payload as a plain dict; raises
        ValueError when siggy answers with something that is not JSON."""
        update_response = await self.http.request(
            url="https://siggy.borkedlabs.com/siggy/siggy",
            method="POST",
            body=urlencode(
                {
                    "systemID": self.config["home_system"],
                    "mapLastUpdate": 0,
                    "lastUpdate": 0,
                    "mapOpen": "true",
                    "forceUpdate": "true",
                }
            ),
        )

        try:
            return dict(json.loads(update_response.body.decode("utf8")))
        except (ValueError, AttributeError, json.decoder.JSONDecodeError):
            log.critical("update: got invalid json from siggy on update")
            raise ValueError
| 31.896552 | 80 | 0.586667 |
743b39c8935112ff7b91736989d74339a1957d91 | 6,677 | py | Python | tests/contrib/sqlalchemy/mixins.py | soheltarir/dd-trace-py | 65864cea98e8b3602fee619cb8f5b749b5e70bb3 | [
"BSD-3-Clause"
] | null | null | null | tests/contrib/sqlalchemy/mixins.py | soheltarir/dd-trace-py | 65864cea98e8b3602fee619cb8f5b749b5e70bb3 | [
"BSD-3-Clause"
] | null | null | null | tests/contrib/sqlalchemy/mixins.py | soheltarir/dd-trace-py | 65864cea98e8b3602fee619cb8f5b749b5e70bb3 | [
"BSD-3-Clause"
] | null | null | null | # stdlib
import contextlib
# 3rd party
from nose.tools import eq_, ok_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import (
create_engine,
Column,
Integer,
String,
)
# project
from ddtrace.contrib.sqlalchemy import trace_engine
# testing
from tests.opentracer.utils import init_tracer
from ...test_tracer import get_dummy_tracer
Base = declarative_base()
class Player(Base):
    """Player entity used to test SQLAlchemy ORM"""
    __tablename__ = 'players'

    id = Column(Integer, primary_key=True)  # surrogate primary key
    name = Column(String(20))               # player name, up to 20 chars
class SQLAlchemyTestMixin(object):
    """SQLAlchemy test mixin that includes a complete set of tests
    that must be executed for different engine. When a new test (or
    a regression test) should be added to SQLAlchemy test suite, a new
    entry must be appended here so that it will be executed for all
    available and supported engines. If the test is specific to only
    one engine, that test must be added to the specific `TestCase`
    implementation.

    To support a new engine, create a new `TestCase` that inherits from
    `SQLAlchemyTestMixin` and `TestCase`. Then you must define the following
    static class variables:
    * VENDOR: the database vendor name
    * SQL_DB: the `sql.db` tag that we expect (it's the name of the database
      available in the `.env` file)
    * SERVICE: the service that we expect by default
    * ENGINE_ARGS: all arguments required to create the engine

    To check specific tags in each test, you must implement the
    `check_meta(self, span)` method.
    """
    VENDOR = None
    SQL_DB = None
    SERVICE = None
    ENGINE_ARGS = None

    def create_engine(self, engine_args):
        """Build a SQLAlchemy engine from a dict holding a 'url' key plus
        any extra keyword arguments for sqlalchemy.create_engine.

        NOTE: the call below resolves to the module-level
        sqlalchemy.create_engine (methods are only reachable via self).
        """
        # create a SQLAlchemy engine
        config = dict(engine_args)
        url = config.pop('url')
        return create_engine(url, **config)

    @contextlib.contextmanager
    def connection(self):
        """Context manager that provides a connection to the underlying
        database and always closes it.

        BUGFIX: `conn` is now bound *before* the try block. Previously it
        was assigned inside `try`, so a failure in engine.connect() made
        the `finally` clause raise NameError on the unbound `conn`,
        masking the original exception.
        """
        conn = self.engine.connect()
        try:
            yield conn
        finally:
            conn.close()

    def check_meta(self, span):
        # function that can be implemented according to the
        # specific engine implementation
        return

    def setUp(self):
        """Create the engine and schema, open a session, trace the engine."""
        # create an engine with the given arguments
        self.engine = self.create_engine(self.ENGINE_ARGS)

        # create the database / entities and prepare a session for the test
        Base.metadata.drop_all(bind=self.engine)
        Base.metadata.create_all(self.engine, checkfirst=False)
        Session = sessionmaker(bind=self.engine)
        self.session = Session()

        # trace the engine
        self.tracer = get_dummy_tracer()
        trace_engine(self.engine, self.tracer)

    def tearDown(self):
        """Close the session, drop the schema and dispose of the engine."""
        self.session.close()
        Base.metadata.drop_all(bind=self.engine)
        self.engine.dispose()

    def test_orm_insert(self):
        # ensures that the ORM session is traced
        wayne = Player(id=1, name='wayne')
        self.session.add(wayne)
        self.session.commit()

        traces = self.tracer.writer.pop_traces()
        # trace composition: exactly one trace with one span
        eq_(len(traces), 1)
        eq_(len(traces[0]), 1)
        span = traces[0][0]
        # span fields
        eq_(span.name, '{}.query'.format(self.VENDOR))
        eq_(span.service, self.SERVICE)
        ok_('INSERT INTO players' in span.resource)
        eq_(span.get_tag('sql.db'), self.SQL_DB)
        eq_(span.get_tag('sql.rows'), '1')
        self.check_meta(span)
        eq_(span.span_type, 'sql')
        eq_(span.error, 0)
        ok_(span.duration > 0)

    def test_session_query(self):
        # ensures that the Session queries are traced
        out = list(self.session.query(Player).filter_by(name='wayne'))
        eq_(len(out), 0)

        traces = self.tracer.writer.pop_traces()
        # trace composition
        eq_(len(traces), 1)
        eq_(len(traces[0]), 1)
        span = traces[0][0]
        # span fields
        eq_(span.name, '{}.query'.format(self.VENDOR))
        eq_(span.service, self.SERVICE)
        ok_(
            'SELECT players.id AS players_id, players.name AS players_name \nFROM players \nWHERE players.name'
            in span.resource
        )
        eq_(span.get_tag('sql.db'), self.SQL_DB)
        self.check_meta(span)
        eq_(span.span_type, 'sql')
        eq_(span.error, 0)
        ok_(span.duration > 0)

    def test_engine_connect_execute(self):
        # ensures that engine.connect() is properly traced
        with self.connection() as conn:
            rows = conn.execute('SELECT * FROM players').fetchall()
            eq_(len(rows), 0)

        traces = self.tracer.writer.pop_traces()
        # trace composition
        eq_(len(traces), 1)
        eq_(len(traces[0]), 1)
        span = traces[0][0]
        # span fields
        eq_(span.name, '{}.query'.format(self.VENDOR))
        eq_(span.service, self.SERVICE)
        eq_(span.resource, 'SELECT * FROM players')
        eq_(span.get_tag('sql.db'), self.SQL_DB)
        self.check_meta(span)
        eq_(span.span_type, 'sql')
        eq_(span.error, 0)
        ok_(span.duration > 0)

    def test_traced_service(self):
        # ensures that the service is set as expected
        services = self.tracer.writer.pop_services()
        expected = {
            self.SERVICE: {'app': self.VENDOR, 'app_type': 'db'}
        }
        eq_(services, expected)

    def test_opentracing(self):
        """Ensure that sqlalchemy works with the opentracer."""
        ot_tracer = init_tracer('sqlalch_svc', self.tracer)

        with ot_tracer.start_active_span('sqlalch_op'):
            with self.connection() as conn:
                rows = conn.execute('SELECT * FROM players').fetchall()
                eq_(len(rows), 0)

        traces = self.tracer.writer.pop_traces()
        # trace composition: the OT span wraps the datadog SQL span
        eq_(len(traces), 1)
        eq_(len(traces[0]), 2)
        ot_span, dd_span = traces[0]

        # confirm the parenting
        eq_(ot_span.parent_id, None)
        eq_(dd_span.parent_id, ot_span.span_id)

        eq_(ot_span.name, 'sqlalch_op')
        eq_(ot_span.service, 'sqlalch_svc')

        # span fields
        eq_(dd_span.name, '{}.query'.format(self.VENDOR))
        eq_(dd_span.service, self.SERVICE)
        eq_(dd_span.resource, 'SELECT * FROM players')
        eq_(dd_span.get_tag('sql.db'), self.SQL_DB)
        eq_(dd_span.span_type, 'sql')
        eq_(dd_span.error, 0)
        ok_(dd_span.duration > 0)
| 32.412621 | 111 | 0.628426 |
7dd0f569785eb88096b507ee04eef023b8d750ea | 22,451 | py | Python | mailpile/ui.py | JocelynDelalande/Mailpile | 3e53a54195a0dd8ca48e7cb3be44dd7b3acabd74 | [
"Apache-2.0"
] | null | null | null | mailpile/ui.py | JocelynDelalande/Mailpile | 3e53a54195a0dd8ca48e7cb3be44dd7b3acabd74 | [
"Apache-2.0"
] | null | null | null | mailpile/ui.py | JocelynDelalande/Mailpile | 3e53a54195a0dd8ca48e7cb3be44dd7b3acabd74 | [
"Apache-2.0"
] | null | null | null | #
# This file contains the UserInteraction and Session classes.
#
# The Session encapsulates settings and command results, allowing commands
# to be chanined in an interactive environment.
#
# The UserInteraction classes log the progress and performance of individual
# operations and assist with rendering the results in various formats (text,
# HTML, JSON, etc.).
#
###############################################################################
import datetime
import getpass
import os
import random
import re
import sys
import tempfile
import traceback
import json
import urllib
from collections import defaultdict
from json import JSONEncoder
from jinja2 import TemplateError, TemplateSyntaxError, TemplateNotFound
from jinja2 import TemplatesNotFound, TemplateAssertionError, UndefinedError
import mailpile.commands
import mailpile.util
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.search import MailIndex
from mailpile.util import *
class SuppressHtmlOutput(Exception):
    """Control-flow exception signalling that HTML output should be
    suppressed (raised and handled within this module's UI code)."""
    pass
def default_dict(*mappings):
    """Merge *mappings* left-to-right into a defaultdict whose missing
    keys default to the empty string; later mappings win on collisions."""
    merged = defaultdict(str)
    for mapping in mappings:
        merged.update(mapping)
    return merged
class NoColors:
    """Colorless terminal backend: exposes the same constants and methods
    as ANSIColors, but every escape sequence is empty so the output is
    plain text. Used when the terminal does not support ANSI codes."""

    C_SAVE = ''
    C_RESTORE = ''
    NORMAL = ''
    BOLD = ''
    NONE = ''
    BLACK = ''
    RED = ''
    YELLOW = ''
    BLUE = ''
    MAGENTA = ''
    CYAN = ''
    FORMAT = "%s%s"  # (color, weight) -> escape prefix (no-op here)
    RESET = ''
    LINE_BELOW = ''

    def max_width(self):
        """Assumed terminal width when it cannot be measured."""
        return 79

    def color(self, text, color='', weight=''):
        """Wrap *text* in the color/weight prefix and the RESET suffix."""
        prefix = self.FORMAT % (color, weight)
        return '%s%s%s' % (prefix, text, self.RESET)

    def replace_line(self, text, chars=None):
        """Return *text* padded with spaces to the terminal width and
        ending in '\\r', so the next write overwrites the same line.
        *chars* overrides the measured display width of *text*."""
        width = self.max_width()
        used = chars or len(unicode(text))
        padding = ' ' * max(0, min(width, width - used))
        return '%s%s\r' % (text, padding)

    def add_line_below(self):
        """No-op: the dumb backend reserves no status line."""
        pass

    def print_below(self):
        """No-op counterpart to the ANSI backend's print_below."""
        pass

    def write(self, data):
        """Send raw output to stderr."""
        sys.stderr.write(data)

    def check_max_width(self):
        """No-op: the width is fixed at 79 for this backend."""
        pass
class ANSIColors(NoColors):
    """ANSI color constants and escape-sequence-aware line handling for
    terminals that support ANSI control codes."""

    NORMAL = ''
    BOLD = ';1'
    NONE = '0'
    BLACK = "30"
    RED = "31"
    YELLOW = "33"
    BLUE = "34"
    MAGENTA = '35'
    CYAN = '36'
    RESET = "\x1B[0m"
    FORMAT = "\x1B[%s%sm"
    CURSOR_UP = "\x1B[1A"
    CURSOR_DN = "\x1B[1B"
    CURSOR_SAVE = "\x1B[s"
    CURSOR_RESTORE = "\x1B[u"
    CLEAR_LINE = "\x1B[2K"

    def __init__(self):
        # Measure the real terminal width up front.
        self.check_max_width()

    def replace_line(self, text, chars=None):
        """Clear the current line and write *text* in place using ANSI
        cursor save/restore; *chars* is accepted for interface parity
        with NoColors.replace_line but is not needed here."""
        return '%s%s%s\r%s' % (self.CURSOR_SAVE,
                               self.CLEAR_LINE, text,
                               self.CURSOR_RESTORE)

    def max_width(self):
        """Terminal width measured by check_max_width()."""
        return self.MAX_WIDTH

    def check_max_width(self):
        """Query the terminal size via the TIOCGWINSZ ioctl and cache
        (columns - 1) in self.MAX_WIDTH; fall back to 79 on any failure
        (non-POSIX platform, stdin not a tty, ...)."""
        try:
            import fcntl, termios, struct
            fcntl_result = fcntl.ioctl(sys.stdin.fileno(),
                                       termios.TIOCGWINSZ,
                                       struct.pack('HHHH', 0, 0, 0, 0))
            h, w, hp, wp = struct.unpack('HHHH', fcntl_result)
            self.MAX_WIDTH = (w-1)
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; Exception covers the intended
        # failures (ImportError, OSError from ioctl, ValueError).
        except Exception:
            self.MAX_WIDTH = 79
class Completer(object):
    """Readline autocompleter for the interactive shell."""
    DELIMS = ' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>?'

    def __init__(self, session):
        self.session = session

    def _available_opts(self, text):
        """All command synopses and tag names that start with `text`."""
        candidates = []
        for cmd in mailpile.commands.COMMANDS:
            candidates.append(cmd.SYNOPSIS[1])
        for cmd in mailpile.commands.COMMANDS:
            candidates.append(cmd.SYNOPSIS[2])
        for tag in self.session.config.tags.values():
            candidates.append(tag.name.lower())
        return sorted([c for c in candidates if c and c.startswith(text)])

    def _autocomplete(self, text, state):
        """Return the state'th match plus a trailing space, or None."""
        matches = self._available_opts(text)
        if state < len(matches):
            return matches[state] + ' '
        return None

    def get_completer(self):
        return lambda t, s: self._autocomplete(t, s)
class UserInteraction:
    """Log the progress and performance of individual operations.

    This is the terminal (text-mode) implementation; subclasses buffer
    output for HTTP responses or suppress it for background threads.
    NOTE: this file uses Python 2 syntax (`except X, e`, `unicode`).
    """
    MAX_BUFFER_LEN = 150  # cap on buffered log entries while block()ed
    # Log levels: lower number == more important.
    LOG_URGENT = 0
    LOG_RESULT = 5
    LOG_ERROR = 10
    LOG_NOTIFY = 20
    LOG_WARNING = 30
    LOG_PROGRESS = 40
    LOG_DEBUG = 50
    LOG_ALL = 99
    LOG_PREFIX = ''  # subclasses tag their log lines, e.g. 'http/' or 'bg/'
    def __init__(self, config, log_parent=None, log_prefix=None):
        # When log_parent is set, log lines are forwarded to it instead of
        # being written to the terminal.
        self.log_parent = log_parent
        self.log_buffer = []
        self.log_buffering = False
        self.log_level = self.LOG_ALL
        self.log_prefix = log_prefix or self.LOG_PREFIX
        self.interactive = False
        # Stack of (task-name, list-of-(timestamp, action)) for timing.
        self.time_tracking = [('Main', [])]
        self.time_elapsed = 0.0
        self.render_mode = 'text'
        self.term = NoColors()
        self.config = config
        # Default variables made available to HTML templates.
        self.html_variables = {
            'title': 'Mailpile',
            'name': 'Chelsea Manning',
            'csrf': '',
            'even_odd': 'odd',
            'mailpile_size': 0
        }
    # Logging
    def _fmt_log(self, text, level=LOG_URGENT):
        """Colorize and pad a log line according to its level."""
        c, w, clip = self.term.NONE, self.term.NORMAL, 2048
        if level == self.LOG_URGENT:
            c, w = self.term.RED, self.term.BOLD
        elif level == self.LOG_ERROR:
            c = self.term.RED
        elif level == self.LOG_WARNING:
            c = self.term.YELLOW
        elif level == self.LOG_NOTIFY:
            c = self.term.CYAN
        elif level == self.LOG_DEBUG:
            c = self.term.MAGENTA
        elif level == self.LOG_PROGRESS:
            # Progress lines are clipped short and rewritten in place.
            c, clip = self.term.BLUE, 78
        formatted = self.term.replace_line(self.term.color(
            unicode(text[:clip]).encode('utf-8'), color=c, weight=w),
            chars=len(text[:clip]))
        if level != self.LOG_PROGRESS:
            formatted += '\n'
        return formatted
    def _display_log(self, text, level=LOG_URGENT):
        """Write one log line, delegating to log_parent when present."""
        if not text.startswith(self.log_prefix):
            text = '%slog(%s): %s' % (self.log_prefix, level, text)
        if self.log_parent:
            self.log_parent.log(level, text)
        else:
            self.term.write(self._fmt_log(text, level=level))
    def _debug_log(self, text, level):
        """Like _display_log, but only if 'log' debugging is enabled."""
        if text and 'log' in self.config.sys.debug:
            if not text.startswith(self.log_prefix):
                text = '%slog(%s): %s' % (self.log_prefix, level, text)
            if self.log_parent:
                return self.log_parent.log(level, text)
            else:
                self.term.write(self._fmt_log(text, level=level))
    def clear_log(self):
        """Discard any buffered log entries."""
        self.log_buffer = []
    def flush_log(self):
        """Display all buffered entries at or below the current level."""
        try:
            while len(self.log_buffer) > 0:
                level, message = self.log_buffer.pop(0)
                if level <= self.log_level:
                    self._display_log(message, level)
        except IndexError:
            pass
    def block(self):
        """Start buffering log output (e.g. while an editor is open)."""
        self._display_log('')
        self.log_buffering = True
    def unblock(self):
        """Stop buffering and flush what accumulated."""
        self.log_buffering = False
        self.flush_log()
    def log(self, level, message):
        """Log a message, buffering it if block() is in effect."""
        if self.log_buffering:
            self.log_buffer.append((level, message))
            # Drop the oldest 10% when the buffer overflows.
            # NOTE: integer division under Python 2.
            while len(self.log_buffer) > self.MAX_BUFFER_LEN:
                self.log_buffer[0:(self.MAX_BUFFER_LEN/10)] = []
        elif level <= self.log_level:
            self._display_log(message, level)
    # NOTE(review): these two no-op definitions are shadowed by the real
    # start_command/finish_command defined further down in this class.
    def finish_command(self):
        pass
    def start_command(self):
        pass
    # Convenience wrappers binding each log level.
    error = lambda self, msg: self.log(self.LOG_ERROR, msg)
    notify = lambda self, msg: self.log(self.LOG_NOTIFY, msg)
    warning = lambda self, msg: self.log(self.LOG_WARNING, msg)
    progress = lambda self, msg: self.log(self.LOG_PROGRESS, msg)
    debug = lambda self, msg: self.log(self.LOG_DEBUG, msg)
    # Progress indication and performance tracking
    times = property(lambda self: self.time_tracking[-1][1])
    def mark(self, action=None, percent=None):
        """Note that we are about to perform an action."""
        # 'percent' is accepted but currently unused.
        if not action:
            try:
                action = self.times[-1][1]
            except IndexError:
                action = 'mark'
        self.progress(action)
        self.times.append((time.time(), action))
    def report_marks(self, quiet=False, details=False):
        """Report elapsed time for the current mark sequence; return it."""
        t = self.times
        if t and t[0]:
            self.time_elapsed = elapsed = t[-1][0] - t[0][0]
            if not quiet:
                try:
                    self.notify(_('Elapsed: %.3fs (%s)') % (elapsed, t[-1][1]))
                    if details:
                        for i in range(0, len(self.times)-1):
                            e = t[i+1][0] - t[i][0]
                            self.debug(' -> %.3fs (%s)' % (e, t[i][1]))
                except IndexError:
                    self.notify(_('Elapsed: %.3fs') % elapsed)
            return elapsed
        return 0
    def reset_marks(self, mark=True, quiet=False, details=False):
        """This sequence of actions is complete."""
        if self.times and mark:
            self.mark()
        elapsed = self.report_marks(quiet=quiet, details=details)
        self.times[:] = []
        return elapsed
    def push_marks(self, subtask):
        """Start tracking a new sub-task."""
        self.time_tracking.append((subtask, []))
    def pop_marks(self, name=None, quiet=True):
        """Sub-task ended!"""
        elapsed = self.report_marks(quiet=quiet)
        if len(self.time_tracking) > 1:
            if not name or (self.time_tracking[-1][0] == name):
                self.time_tracking.pop(-1)
        return elapsed
    # Higher level command-related methods
    def _display_result(self, result):
        """Write a rendered result to stdout."""
        sys.stdout.write(unicode(result).encode('utf-8').rstrip())
        sys.stdout.write('\n')
    def start_command(self, cmd, args, kwargs):
        """Begin timing a command invocation."""
        self.flush_log()
        self.push_marks(cmd)
        self.mark(('%s(%s)'
                   ) % (cmd, ', '.join((args or tuple()) +
                                       ('%s' % kwargs, ))))
    def finish_command(self, cmd):
        """Close the timing scope opened by start_command."""
        self.pop_marks(name=cmd)
    def display_result(self, result):
        """Render command result objects to the user"""
        self._display_log('', level=self.LOG_RESULT)
        if self.render_mode == 'json':
            return self._display_result(result.as_json())
        # render_mode may be e.g. 'html', 'jhtml' or 'foo.jhtml'; map it to
        # the corresponding template name before rendering.
        for suffix in ('css', 'html', 'js', 'rss', 'txt', 'xml'):
            if self.render_mode.endswith(suffix):
                if self.render_mode in (suffix, 'j' + suffix):
                    template = 'as.' + suffix
                else:
                    template = self.render_mode.replace('.j' + suffix,
                                                        '.' + suffix)
                return self._display_result(
                    result.as_template(suffix, template=template))
        return self._display_result(unicode(result))
    # Creating output files
    DEFAULT_DATA_NAME_FMT = '%(msg_mid)s.%(count)s_%(att_name)s.%(att_ext)s'
    DEFAULT_DATA_ATTRS = {
        'msg_mid': 'file',
        'mimetype': 'application/octet-stream',
        'att_name': 'unnamed',
        'att_ext': 'dat',
        'rand': '0000'
    }
    DEFAULT_DATA_EXTS = {
        # FIXME: Add more!
        'text/plain': 'txt',
        'text/html': 'html',
        'image/gif': 'gif',
        'image/jpeg': 'jpg',
        'image/png': 'png'
    }
    def _make_data_filename(self, name_fmt, attributes):
        """Expand a %-style filename template with the given attributes."""
        return (name_fmt or self.DEFAULT_DATA_NAME_FMT) % attributes
    def _make_data_attributes(self, attributes={}):
        """Fill in default attributes and derive the extension from the mimetype."""
        attrs = self.DEFAULT_DATA_ATTRS.copy()
        attrs.update(attributes)
        attrs['rand'] = '%4.4x' % random.randint(0, 0xffff)
        if attrs['att_ext'] == self.DEFAULT_DATA_ATTRS['att_ext']:
            if attrs['mimetype'] in self.DEFAULT_DATA_EXTS:
                attrs['att_ext'] = self.DEFAULT_DATA_EXTS[attrs['mimetype']]
        return attrs
    def open_for_data(self, name_fmt=None, attributes={}):
        """Open a local file for writing attachment data; return (name, fd)."""
        filename = self._make_data_filename(
            name_fmt, self._make_data_attributes(attributes))
        return filename, open(filename, 'w')
    # Rendering helpers for templating and such
    def render_json(self, data):
        """Render data as JSON"""
        class NoFailEncoder(JSONEncoder):
            def default(self, obj):
                # NOTE(review): JSONEncoder.default() raises TypeError even
                # for these basic types; in practice they are serialized
                # before default() is reached, so this branch looks dead.
                if isinstance(obj, (list, dict, str, unicode,
                                    int, float, bool, type(None))):
                    return JSONEncoder.default(self, obj)
                if isinstance(obj, datetime.datetime):
                    return str(obj)
                return "COMPLEXBLOB"
        return json.dumps(data, indent=1, cls=NoFailEncoder,
                          sort_keys=True, allow_nan=False)
    def _web_template(self, config, tpl_names, elems=None):
        """Return the first loadable Jinja template from tpl_names, or None."""
        env = config.jinja_env
        env.session = Session(config)
        env.session.ui = HttpUserInteraction(None, config, log_parent=self)
        for fn in tpl_names:
            try:
                # FIXME(Security): Here we need to sanitize the file name
                #                  very strictly in case it somehow came
                #                  from user data.
                return env.get_template(fn)
            except (IOError, OSError, AttributeError), e:
                pass
        return None
    def render_web(self, cfg, tpl_names, data):
        """Render data as HTML"""
        alldata = default_dict(self.html_variables)
        alldata["config"] = cfg
        alldata.update(data)
        try:
            template = self._web_template(cfg, tpl_names)
            if template:
                return template.render(alldata)
            else:
                emsg = _("<h1>Template not found</h1>\n<p>%s</p><p>"
                         "<b>DATA:</b> %s</p>")
                tpl_esc_names = [escape_html(tn) for tn in tpl_names]
                return emsg % (' or '.join(tpl_esc_names),
                               escape_html('%s' % alldata))
        except (UndefinedError, ):
            emsg = _("<h1>Template error</h1>\n"
                     "<pre>%s</pre>\n<p>%s</p><p><b>DATA:</b> %s</p>")
            return emsg % (escape_html(traceback.format_exc()),
                           ' or '.join([escape_html(tn) for tn in tpl_names]),
                           escape_html('%.4096s' % alldata))
        except (TemplateNotFound, TemplatesNotFound), e:
            emsg = _("<h1>Template not found in %s</h1>\n"
                     "<b>%s</b><br/>"
                     "<div><hr><p><b>DATA:</b> %s</p></div>")
            return emsg % tuple([escape_html(unicode(v))
                                 for v in (e.name, e.message,
                                           '%.4096s' % alldata)])
        except (TemplateError, TemplateSyntaxError,
                TemplateAssertionError,), e:
            emsg = _("<h1>Template error in %s</h1>\n"
                     "Parsing template %s: <b>%s</b> on line %s<br/>"
                     "<div><xmp>%s</xmp><hr><p><b>DATA:</b> %s</p></div>")
            return emsg % tuple([escape_html(unicode(v))
                                 for v in (e.name, e.filename, e.message,
                                           e.lineno, e.source,
                                           '%.4096s' % alldata)])
    def edit_messages(self, session, emails):
        """Open $VISUAL (default vi) on the messages; return True if edited."""
        if not self.interactive:
            return False
        for e in emails:
            if not e.is_editable():
                from mailpile.mailutils import NotEditableError
                raise NotEditableError(_('Message %s is not editable')
                                       % e.msg_mid())
        sep = '-' * 79 + '\n'
        edit_this = ('\n'+sep).join([e.get_editing_string() for e in emails])
        self.block()
        tf = tempfile.NamedTemporaryFile()
        tf.write(edit_this.encode('utf-8'))
        tf.flush()
        os.system('%s %s' % (os.getenv('VISUAL', default='vi'), tf.name))
        tf.seek(0, 0)
        edited = tf.read().decode('utf-8')
        tf.close()
        self.unblock()
        if edited == edit_this:
            return False
        updates = [t.strip() for t in edited.split(sep)]
        if len(updates) != len(emails):
            raise ValueError(_('Number of edit messages does not match!'))
        for i in range(0, len(updates)):
            emails[i].update_from_string(session, updates[i])
        return True
    def get_password(self, prompt):
        """Prompt for a password on the tty; '' when non-interactive."""
        if not self.interactive:
            return ''
        try:
            self.block()
            return getpass.getpass(prompt.encode('utf-8')).decode('utf-8')
        finally:
            self.unblock()
class HttpUserInteraction(UserInteraction):
    """UserInteraction that buffers logs and results for an HTTP response."""
    LOG_PREFIX = 'http/'

    def __init__(self, request, *args, **kwargs):
        UserInteraction.__init__(self, *args, **kwargs)
        self.request = request
        self.logged = []
        self.results = []

    # Just buffer up rendered data
    def _display_log(self, text, level=UserInteraction.LOG_URGENT):
        self._debug_log(text, level)
        self.logged.append((level, text))

    def _display_result(self, result):
        self.results.append(result)

    # Stream raw data to the client on open_for_data
    def open_for_data(self, name_fmt=None, attributes={}):
        return 'HTTP Client', RawHttpResponder(self.request, attributes)

    def _render_text_responses(self, config):
        """Join all results as text; prepend the log when debugging."""
        divider = '\n%s\n' % ('=' * 79)
        if config.sys.debug:
            log_lines = '\n'.join([entry[1] for entry in self.logged])
            return '%s\n%s' % (log_lines, divider.join(self.results))
        else:
            return divider.join(self.results)

    def _render_single_response(self, config):
        """Exactly one buffered result is expected for template modes."""
        if len(self.results) == 1:
            return self.results[0]
        if len(self.results) > 1:
            raise Exception(_('FIXME: Multiple results, OMG WTF'))
        return ""

    def render_response(self, config):
        """Pick (mimetype, body) based on the current render_mode."""
        suffix = self.render_mode.split('.')[-1]
        if (self.render_mode == 'json' or
                suffix in ('jcss', 'jhtml', 'jjs', 'jrss', 'jtxt', 'jxml')):
            if len(self.results) == 1:
                return ('application/json', self.results[0])
            else:
                return ('application/json', '[%s]' % ','.join(self.results))
        for ending, mimetype in (('html', 'text/html'),
                                 ('js', 'text/javascript'),
                                 ('css', 'text/css'),
                                 ('txt', 'text/plain'),
                                 ('rss', 'application/rss+xml'),
                                 ('xml', 'application/xml')):
            if self.render_mode.endswith(ending):
                return (mimetype, self._render_single_response(config))
        return ('text/plain', self._render_text_responses(config))

    def edit_messages(self, session, emails):
        # Interactive editing makes no sense over HTTP.
        return False
class BackgroundInteraction(UserInteraction):
    """UserInteraction for background threads: logs only when debugging."""
    LOG_PREFIX = 'bg/'
    def _display_log(self, text, level=UserInteraction.LOG_URGENT):
        # Only emitted if 'log' debugging is enabled (see _debug_log).
        self._debug_log(text, level)
    def edit_messages(self, session, emails):
        # Background sessions can never pop up an editor.
        return False
class SilentInteraction(UserInteraction):
    """UserInteraction that swallows output and returns results unrendered."""
    LOG_PREFIX = 'silent/'
    def _display_log(self, text, level=UserInteraction.LOG_URGENT):
        # Only emitted if 'log' debugging is enabled (see _debug_log).
        self._debug_log(text, level)
    def _display_result(self, result):
        # Hand the raw result back to the caller instead of printing it.
        return result
    def edit_messages(self, session, emails):
        # Silent sessions can never pop up an editor.
        return False
class RawHttpResponder:
    """File-like object that streams raw attachment data to an HTTP client.

    The response headers are sent immediately on construction; write()
    then streams the payload and close() aborts normal HTML rendering.
    """

    def __init__(self, request, attributes={}):
        self.raised = False
        self.request = request
        #
        # FIXME: Security risks here, untrusted content may find its way into
        #        our raw HTTP headers etc.
        #
        mimetype = attributes.get('mimetype', 'application/octet-stream')
        filename = attributes.get('filename', 'attachment.dat').replace('"', '')
        disposition = attributes.get('disposition', 'attachment')
        length = attributes['length']
        request.send_http_response(200, 'OK')
        headers = [('Content-Length', length)]
        if disposition and filename:
            quoted = urllib.quote(filename.encode("utf-8"))
            headers.append(('Content-Disposition',
                            "%s; filename*=UTF-8''%s" % (disposition, quoted)))
        elif disposition:
            headers.append(('Content-Disposition', disposition))
        request.send_standard_headers(header_list=headers,
                                      mimetype=mimetype)

    def write(self, data):
        self.request.wfile.write(data)

    def close(self):
        # Raise exactly once so the HTML rendering pipeline is skipped.
        if not self.raised:
            self.raised = True
            raise SuppressHtmlOutput()
class Session(object):
    """Per-session state: config, UI, search results and background tasks."""

    def __init__(self, config):
        self.config = config
        self.main = False
        self.order = None
        self.wait_lock = threading.Condition(UiRLock())
        self.results = []
        self.searched = []
        self.displayed = (0, 0)
        self.task_results = []
        self.ui = UserInteraction(config)

    def set_interactive(self, val):
        self.ui.interactive = val

    interactive = property(lambda s: s.ui.interactive,
                           lambda s, v: s.set_interactive(v))

    def report_task_completed(self, name, result):
        """Record a finished background task and wake any waiters."""
        with self.wait_lock:
            self.task_results.append((name, result))
            self.wait_lock.notify_all()

    def report_task_failed(self, name):
        # Failure is reported as completion with a None result.
        self.report_task_completed(name, None)

    def wait_for_task(self, wait_for, quiet=False):
        """Block until the named task reports a result, then return it."""
        while not mailpile.util.QUITTING:
            with self.wait_lock:
                for idx, (task_name, result) in enumerate(self.task_results):
                    if task_name == wait_for:
                        self.task_results.pop(idx)
                        self.ui.reset_marks(quiet=quiet)
                        return result
                self.wait_lock.wait()

    def error(self, message):
        """Log an error; in non-interactive mode this terminates the process."""
        self.ui.error(message)
        if not self.interactive:
            sys.exit(1)
| 33.913897 | 79 | 0.552047 |
bae744ad26de4fa115da5206e77a7e1b12426a93 | 518 | py | Python | AnalogLogger/parse.py | danielvilas/ArduinoCurrentMonitor | a3902d29ab4e4ffadf7fb4910950a60eeb16f57b | [
"MIT"
] | 1 | 2018-12-05T20:35:19.000Z | 2018-12-05T20:35:19.000Z | AnalogLogger/parse.py | danielvilas/ArduinoCurrentMonitor | a3902d29ab4e4ffadf7fb4910950a60eeb16f57b | [
"MIT"
] | null | null | null | AnalogLogger/parse.py | danielvilas/ArduinoCurrentMonitor | a3902d29ab4e4ffadf7fb4910950a60eeb16f57b | [
"MIT"
] | null | null | null | import struct
from matplotlib import pyplot as plt
import numpy as np
time=[]
a0=[]
a1=[]
with open("myfile", "rb") as f:
bytes = f.read(8)
while bytes:
#t0=bytes[0]<<24+bytes[1]<<16+bytes[2]<<8+bytes[3]
data=struct.unpack('>IHH',bytes)
print(data)
time.append(data[0])
a0.append(data[1])
a1.append(data[2])
bytes = f.read(8)
#print(a0);
data=np.arange(0,len(time))
plt.plot(data,time)
plt.show()
plt.plot(time,a0,'o')
plt.plot(time,a1)
plt.show();
| 17.862069 | 58 | 0.588803 |
623f0957c3ec902e2d793fb6f525595aa6f56928 | 2,630 | py | Python | alps/docs.py | michalporeba/alps-py | bfbfd048437e1fb77d253bc649d3965257dd557c | [
"MIT"
] | null | null | null | alps/docs.py | michalporeba/alps-py | bfbfd048437e1fb77d253bc649d3965257dd557c | [
"MIT"
] | null | null | null | alps/docs.py | michalporeba/alps-py | bfbfd048437e1fb77d253bc649d3965257dd557c | [
"MIT"
] | null | null | null | from diogi.functions import *
class Doc:
# href, format, tag
def __init__(
self, href: str = None, format: str = None, tag: str = None, value: str = None
):
self.href = href
self.format = format
self.tag = tag
self.value = value
def as_data(self):
data = {}
set_if_not_none(data, self.href, "href")
set_if_not_none(data, self.format, "format")
set_if_not_none(data, self.tag, "tag")
set_if_not_none(data, self.value, "value")
return data
@staticmethod
def parse(obj: any):
if str == type(obj):
return TextDoc(obj)
if dict == type(obj):
value = get_if_exists(obj, "value", None)
format = get_if_exists(obj, "format", "text")
href = get_if_exists(obj, "href", None)
tag = get_if_exists(obj, "tag", None)
if format == "markdown":
return MarkDownDoc(value=default_if_none(value, ""), href=href, tag=tag)
if format == "html":
return HtmlDoc(href=href, tag=tag, value=default_if_none(value, ""))
return TextDoc(value=default_if_none(value, str(obj)), href=href, tag=tag)
return TextDoc()
def __eq__(self, other):
if type(other) is type(self):
return (
self.format == other.format
and self.href == other.href
and self.tag == other.tag
and self.value == other.value
)
else:
return False
def __hash__(self):
return hash((self.format, self.href, self.tag, self.value))
class WithDocsMixin:
    """Mixin adding 'doc' handling to ALPS elements with a `contents` dict."""
    def __init__(self, *args, **kwargs):
        super(WithDocsMixin, self).__init__(*args, **kwargs)

    def add_doc(self, doc: any):
        """Attach a doc (parsing raw input first); return self for chaining."""
        parsed = doc if isinstance(doc, Doc) else Doc.parse(doc)
        append_if_not_none(self.contents, parsed, "doc")
        return self

    @property
    def docs(self):
        """All attached docs, always as a list."""
        return always_a_list(get_if_exists(self.contents, "doc", []))
class HtmlDoc(Doc):
    """Doc with format 'html'; href and value are required."""
    def __init__(self, href: str, value: str, *args, **kwargs):
        super(HtmlDoc, self).__init__(
            value=value, href=href, format="html", *args, **kwargs
        )
class MarkDownDoc(Doc):
    """Doc with format 'markdown'."""
    def __init__(self, value: str, *args, **kwargs):
        super(MarkDownDoc, self).__init__(
            value=value, format="markdown", *args, **kwargs
        )
class TextDoc(Doc):
    """Doc with format 'text' (the default representation)."""
    def __init__(self, value: str = "", *args, **kwargs):
        super(TextDoc, self).__init__(value=value, format="text", *args, **kwargs)
e82b5dd4f6d257bb38d921ee592707269fd5b432 | 4,406 | py | Python | archiveOldVersions/commonFunctions_v09.py | remichartier/014_selfDrivingCarND_BehavioralCloningProject | 1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff | [
"MIT"
] | 1 | 2021-02-23T08:28:54.000Z | 2021-02-23T08:28:54.000Z | archiveOldVersions/commonFunctions_v09.py | remichartier/014_selfDrivingCarND_BehavioralCloningProject | 1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff | [
"MIT"
] | null | null | null | archiveOldVersions/commonFunctions_v09.py | remichartier/014_selfDrivingCarND_BehavioralCloningProject | 1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff | [
"MIT"
] | null | null | null | import os
import csv
import cv2
import numpy as np # for np.array() np.append()
from datetime import datetime # print timestamps
# for loss history visualization image
import matplotlib; matplotlib.use('agg')
import matplotlib.pyplot as plt
from scipy import ndimage
# list of common functions
# from commonFunctions_vxx import get_log_path
# from commonFunctions_vxx import get_lines_logfile
# from commonFunctions_vxx import get_info_from_lines
# from commonFunctions_vxx import get_info_from_logfile
# from commonFunctions_vxx import flip_horizontally
# from commonFunctions_vxx import visualize_loss_history
# from commonFunctions_vxx import RGB2YUV
# from commonFunctions_vxx import print_info
# from commonFunctions_vxx import
# History
# v01 : Start
# v02 : add nb_images to read parameter
# v03 : from scipy import ndimage, due to cv2.imread will get images in BGR format, while drive.py uses RGB. In the video above one way you could keep the same image formatting is to do "image = ndimage.imread(current_path)"
# v04 : use left + right images to augment data + measurements extrapolations
# v05 : add Generator Function + modify all other functions whenever necessary to use generator function ...
# v06 : Re-start from v04 aas fit_generator and need to add generator obsolete.
# Latest Keras.Model.fit integrates a generator in itself.
# ie v06 : visualize loss history
# v07 : For nvidia model, convert RGB to YUV
# v08 : add print_info() to print debug/progress info
# v12 : Try to avoid list to numpy conversion, taking few minutes, start with numpy image array straight from start
# But failed. Need to use and adapt to Generator
driving_log_file = 'driving_log.csv'
# Select right sample data folder whether in GPU mode or not
# check if ./data/driving_log.csv exists otherwise select
# simulationData/001_1stTrackSampleDrivingData/
def get_log_path():
    """Return the data folder containing driving_log.csv.

    Prefers ./data/ (GPU workspace layout); falls back to the local
    sample-driving-data folder.
    """
    preferred = "./data/"
    if os.path.exists(preferred + driving_log_file):
        return preferred
    return "./simulationData/001_1stTrackSampleDrivingData/"
def get_lines_logfile():
    """Read driving_log.csv and return all rows (header row included)."""
    with open(get_log_path() + driving_log_file) as csv_file:
        return list(csv.reader(csv_file))
def get_info_from_lines(l, leftright_steer_corr, nb_images=None):
    """Build image and steering-measurement lists from driving-log rows.

    For every row the center, left and right camera images (columns 0..2)
    are loaded and the steering measurement (column 3) is extrapolated for
    the side cameras: left = center + correction, right = center - correction.

    l -- rows from the CSV log; row 0 is the header and is skipped
    leftright_steer_corr -- steering offset applied to left/right images
    nb_images -- optional limit on the number of rows processed
    Returns (images, measurements) as parallel lists.
    """
    # NOTE: the unused np.zeros((2*len(l), ...)) preallocation from the old
    # version was removed -- it wasted memory and was never read.
    imgs = []
    meas = []
    log_path = get_log_path()
    for line in l[1:nb_images]:
        for i in range(3):  # center, left, right image paths
            imgs.append(ndimage.imread(log_path + line[i].strip()))
        center = float(line[3])
        meas.append(center)                           # center camera
        meas.append(center + leftright_steer_corr)    # left camera
        # BUGFIX: the right camera must steer *below* the center angle;
        # the old code (+= then -=) appended the center value again.
        meas.append(center - leftright_steer_corr)    # right camera
    return imgs, meas
def get_info_from_logfile(leftright_steer_correction, nb_images=None):
    """Parse the driving log and extract images plus steering measurements."""
    return get_info_from_lines(get_lines_logfile(),
                               leftright_steer_correction, nb_images)
def flip_horizontally(img, meas):
    """Return horizontally mirrored copies of the images with negated steering."""
    flipped_images = []
    flipped_measurements = []
    for image, steering in zip(img, meas):
        flipped_images.append(cv2.flip(image, 1))
        flipped_measurements.append(steering * (-1.0))
    return flipped_images, flipped_measurements
def visualize_loss_history(history):
    """Plot training/validation loss per epoch and save it as lossHistory.png."""
    for key in ('loss', 'val_loss'):
        plt.plot(history.history[key])
    plt.title('model mean squared error loss')
    plt.ylabel('mean squared error loss')
    plt.xlabel('epoch')
    plt.legend(['training set', 'validation set'], loc='upper right')
    # plt.show() would block a headless run; write to disk instead.
    plt.savefig('lossHistory.png')
def RGB2YUV(im):
    """Convert a list of RGB images to YUV color space (for the nvidia model)."""
    return [cv2.cvtColor(image, cv2.COLOR_RGB2YUV) for image in im]
def print_info(info):
    """Print a progress/debug message with an HH:MM:SS timestamp."""
    stamp = datetime.now().strftime("%H:%M:%S")
    # .format() instead of f-strings: the GPU image runs Python 3.5.2.
    print('{}. Time : {}'.format(info, stamp))
| 34.968254 | 224 | 0.69655 |
5828553283257f99b070710485710b33e99af795 | 623 | py | Python | fairy_chess/config.py | ratopythonista/fairy-chess-backend | 2eb3a43ea896160a38c69c42be81c0dc5506bba3 | [
"MIT"
] | null | null | null | fairy_chess/config.py | ratopythonista/fairy-chess-backend | 2eb3a43ea896160a38c69c42be81c0dc5506bba3 | [
"MIT"
] | null | null | null | fairy_chess/config.py | ratopythonista/fairy-chess-backend | 2eb3a43ea896160a38c69c42be81c0dc5506bba3 | [
"MIT"
] | null | null | null | import os
# PostgreSQL connection settings, overridable via environment variables.
PGSQL_DB = os.environ.get('PGSQL_DB', 'annamae')
PGSQL_HOST = os.environ.get('PGSQL_HOST', 'localhost')
PGSQL_PASS = os.environ.get('PGSQL_PASS', 'annamae')
PGSQL_PORT = os.environ.get('PGSQL_PORT', '5432')
PGSQL_USR = os.environ.get('PGSQL_USR', 'annamae')
EMAIL_PASS = os.environ.get('EMAIL_PASS', None)
# JWT signing key -- generated with: openssl rand -hex 32
# SECURITY: allow overriding via the environment so deployments do not have
# to rely on the key committed to source control (same default preserved).
SECRET_KEY = os.environ.get(
    'SECRET_KEY',
    '4d3dd578d2c2cacc4505eff68e205136b1dc2cd7d939038ae96cbbee9fa42003')
ALGORITHM = "HS256"  # JWT signature algorithm
ACCESS_TOKEN_EXPIRE_MINUTES = 30
DESCRIPTION = """Fairy Chess TFT Tournament and Statistics<br>
<b>** To test the API using the swagger query the endpoint: `/swagger`.</b>"""
bea4c12415ea9151219c025c89651f0b061d83c5 | 6,250 | py | Python | tests/contrib/operators/test_mssql_to_gcs_operator.py | fxdmhtt/airflow | cf88f7bc7bbd3e9bf110e98f025759a96c130235 | [
"Apache-2.0"
] | 3 | 2019-03-28T05:59:39.000Z | 2019-10-03T22:05:25.000Z | tests/contrib/operators/test_mssql_to_gcs_operator.py | fxdmhtt/airflow | cf88f7bc7bbd3e9bf110e98f025759a96c130235 | [
"Apache-2.0"
] | 7 | 2019-03-27T07:58:14.000Z | 2020-02-12T17:42:33.000Z | tests/contrib/operators/test_mssql_to_gcs_operator.py | upjohnc/airflow-upjohn-k8s | caadbc1618d73e054de99138b0892cea3a9327c4 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 5 | 2017-06-19T19:55:47.000Z | 2020-10-10T00:49:20.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.contrib.operators.mssql_to_gcs import \
MsSqlToGoogleCloudStorageOperator
from tests.compat import mock
# Shared fixtures for the MsSqlToGoogleCloudStorageOperator tests below.
TASK_ID = 'test-mssql-to-gcs'
MSSQL_CONN_ID = 'mssql_conn_test'
SQL = 'select 1'
BUCKET = 'gs://test'
JSON_FILENAME = 'test_{}.ndjson'  # '{}' is filled with the output file counter
GZIP = False
# Rows the mocked MSSQL cursor yields, plus its DB-API description tuples.
ROWS = [
    ('mock_row_content_1', 42),
    ('mock_row_content_2', 43),
    ('mock_row_content_3', 44)
]
CURSOR_DESCRIPTION = (
    ('some_str', 0, None, None, None, None, None),
    ('some_num', 3, None, None, None, None, None)
)
# Expected newline-delimited JSON serialization of ROWS.
NDJSON_LINES = [
    b'{"some_num": 42, "some_str": "mock_row_content_1"}\n',
    b'{"some_num": 43, "some_str": "mock_row_content_2"}\n',
    b'{"some_num": 44, "some_str": "mock_row_content_3"}\n'
]
SCHEMA_FILENAME = 'schema_test.json'
# Expected schema-file content written alongside the data files.
SCHEMA_JSON = [
    b'[{"mode": "NULLABLE", "name": "some_str", "type": "STRING"}, ',
    b'{"mode": "NULLABLE", "name": "some_num", "type": "INTEGER"}]'
]
class MsSqlToGoogleCloudStorageOperatorTest(unittest.TestCase):
    """Unit tests for MsSqlToGoogleCloudStorageOperator; the MSSQL and GCS
    hooks are patched so no real connections are made."""
    def test_init(self):
        """Test MsSqlToGoogleCloudStorageOperator instance is properly initialized."""
        op = MsSqlToGoogleCloudStorageOperator(
            task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME)
        self.assertEqual(op.task_id, TASK_ID)
        self.assertEqual(op.sql, SQL)
        self.assertEqual(op.bucket, BUCKET)
        self.assertEqual(op.filename, JSON_FILENAME)
    @mock.patch('airflow.contrib.operators.mssql_to_gcs.MsSqlHook')
    @mock.patch('airflow.contrib.operators.mssql_to_gcs.GoogleCloudStorageHook')
    def test_exec_success_json(self, gcs_hook_mock_class, mssql_hook_mock_class):
        """Test successful run of execute function for JSON"""
        op = MsSqlToGoogleCloudStorageOperator(
            task_id=TASK_ID,
            mssql_conn_id=MSSQL_CONN_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=JSON_FILENAME)
        # Wire the mocked cursor to yield ROWS with CURSOR_DESCRIPTION.
        mssql_hook_mock = mssql_hook_mock_class.return_value
        mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
        mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
        gcs_hook_mock = gcs_hook_mock_class.return_value
        def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
            # Upload callback: verify the destination and the ndjson payload.
            self.assertEqual(BUCKET, bucket)
            self.assertEqual(JSON_FILENAME.format(0), obj)
            self.assertEqual('application/json', mime_type)
            self.assertEqual(GZIP, gzip)
            with open(tmp_filename, 'rb') as file:
                self.assertEqual(b''.join(NDJSON_LINES), file.read())
        gcs_hook_mock.upload.side_effect = _assert_upload
        op.execute(None)
        mssql_hook_mock_class.assert_called_once_with(mssql_conn_id=MSSQL_CONN_ID)
        mssql_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
    @mock.patch('airflow.contrib.operators.mssql_to_gcs.MsSqlHook')
    @mock.patch('airflow.contrib.operators.mssql_to_gcs.GoogleCloudStorageHook')
    def test_file_splitting(self, gcs_hook_mock_class, mssql_hook_mock_class):
        """Test that ndjson is split by approx_max_file_size_bytes param."""
        mssql_hook_mock = mssql_hook_mock_class.return_value
        mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
        mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
        gcs_hook_mock = gcs_hook_mock_class.return_value
        # The size limit below forces rows 1-2 into file 0 and row 3 into file 1.
        expected_upload = {
            JSON_FILENAME.format(0): b''.join(NDJSON_LINES[:2]),
            JSON_FILENAME.format(1): NDJSON_LINES[2],
        }
        def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
            self.assertEqual(BUCKET, bucket)
            self.assertEqual('application/json', mime_type)
            self.assertEqual(GZIP, gzip)
            with open(tmp_filename, 'rb') as file:
                self.assertEqual(expected_upload[obj], file.read())
        gcs_hook_mock.upload.side_effect = _assert_upload
        op = MsSqlToGoogleCloudStorageOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=JSON_FILENAME,
            approx_max_file_size_bytes=len(expected_upload[JSON_FILENAME.format(0)]))
        op.execute(None)
    @mock.patch('airflow.contrib.operators.mssql_to_gcs.MsSqlHook')
    @mock.patch('airflow.contrib.operators.mssql_to_gcs.GoogleCloudStorageHook')
    def test_schema_file(self, gcs_hook_mock_class, mssql_hook_mock_class):
        """Test writing schema files."""
        mssql_hook_mock = mssql_hook_mock_class.return_value
        mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
        mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
        gcs_hook_mock = gcs_hook_mock_class.return_value
        def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):
            # Only inspect the schema upload; the data upload is covered above.
            if obj == SCHEMA_FILENAME:
                with open(tmp_filename, 'rb') as file:
                    self.assertEqual(b''.join(SCHEMA_JSON), file.read())
        gcs_hook_mock.upload.side_effect = _assert_upload
        op = MsSqlToGoogleCloudStorageOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=JSON_FILENAME,
            schema_filename=SCHEMA_FILENAME)
        op.execute(None)
        # once for the file and once for the schema
        self.assertEqual(2, gcs_hook_mock.upload.call_count)
| 40.322581 | 86 | 0.6912 |
ca57288ce6908252259a0130159dab61f6b468d6 | 27,241 | py | Python | scipy/sparse/sparsetools/coo.py | tomspur/scipy | 5309706537dbd96e0409f890a20fc6f5badfbac3 | [
"BSD-3-Clause"
] | null | null | null | scipy/sparse/sparsetools/coo.py | tomspur/scipy | 5309706537dbd96e0409f890a20fc6f5badfbac3 | [
"BSD-3-Clause"
] | null | null | null | scipy/sparse/sparsetools/coo.py | tomspur/scipy | 5309706537dbd96e0409f890a20fc6f5badfbac3 | [
"BSD-3-Clause"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
    # Python >= 2.6: explicitly locate the compiled _coo extension that
    # lives next to this wrapper, using imp so the search path is explicit.
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_coo', [dirname(__file__)])
        except ImportError:
            # Fall back to a normal sys.path import.
            import _coo
            return _coo
        if fp is not None:
            try:
                _mod = imp.load_module('_coo', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _coo = swig_import_helper()
    del swig_import_helper
else:
    import _coo
del version_info
# Alias 'property' when available (SWIG supports very old Pythons).
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    """SWIG attribute setter honoring the class's __swig_setmethods__ table.

    With static=1, setting a name that has no registered setter raises
    AttributeError instead of adding it to the instance dict.
    """
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Non-static variant: unknown names are added to the instance dict.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    """Look up `name` via SWIG's __swig_getmethods__ table on class_type."""
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old-/new-style class compatibility: on Python 2.1 and earlier the builtin
# `object` does not exist, so fall back to a classic-class base.  `_newclass`
# records which flavor is in use for the generated proxy classes below.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def coo_tocsr(*args):
    """Dispatch to the C routine converting a COO matrix to CSR format.

    The extension function is overloaded on the index dtype (npy_int32 or
    npy_int64) and on every supported value dtype (bool, all signed and
    unsigned integer widths, float/double/long double and their complex
    wrappers).  Each overload follows the same pattern::

        coo_tocsr(n_row, n_col, nnz, Ai, Aj, Ax, Bp, Bj, Bx)

    where ``Ai``/``Aj``/``Ax`` are the COO row, column and data arrays and
    ``Bp``/``Bj``/``Bx`` are preallocated CSR indptr, indices and data
    arrays that the routine fills in place.
    """
    return _coo.coo_tocsr(*args)
def coo_tocsc(*args):
    """Dispatch to the C routine converting a COO matrix to CSC format.

    Overloaded on the index dtype (npy_int32 or npy_int64) and on every
    supported value dtype (bool, all signed and unsigned integer widths,
    float/double/long double and their complex wrappers).  Each overload
    follows the same pattern::

        coo_tocsc(n_row, n_col, nnz, Ai, Aj, Ax, Bp, Bi, Bx)

    where ``Ai``/``Aj``/``Ax`` are the COO row, column and data arrays and
    ``Bp``/``Bi``/``Bx`` are preallocated CSC indptr, row-indices and data
    arrays that the routine fills in place.
    """
    return _coo.coo_tocsc(*args)
def coo_todense(*args):
    """Dispatch to the C routine scattering a COO matrix into a dense array.

    Overloaded on the index dtype (npy_int32 or npy_int64) and on every
    supported value dtype.  Each overload follows the same pattern::

        coo_todense(n_row, n_col, nnz, Ai, Aj, Ax, Bx, fortran)

    where ``Bx`` is the preallocated dense output buffer (updated in
    place) and ``fortran`` selects the memory layout of ``Bx``
    (column-major when non-zero, row-major otherwise).
    """
    return _coo.coo_todense(*args)
def coo_matvec(*args):
    """Dispatch to the C matrix-vector product routine for COO matrices.

    Overloaded on the index dtype (npy_int32 or npy_int64) and on every
    supported value dtype.  Each overload follows the same pattern::

        coo_matvec(nnz, Ai, Aj, Ax, Xx, Yx)

    where ``Ai``/``Aj``/``Ax`` describe the COO matrix, ``Xx`` is the
    input vector and ``Yx`` receives the result in place.
    """
    return _coo.coo_matvec(*args)
def coo_count_diagonals(*args):
    """Dispatch to the C routine counting occupied diagonals of a COO matrix.

    Overloads (index dtype selects the return dtype)::

        coo_count_diagonals(nnz, Ai, Aj) -> npy_int32
        coo_count_diagonals(nnz, Ai, Aj) -> npy_int64
    """
    return _coo.coo_count_diagonals(*args)
# This file is compatible with both classic and new-style classes.
| 64.859524 | 119 | 0.671525 |
4ab58cde482e0395e718c66339e11d6124542284 | 11,195 | py | Python | mne_frequency_tagging/frequency_tagging.py | dominikwelke/mne-frequency-tagging | 4f08127b1d1bd766dbe5b89d214b2765e0a0d5a4 | [
"BSD-3-Clause"
] | null | null | null | mne_frequency_tagging/frequency_tagging.py | dominikwelke/mne-frequency-tagging | 4f08127b1d1bd766dbe5b89d214b2765e0a0d5a4 | [
"BSD-3-Clause"
] | null | null | null | mne_frequency_tagging/frequency_tagging.py | dominikwelke/mne-frequency-tagging | 4f08127b1d1bd766dbe5b89d214b2765e0a0d5a4 | [
"BSD-3-Clause"
] | null | null | null | """
Module for the analysis of neurophysiological data obtained with the frequency-tagging methodology (visual or auditory).
Author: Dominik Welke <dominik.welke@web.de>
"""
import mne
import numpy as np
import matplotlib.pyplot as plt
# The 'seaborn' style sheet was renamed to 'seaborn-v0_8' in matplotlib 3.6
# and the old alias later removed; try the current name first and fall back
# for older matplotlib versions (style.use raises OSError for unknown names).
try:
    plt.style.use('seaborn-v0_8')
except OSError:
    plt.style.use('seaborn')
# utils
def _make_montage(montage, verbose=False):
    """Resolve *montage* into an ``mne.channels.montage.DigMontage``.

    Parameters
    ----------
    montage - mne.channels.montage.DigMontage, str, None
        ``None`` -> standard 10-20 template montage.
        str -> name of a standard montage template as understood by
        ``mne.channels.make_standard_montage``.
        Anything else is returned unchanged.
    verbose - bool
        print what is being done

    Returns
    -------
    montage - mne.channels.montage.DigMontage

    Raises
    ------
    ValueError
        if *montage* is a string that does not name a standard montage.
    """
    if montage is None:  # take default 10-20 montage
        montage = mne.channels.make_standard_montage(
            'standard_1020', head_size=0.095)  # head_size parameter default = 0.095
        if verbose:
            print('create standard montage following 10-20 system')
    # isinstance instead of `type(...) is str`: also accepts str subclasses
    elif isinstance(montage, str):  # expect this to be a template name
        try:
            if verbose:
                print('try to create standard montage following "%s" template' % montage)
            montage = mne.channels.make_standard_montage(montage, head_size=0.095)  # head_size parameter default 0.095
        except ValueError as e:
            raise ValueError(
                'provide id of a standard montage as implemented in mne.channels.make_standard_montage or leave '
                'empty.\ndetails: %s' % e)
    # any other type is passed through unchanged
    return montage
# spectra operations
def snr_spectrum(psd, noise_n_neighborfreqs=1, noise_skip_neighborfreqs=1):
    """Compute an SNR spectrum: signal power over mean neighboring-bin power.

    Parameters
    ----------
    psd - np.array
        containing psd values as spit out by mne functions. must be 2d or 3d
        with frequencies in the last dimension
    noise_n_neighborfreqs - int
        number of neighboring frequencies used to compute noise level.
        increment by one to add one frequency bin ON BOTH SIDES
    noise_skip_neighborfreqs - int
        set this >=1 if you want to exclude the immediately neighboring
        frequency bins in noise level calculation

    Returns
    -------
    snr - np.array
        array containing snr for all epochs, channels, frequency bins.
        NaN for frequencies on the edge, that do not have enough neighbors on
        one side to calculate snr
    """
    # prep not epoched / single channel data: temporarily add a leading axis
    is_2d = (psd.ndim == 2)
    if is_2d:
        psd = psd[np.newaxis, :, :]

    n_freqs = psd.shape[2]
    # initialize with NaN so edge bins (not enough noise neighbors) stay NaN
    snr = np.full(psd.shape, np.nan)

    # bounds of the bins with a full set of noise neighbors on both sides
    # (loop invariants, hoisted out of the frequency loop)
    start_freq_i = noise_n_neighborfreqs + noise_skip_neighborfreqs
    stop_freq_i = n_freqs - noise_n_neighborfreqs - noise_skip_neighborfreqs

    for i_freq in range(start_freq_i, stop_freq_i):
        # indices of the noise bins symmetric around the current bin,
        # skipping the immediate neighbors as requested
        i_noise = []
        for i in range(noise_n_neighborfreqs):
            offset = noise_skip_neighborfreqs + i + 1
            i_noise.append(i_freq + offset)
            i_noise.append(i_freq - offset)
        # SNR = signal power / average noise power
        noise = psd[:, :, i_noise].mean(axis=2)
        snr[:, :, i_freq] = psd[:, :, i_freq] / noise

    # restore original dimensionality for not epoched / single channel data
    if is_2d:
        snr = snr[0]
    return snr
def snr_at_frequency(snrs, freqs, stim_freq, verbose=False):
    """Extract SNR values at the frequency bin closest to *stim_freq*.

    Parameters
    ----------
    snrs - np.array
        SNR values with frequency bins in the last dimension (1d, 2d or 3d,
        e.g. trials x channels x frequencies).
    freqs - np.array
        frequencies corresponding to the last dimension of *snrs*.
    stim_freq - float
        frequency of interest; the closest bin is used.
        (could be updated to support multiple frequencies)
    verbose - bool
        print the average SNR at the selected bin.

    Returns
    -------
    snrs_stim - np.array or scalar
        SNR values at the selected frequency bin; one dimension less than
        *snrs* (trial/channel subselection can be done on the result).
    """
    # get position closest to wanted frequency (first bin on ties)
    i_signal = int(np.argmin(np.abs(np.subtract(freqs, stim_freq))))

    # check dimensionality
    dimensionality = len(snrs.shape)
    if dimensionality > 3:
        raise ValueError('SNR array has more that 3 dimensions. whats happening?')

    # frequency bins are always in the last dimension; the ellipsis handles
    # 1d, 2d and 3d uniformly.  BUGFIX: the former 1d branch returned the
    # whole array instead of the value at the selected bin.
    snrs_stim = snrs[..., i_signal]

    if verbose:
        print('average SNR at %iHz: %.3f '
              % (stim_freq, snrs_stim.mean()))
    return snrs_stim
# visualization function
def plot_snr_spectrum(snrs, freqs, stim_freq=None, bg_var_trials=False, bg_var_channels=False, show=True):
    """
    Plot a grand-average SNR spectrum as a stem plot.

    Parameters
    ----------
    snrs - np.array
        array containing snr for all epochs, channels, frequency bins.
        NaN for frequencies on the edge, that do not have enough neighbors on
        one side to calculate snr
    freqs - list, np.array
        containing all frequencies you calculated snr-values for.
    stim_freq - float, list, np.array, None
        stimulation frequencies, or any other frequency you want to be marked
        by a vertical line
    bg_var_trials - bool
        set to True, if you want the grand average SNR to be underlayed with
        average SNR by trial (blue, alpha=0.1)
    bg_var_channels - bool
        set to True, if you want the grand average SNR to be underlayed with
        average SNR by channel (green, alpha=0.1)
    show - bool
        show figure or not

    Returns
    -------
    fig - matplotlib.figure.Figure
    axes - matplotlib.axes.AxesSubplot
    """
    import warnings  # local import: only needed for the fallback warning

    fig, axes = plt.subplots(1, 1, sharex='all', sharey='all', dpi=300)

    # check format
    dimension = len(snrs.shape)
    if dimension > 3:  # more than 3d array
        raise ValueError('SNR array has more that 3 dimensions. whats happening?')

    # Average over trials
    if bg_var_trials and (dimension == 3):
        axes.plot(freqs, snrs.mean(axis=0).T, color='b', alpha=0.1)
    # Average over channels
    if bg_var_channels and (dimension == 3):
        axes.plot(freqs, snrs.mean(axis=1).T, color='g', alpha=0.1)

    # annotate stim frequencies.
    # BUGFIX: explicit `is not None` -- a numpy array with more than one
    # element raises ValueError in a plain truthiness test.
    if stim_freq is not None:
        # BUGFIX: also accept float frequencies, not only int
        if isinstance(stim_freq, (int, float)):
            axes.axvline(x=stim_freq, ls=':')
        elif isinstance(stim_freq, (list, np.ndarray)):
            for sf in stim_freq:
                axes.axvline(x=sf, ls=':')
        else:
            # BUGFIX: warn instead of `raise Warning(...)` -- raising would
            # abort the plot although the message promises to ignore the input
            warnings.warn('unsupported format for frequency annotations. will be ignored ')

    # grand average SNR over trials and channels as stem plot
    for _ in range(dimension - 1):
        snrs = snrs.mean(axis=0)
    axes.stem(freqs, snrs, linefmt='r-', markerfmt='rD')
    axes.set(title="SNR spectrum", xlabel='Frequency [Hz]',
             ylabel='SNR', ylim=[0, np.ceil(np.nanmax(snrs)+1)])

    # show plot or not?
    if show:
        fig.show()
    return fig, axes
def plot_snr_topography(snrs_at_frequency, ch_names, montage=None, plot_montage=False, show=False, verbose=False):
    """
    Plot a scalp topography of SNR values at a single frequency.

    Parameters
    ----------
    snrs_at_frequency - numpy.Array, list
        list or array with snr at a given frequency.
        if list or 1d array, length must correspond to len(ch_names).
        if 2d or 3d array, 1st dimension reflects number of trials, 2nd dimension number of channels.
        if 3d dimension is larger than 1, results might not make sense - average is taken.
    ch_names - list
        list of channel names, e.g. obtained from raw.Info['ch_names']
    montage - mne.channels.montage.DigMontage, str, None
        specify the montage for visualization.
        can be the exact montage as used, but also a standard montage.
        if str, specify name of a standard montage provided with mne.channels.make_standard_montage() function.
        if None, standard 10-20 montage is used
    plot_montage - bool
        also show a plot of the general montage?
    show - bool
        show plot or not
    verbose - bool
        print some additional info

    Returns
    -------
    fig - matplotlib.figure.Figure
    axes - matplotlib.axes.AxesSubplot
    """
    # get channel locations from montage
    montage = _make_montage(montage, verbose=verbose)
    # convert digitization to xyz coordinates.
    # NOTE(review): _get_ch_pos() is a private mne API and may change between
    # mne versions -- verify when upgrading mne.
    montage.positions = montage._get_ch_pos()

    # plot montage, if wanted
    if plot_montage:
        montage.plot(show=True)

    # get grand average SNR per channel (all subs, all trials).
    # isinstance instead of `type(...) != list`: idiomatic and accepts
    # list subclasses as well
    dimensionality = 1 if isinstance(snrs_at_frequency, list) \
        else len(snrs_at_frequency.shape)
    if dimensionality == 1:
        snr_grave = snrs_at_frequency
    elif dimensionality == 2:
        snr_grave = snrs_at_frequency.mean(axis=0)
    elif dimensionality == 3:
        snr_grave = snrs_at_frequency.mean(axis=2).mean(axis=0)
    else:
        raise ValueError('SNR array has more that 3 dimensions. whats happening?')

    # select only present channels from the standard montage (x/y positions);
    # proper comprehension instead of a side-effect list comprehension
    topo_pos_grave = np.array(
        [montage.positions[ch][:2] for ch in ch_names])

    # plot SNR topography
    fig, axes = plt.subplots()
    mne.viz.plot_topomap(snr_grave, topo_pos_grave, vmin=0, axes=axes, show=show)

    if verbose:
        print('plot topography of given snr array')
        print("grand average SNR at given frequency: %f" % snr_grave.mean())
    return fig, axes
def plot_psd_spectrum(psds, freqs, fmin=None, fmax=None, plot_type='average', show=True):
    """Plot a power spectral density (PSD) spectrum in dB scale.

    Parameters
    ----------
    psds - np.ndarray
        PSD values; 1d (per frequency), 2d (channels x frequencies) or
        3d (trials x channels x frequencies).
    freqs - np.ndarray
        frequencies corresponding to the last axis of psds.
    fmin - float, None
        lower frequency bound of the plot. if None, min of freqs is used.
    fmax - float, None
        upper frequency bound of the plot. if None, max of freqs is used.
    plot_type - str
        'average' plots mean +- std over channels/trials (for 2d/3d input);
        any other value plots individual channel/trial traces.
    show - bool
        show plot or not
    Returns
    -------
    fig - matplotlib.figure.Figure
    axes - matplotlib.axes.AxesSubplot
    """
    # plot average psd plus/minus std.
    # code snippets from:
    # https://martinos.org/mne/stable/auto_examples/time_frequency/plot_compute_raw_data_spectrum.html
    # get dimensionality of data
    dimensionality = len(psds.shape)
    if dimensionality > 3:
        # fixed typo in the error message ('more that' -> 'more than')
        raise ValueError('PSD array has more than 3 dimensions. whats happening?')
    # get indices of plotted values
    if fmin is None:
        fmin = np.nanmin(freqs)
    if fmax is None:
        fmax = np.nanmax(freqs)
    # NOTE(review): this assumes freqs contains values whose floor equals
    # fmin + 1 and whose ceil equals fmax - 1; otherwise np.where() yields an
    # empty array and the [0][0] indexing raises IndexError - confirm intended.
    rng = range(np.where(np.floor(freqs) == fmin + 1)[0][0],
                np.where(np.ceil(freqs) == fmax - 1)[0][0])
    # prepare figure
    fig, axes = plt.subplots(1, 1, sharex='all', sharey='all', dpi=300)
    # prepare psd (transform to db scale)
    psds_plot = 10 * np.log10(psds)
    if plot_type == 'average' and dimensionality > 1:
        # get mean and std across trials/channels (3d) or channels only (2d)
        if dimensionality == 3:
            psds_mean = psds_plot.mean((0, 1))[rng]
            psds_std = psds_plot.std((0, 1))[rng]
        else:
            psds_mean = psds_plot.mean(axis=0)[rng]
            psds_std = psds_plot.std(axis=0)[rng]
        # plot mean line with a +- std band around it
        axes.plot(freqs[rng], psds_mean, color='b')
        axes.fill_between(freqs[rng], psds_mean - psds_std, psds_mean + psds_std,
                          color='b', alpha=.5)
        axes.set(title="PSD spectrum (average +- std)", xlabel='Frequency [Hz]',
                 ylabel='Power Spectral Density [dB]')
        plt.xlim([fmin, fmax])
    else:
        # plot each channel/trial as its own (semi-transparent) trace
        if dimensionality == 1:
            axes.plot(freqs[rng], psds_plot[rng], color='b')
        elif dimensionality == 2:
            axes.plot(
                freqs[rng],
                psds_plot[:, rng].T,
                color='b', alpha=.5)
        else:
            # flatten trials x channels into a single axis of traces
            axes.plot(
                freqs[rng],
                psds_plot.reshape(psds_plot.shape[0]*psds_plot.shape[1], psds_plot.shape[2])[:, rng].T,
                color='b', alpha=.5)
        axes.set(title="PSD spectrum (individual channels/trials)", xlabel='Frequency [Hz]',
                 ylabel='Power Spectral Density [dB]')
        plt.xlim([fmin, fmax])
    # show plot or not?
    if show:
        fig.show()
    return fig, axes
| 36.465798 | 119 | 0.636356 |
dda00a689eeacf518c30280bc222b87817a03609 | 1,210 | py | Python | app/main/serializers.py | Ubaidkhan06/recipe-api-project | 1b651083747059111027e7562543914f0fa0e8f5 | [
"MIT"
] | null | null | null | app/main/serializers.py | Ubaidkhan06/recipe-api-project | 1b651083747059111027e7562543914f0fa0e8f5 | [
"MIT"
] | null | null | null | app/main/serializers.py | Ubaidkhan06/recipe-api-project | 1b651083747059111027e7562543914f0fa0e8f5 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
    """Serializer for tag objects"""
    class Meta:
        model = Tag
        fields = ('id', 'name')
        # Fix: `read_only = ('id')` was a silent no-op - `read_only` is not a
        # valid ModelSerializer Meta option, and `('id')` is just the string
        # 'id', not a tuple. The correct option is `read_only_fields`.
        read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
    """Serializer for ingredient objects"""
    class Meta:
        model = Ingredient
        fields = ('id', 'name')
        # Fix: `read_only = ('id')` was a silent no-op - `read_only` is not a
        # valid ModelSerializer Meta option, and `('id')` is just the string
        # 'id', not a tuple. The correct option is `read_only_fields`.
        read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
    """Serializer for recipe object"""
    # Related objects are represented by their primary keys in the flat view.
    ingredients = serializers.PrimaryKeyRelatedField(
        many=True,
        queryset=Ingredient.objects.all()
    )
    tags = serializers.PrimaryKeyRelatedField(
        many=True,
        queryset=Tag.objects.all()
    )
    class Meta:
        model = Recipe
        fields = ('id', 'title', 'ingredients',
                  'tags', 'price', 'time_minutes',
                  'link',)
        # Fix: `read_only = ('id')` was a silent no-op - `read_only` is not a
        # valid ModelSerializer Meta option, and `('id')` is just the string
        # 'id', not a tuple. The correct option is `read_only_fields`.
        read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
    """Create detail recipe serializer.

    Overrides the primary-key related fields of RecipeSerializer with nested,
    read-only representations of the ingredients and tags.
    """
    # Fix: the final line carried trailing non-Python residue which broke the
    # file's syntax; it has been removed.
    ingredients = IngredientSerializer(many=True, read_only=True)
    tags = TagSerializer(many=True, read_only=True)
7393c8a0939a6839d7708154fe2ef5f891d67ffa | 927 | py | Python | travel/docs/Amadeus-master/pactravel-master/python-client/test/test_carrier_meta.py | shopglobal/api | 176e1858d3f93e8e7854ba194698b6b9825841da | [
"CC-BY-4.0"
] | null | null | null | travel/docs/Amadeus-master/pactravel-master/python-client/test/test_carrier_meta.py | shopglobal/api | 176e1858d3f93e8e7854ba194698b6b9825841da | [
"CC-BY-4.0"
] | 1 | 2021-06-01T22:04:28.000Z | 2021-06-01T22:04:28.000Z | travel/docs/Amadeus-master/pactravel-master/python-client/test/test_carrier_meta.py | shopglobal/api | 176e1858d3f93e8e7854ba194698b6b9825841da | [
"CC-BY-4.0"
] | null | null | null | # coding: utf-8
"""
Amadeus Travel Innovation Sandbox
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.carrier_meta import CarrierMeta
class TestCarrierMeta(unittest.TestCase):
    """ CarrierMeta unit test stubs """
    def setUp(self):
        # No fixtures needed yet; these are swagger-codegen generated stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testCarrierMeta(self):
        """
        Test CarrierMeta
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = swagger_client.models.carrier_meta.CarrierMeta()
        pass
# Allow running this test module directly (e.g. `python test_carrier_meta.py`).
if __name__ == '__main__':
    unittest.main()
| 20.6 | 105 | 0.700108 |
107f477fbe36b70665a89ac6e16a8597b4f9a95b | 2,301 | py | Python | djvue/views.py | abahnihi/djvue | 628e363297c676865aab28e83f04906d933540c0 | [
"MIT"
] | 13 | 2020-07-21T19:22:22.000Z | 2020-12-17T17:28:41.000Z | djvue/views.py | abahnihi/djvue | 628e363297c676865aab28e83f04906d933540c0 | [
"MIT"
] | 1 | 2021-01-26T11:37:38.000Z | 2021-01-26T11:37:38.000Z | djvue/views.py | abahnihi/djvue | 628e363297c676865aab28e83f04906d933540c0 | [
"MIT"
] | 3 | 2020-07-28T10:33:55.000Z | 2020-11-08T20:11:34.000Z | from rest_framework import status
from rest_framework.settings import api_settings
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import FileUploadSerializer
class FileUploadView(APIView):
    """Generic, subclassable file-upload endpoint.

    Subclass this when different behaviour is required, e.g.:
    - adding permissions (uploads should not always be public),
    - handling multiple file uploads at once,
    - handling directory uploads,
    - etc.
    """
    serializer_class = FileUploadSerializer

    def post(self, request, *args, **kwargs):
        """Validate and persist the uploaded file, replying 201 on success."""
        upload = self.get_serializer(data=request.data)
        upload.is_valid(raise_exception=True)
        saved = upload.save()
        return Response(
            saved,
            status=status.HTTP_201_CREATED,
            headers=self.get_success_headers(saved),
        )

    def get_success_headers(self, data):
        """Build the Location header for the created resource, if available."""
        try:
            location = data[api_settings.URL_FIELD_NAME]
        except (TypeError, KeyError):
            return {}
        return {"Location": str(location)}

    def get_serializer(self, *args, **kwargs):
        """Instantiate the serializer used for (de)serializing input/output."""
        kwargs["context"] = self.get_serializer_context()
        serializer_cls = self.get_serializer_class()
        return serializer_cls(*args, **kwargs)

    def get_serializer_class(self):
        """Return `self.serializer_class`.

        Override to provide different serializations depending on the incoming
        request (e.g. admins get full serialization, others get basic).
        """
        assert self.serializer_class is not None, (
            "'%s' should either include a `serializer_class` attribute, "
            "or override the `get_serializer_class()` method." % self.__class__.__name__
        )
        return self.serializer_class

    def get_serializer_context(self):
        """Extra context provided to the serializer class."""
        return {
            "request": self.request,
            "format": self.format_kwarg,
            "view": self,
        }
| 34.863636 | 88 | 0.672316 |
e88fd5974939ac756da6e43fc550e039bf368213 | 19,827 | py | Python | synapse/handlers/saml_handler.py | OttoHollmann/synapse | d6beabaec96ee99164e1cea644761a3b8ec7169c | [
"Apache-2.0"
] | null | null | null | synapse/handlers/saml_handler.py | OttoHollmann/synapse | d6beabaec96ee99164e1cea644761a3b8ec7169c | [
"Apache-2.0"
] | null | null | null | synapse/handlers/saml_handler.py | OttoHollmann/synapse | d6beabaec96ee99164e1cea644761a3b8ec7169c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import TYPE_CHECKING, Callable, Dict, Optional, Set, Tuple
import attr
import saml2
import saml2.response
from saml2.client import Saml2Client
from synapse.api.errors import SynapseError
from synapse.config import ConfigError
from synapse.config.saml2_config import SamlAttributeRequirement
from synapse.handlers._base import BaseHandler
from synapse.handlers.sso import MappingException, UserAttributes
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.module_api import ModuleApi
from synapse.types import (
UserID,
map_username_to_mxid_localpart,
mxid_localpart_allowed_characters,
)
from synapse.util.iterutils import chunk_seq
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@attr.s(slots=True)
class Saml2SessionData:
    """Data we track about SAML2 sessions"""
    # time the session was created, in milliseconds
    # (entries older than the configured lifetime are dropped by
    # SamlHandler.expire_sessions())
    creation_time = attr.ib()
    # The user interactive authentication session ID associated with this SAML
    # session (or None if this SAML session is for an initial login).
    ui_auth_session_id = attr.ib(type=Optional[str], default=None)
class SamlHandler(BaseHandler):
    """SSO identity provider handler for SAML2.

    Redirects users to the configured IdP, and turns the resulting
    AuthnResponse into either a completed login or a completed
    user-interactive-auth (UIA) session via the SSO handler.
    """
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self._saml_client = Saml2Client(hs.config.saml2_sp_config)
        self._saml_idp_entityid = hs.config.saml2_idp_entityid
        self._saml2_session_lifetime = hs.config.saml2_session_lifetime
        self._grandfathered_mxid_source_attribute = (
            hs.config.saml2_grandfathered_mxid_source_attribute
        )
        self._saml2_attribute_requirements = hs.config.saml2.attribute_requirements
        self._error_template = hs.config.sso_error_template
        # plugin to do custom mapping from saml response to mxid
        self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class(
            hs.config.saml2_user_mapping_provider_config,
            ModuleApi(hs, hs.get_auth_handler()),
        )
        # identifier for the external_ids table
        self.idp_id = "saml"
        # user-facing name of this auth provider
        self.idp_name = "SAML"
        # we do not currently support icons/brands for SAML auth, but this is required by
        # the SsoIdentityProvider protocol type.
        self.idp_icon = None
        self.idp_brand = None
        # a map from saml session id to Saml2SessionData object
        self._outstanding_requests_dict = {} # type: Dict[str, Saml2SessionData]
        self._sso_handler = hs.get_sso_handler()
        self._sso_handler.register_identity_provider(self)
    async def handle_redirect_request(
        self,
        request: SynapseRequest,
        client_redirect_url: Optional[bytes],
        ui_auth_session_id: Optional[str] = None,
    ) -> str:
        """Handle an incoming request to /login/sso/redirect
        Args:
            request: the incoming HTTP request
            client_redirect_url: the URL that we should redirect the
                client to after login (or None for UI Auth).
            ui_auth_session_id: The session ID of the ongoing UI Auth (or
                None if this is a login).
        Returns:
            URL to redirect to
        """
        if not client_redirect_url:
            # Some SAML identity providers (e.g. Google) require a
            # RelayState parameter on requests, so pass in a dummy redirect URL
            # (which will never get used).
            client_redirect_url = b"unused"
        reqid, info = self._saml_client.prepare_for_authenticate(
            entityid=self._saml_idp_entityid, relay_state=client_redirect_url
        )
        # Since SAML sessions timeout it is useful to log when they were created.
        logger.info("Initiating a new SAML session: %s" % (reqid,))
        now = self.clock.time_msec()
        self._outstanding_requests_dict[reqid] = Saml2SessionData(
            creation_time=now, ui_auth_session_id=ui_auth_session_id,
        )
        # pysaml2 returns the redirect target as a Location header entry.
        for key, value in info["headers"]:
            if key == "Location":
                return value
        # this shouldn't happen!
        raise Exception("prepare_for_authenticate didn't return a Location header")
    async def handle_saml_response(self, request: SynapseRequest) -> None:
        """Handle an incoming request to /_synapse/client/saml2/authn_response
        Args:
            request: the incoming request from the browser. We'll
                respond to it with a redirect.
        Returns:
            Completes once we have handled the request.
        """
        resp_bytes = parse_string(request, "SAMLResponse", required=True)
        relay_state = parse_string(request, "RelayState", required=True)
        # expire outstanding sessions before parse_authn_request_response checks
        # the dict.
        self.expire_sessions()
        try:
            saml2_auth = self._saml_client.parse_authn_request_response(
                resp_bytes,
                saml2.BINDING_HTTP_POST,
                outstanding=self._outstanding_requests_dict,
            )
        except saml2.response.UnsolicitedResponse as e:
            # the pysaml2 library helpfully logs an ERROR here, but neglects to log
            # the session ID. I don't really want to put the full text of the exception
            # in the (user-visible) exception message, so let's log the exception here
            # so we can track down the session IDs later.
            logger.warning(str(e))
            self._sso_handler.render_error(
                request, "unsolicited_response", "Unexpected SAML2 login."
            )
            return
        except Exception as e:
            self._sso_handler.render_error(
                request,
                "invalid_response",
                "Unable to parse SAML2 response: %s." % (e,),
            )
            return
        if saml2_auth.not_signed:
            self._sso_handler.render_error(
                request, "unsigned_respond", "SAML2 response was not signed."
            )
            return
        logger.debug("SAML2 response: %s", saml2_auth.origxml)
        await self._handle_authn_response(request, saml2_auth, relay_state)
    async def _handle_authn_response(
        self,
        request: SynapseRequest,
        saml2_auth: saml2.response.AuthnResponse,
        relay_state: str,
    ) -> None:
        """Handle an AuthnResponse, having parsed it from the request params
        Assumes that the signature on the response object has been checked. Maps
        the user onto an MXID, registering them if necessary, and returns a response
        to the browser.
        Args:
            request: the incoming request from the browser. We'll respond to it with an
                HTML page or a redirect
            saml2_auth: the parsed AuthnResponse object
            relay_state: the RelayState query param, which encodes the URI to rediret
                back to
        """
        for assertion in saml2_auth.assertions:
            # kibana limits the length of a log field, whereas this is all rather
            # useful, so split it up.
            count = 0
            for part in chunk_seq(str(assertion), 10000):
                logger.info(
                    "SAML2 assertion: %s%s", "(%i)..." % (count,) if count else "", part
                )
                count += 1
        logger.info("SAML2 mapped attributes: %s", saml2_auth.ava)
        current_session = self._outstanding_requests_dict.pop(
            saml2_auth.in_response_to, None
        )
        # first check if we're doing a UIA
        if current_session and current_session.ui_auth_session_id:
            try:
                remote_user_id = self._remote_id_from_saml_response(saml2_auth, None)
            except MappingException as e:
                logger.exception("Failed to extract remote user id from SAML response")
                self._sso_handler.render_error(request, "mapping_error", str(e))
                return
            return await self._sso_handler.complete_sso_ui_auth_request(
                self.idp_id,
                remote_user_id,
                current_session.ui_auth_session_id,
                request,
            )
        # otherwise, we're handling a login request.
        # Ensure that the attributes of the logged in user meet the required
        # attributes.
        for requirement in self._saml2_attribute_requirements:
            if not _check_attribute_requirement(saml2_auth.ava, requirement):
                self._sso_handler.render_error(
                    request, "unauthorised", "You are not authorised to log in here."
                )
                return
        # Call the mapper to register/login the user
        try:
            await self._complete_saml_login(saml2_auth, request, relay_state)
        except MappingException as e:
            logger.exception("Could not map user")
            self._sso_handler.render_error(request, "mapping_error", str(e))
    async def _complete_saml_login(
        self,
        saml2_auth: saml2.response.AuthnResponse,
        request: SynapseRequest,
        client_redirect_url: str,
    ) -> None:
        """
        Given a SAML response, complete the login flow
        Retrieves the remote user ID, registers the user if necessary, and serves
        a redirect back to the client with a login-token.
        Args:
            saml2_auth: The parsed SAML2 response.
            request: The request to respond to
            client_redirect_url: The redirect URL passed in by the client.
        Raises:
            MappingException if there was a problem mapping the response to a user.
            RedirectException: some mapping providers may raise this if they need
                to redirect to an interstitial page.
        """
        remote_user_id = self._remote_id_from_saml_response(
            saml2_auth, client_redirect_url
        )
        async def saml_response_to_remapped_user_attributes(
            failures: int,
        ) -> UserAttributes:
            """
            Call the mapping provider to map a SAML response to user attributes and coerce the result into the standard form.
            This is backwards compatibility for abstraction for the SSO handler.
            """
            # Call the mapping provider.
            result = self._user_mapping_provider.saml_response_to_user_attributes(
                saml2_auth, failures, client_redirect_url
            )
            # Remap some of the results.
            return UserAttributes(
                localpart=result.get("mxid_localpart"),
                display_name=result.get("displayname"),
                emails=result.get("emails", []),
            )
        async def grandfather_existing_users() -> Optional[str]:
            # backwards-compatibility hack: see if there is an existing user with a
            # suitable mapping from the uid
            if (
                self._grandfathered_mxid_source_attribute
                and self._grandfathered_mxid_source_attribute in saml2_auth.ava
            ):
                attrval = saml2_auth.ava[self._grandfathered_mxid_source_attribute][0]
                user_id = UserID(
                    map_username_to_mxid_localpart(attrval), self.server_name
                ).to_string()
                logger.debug(
                    "Looking for existing account based on mapped %s %s",
                    self._grandfathered_mxid_source_attribute,
                    user_id,
                )
                users = await self.store.get_users_by_id_case_insensitive(user_id)
                if users:
                    registered_user_id = list(users.keys())[0]
                    logger.info("Grandfathering mapping to %s", registered_user_id)
                    return registered_user_id
            return None
        await self._sso_handler.complete_sso_login_request(
            self.idp_id,
            remote_user_id,
            request,
            client_redirect_url,
            saml_response_to_remapped_user_attributes,
            grandfather_existing_users,
        )
    def _remote_id_from_saml_response(
        self,
        saml2_auth: saml2.response.AuthnResponse,
        client_redirect_url: Optional[str],
    ) -> str:
        """Extract the unique remote id from a SAML2 AuthnResponse
        Args:
            saml2_auth: The parsed SAML2 response.
            client_redirect_url: The redirect URL passed in by the client.
        Returns:
            remote user id
        Raises:
            MappingException if there was an error extracting the user id
        """
        # It's not obvious why we need to pass in the redirect URI to the mapping
        # provider, but we do :/
        remote_user_id = self._user_mapping_provider.get_remote_user_id(
            saml2_auth, client_redirect_url
        )
        if not remote_user_id:
            raise MappingException(
                "Failed to extract remote user id from SAML response"
            )
        return remote_user_id
    def expire_sessions(self):
        """Drop outstanding SAML sessions older than the configured lifetime."""
        expire_before = self.clock.time_msec() - self._saml2_session_lifetime
        to_expire = set()
        for reqid, data in self._outstanding_requests_dict.items():
            if data.creation_time < expire_before:
                to_expire.add(reqid)
        for reqid in to_expire:
            logger.debug("Expiring session id %s", reqid)
            del self._outstanding_requests_dict[reqid]
def _check_attribute_requirement(ava: dict, req: SamlAttributeRequirement) -> bool:
    """Return True iff one of the SAML attribute's values equals the required value."""
    candidates = ava.get(req.attribute, [])
    if any(candidate == req.value for candidate in candidates):
        return True
    logger.info(
        "SAML2 attribute %s did not match required value '%s' (was '%s')",
        req.attribute,
        req.value,
        candidates,
    )
    return False
# Matches every character that is NOT allowed in an mxid localpart; used by
# dot_replace_for_mxid() to substitute such characters with a dot.
DOT_REPLACE_PATTERN = re.compile(
    ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
)
def dot_replace_for_mxid(username: str) -> str:
    """Replace any characters which are not allowed in Matrix IDs with a dot."""
    lowered = username.lower()
    sanitised = DOT_REPLACE_PATTERN.sub(".", lowered)
    # regular mxids aren't allowed to start with an underscore either
    return re.sub("^_", "", sanitised)
# Maps the `mxid_mapping` config value to the function used to turn a SAML
# attribute value into an mxid localpart (see DefaultSamlMappingProvider.parse_config).
MXID_MAPPER_MAP = {
    "hexencode": map_username_to_mxid_localpart,
    "dotreplace": dot_replace_for_mxid,
} # type: Dict[str, Callable[[str], str]]
@attr.s
class SamlConfig:
    """Parsed configuration for DefaultSamlMappingProvider."""
    # name of the SAML attribute from which the mxid localpart is derived
    mxid_source_attribute = attr.ib()
    # callable mapping the attribute value to a localpart (see MXID_MAPPER_MAP)
    mxid_mapper = attr.ib()
class DefaultSamlMappingProvider:
    """Default SAML user-mapping provider.

    Derives the user's mxid localpart from a single configurable SAML
    attribute (default "uid") using a configurable mapping function.
    """
    __version__ = "0.0.1"
    def __init__(self, parsed_config: SamlConfig, module_api: ModuleApi):
        """The default SAML user mapping provider
        Args:
            parsed_config: Module configuration
            module_api: module api proxy
        """
        self._mxid_source_attribute = parsed_config.mxid_source_attribute
        self._mxid_mapper = parsed_config.mxid_mapper
        self._grandfathered_mxid_source_attribute = (
            module_api._hs.config.saml2_grandfathered_mxid_source_attribute
        )
    def get_remote_user_id(
        self, saml_response: saml2.response.AuthnResponse, client_redirect_url: str
    ) -> str:
        """Extracts the remote user id from the SAML response"""
        try:
            return saml_response.ava["uid"][0]
        except KeyError:
            logger.warning("SAML2 response lacks a 'uid' attestation")
            raise MappingException("'uid' not in SAML2 response")
    def saml_response_to_user_attributes(
        self,
        saml_response: saml2.response.AuthnResponse,
        failures: int,
        client_redirect_url: str,
    ) -> dict:
        """Maps some text from a SAML response to attributes of a new user
        Args:
            saml_response: A SAML auth response object
            failures: How many times a call to this function with this
                saml_response has resulted in a failure
            client_redirect_url: where the client wants to redirect to
        Returns:
            dict: A dict containing new user attributes. Possible keys:
                * mxid_localpart (str): Required. The localpart of the user's mxid
                * displayname (str): The displayname of the user
                * emails (list[str]): Any emails for the user
        """
        try:
            mxid_source = saml_response.ava[self._mxid_source_attribute][0]
        except KeyError:
            logger.warning(
                "SAML2 response lacks a '%s' attestation", self._mxid_source_attribute,
            )
            raise SynapseError(
                400, "%s not in SAML2 response" % (self._mxid_source_attribute,)
            )
        # Use the configured mapper for this mxid_source
        localpart = self._mxid_mapper(mxid_source)
        # Append suffix integer if last call to this function failed to produce
        # a usable mxid.
        localpart += str(failures) if failures else ""
        # Retrieve the display name from the saml response
        # If displayname is None, the mxid_localpart will be used instead
        displayname = saml_response.ava.get("displayName", [None])[0]
        # Retrieve any emails present in the saml response
        emails = saml_response.ava.get("email", [])
        return {
            "mxid_localpart": localpart,
            "displayname": displayname,
            "emails": emails,
        }
    @staticmethod
    def parse_config(config: dict) -> SamlConfig:
        """Parse the dict provided by the homeserver's config
        Args:
            config: A dictionary containing configuration options for this provider
        Returns:
            SamlConfig: A custom config object for this module
        """
        # Parse config options and use defaults where necessary
        mxid_source_attribute = config.get("mxid_source_attribute", "uid")
        mapping_type = config.get("mxid_mapping", "hexencode")
        # Retrieve the associating mapping function
        try:
            mxid_mapper = MXID_MAPPER_MAP[mapping_type]
        except KeyError:
            raise ConfigError(
                "saml2_config.user_mapping_provider.config: '%s' is not a valid "
                "mxid_mapping value" % (mapping_type,)
            )
        return SamlConfig(mxid_source_attribute, mxid_mapper)
    @staticmethod
    def get_saml_attributes(config: SamlConfig) -> Tuple[Set[str], Set[str]]:
        """Returns the required attributes of a SAML
        Args:
            config: A SamlConfig object containing configuration params for this provider
        Returns:
            The first set equates to the saml auth response
            attributes that are required for the module to function, whereas the
            second set consists of those attributes which can be used if
            available, but are not necessary
        """
        return {"uid", config.mxid_source_attribute}, {"displayName", "email"}
| 37.129213 | 125 | 0.636859 |
ee32ef13c73ce5bbea54c7901838688701967e73 | 1,726 | py | Python | ggplot/geoms/geom_point.py | peckto/ggpy | 28f8998a4199d9434dd5f2ae3ea7127b423118e4 | [
"BSD-2-Clause"
] | null | null | null | ggplot/geoms/geom_point.py | peckto/ggpy | 28f8998a4199d9434dd5f2ae3ea7127b423118e4 | [
"BSD-2-Clause"
] | null | null | null | ggplot/geoms/geom_point.py | peckto/ggpy | 28f8998a4199d9434dd5f2ae3ea7127b423118e4 | [
"BSD-2-Clause"
] | null | null | null | from .geom import geom
import numpy as np
from ..utils import is_date
def _date_to_number(i):
return i.toordinal() + i.time().hour/24 + i.time().minute/1440 + i.time().second/86400
class geom_point(geom):
    """
    Scatterplot of (x, y) coordinates
    Parameters
    ----------
    x:
        x values for (x, y) coordinates
    y:
        y values for (x, y) coordinates
    color:
        color of points
    alpha:
        transparency of color
    shape:
        type of point used ('o', '^', 'D', 'v', 's', '*', 'p', '8', "_", "|", "_")
    edgecolors:
        color of the outer line of the point
    size:
        size of the point
    Examples
    --------
    """
    # Aesthetic defaults applied when the caller does not supply them.
    DEFAULT_AES = {'alpha': 1, 'color': 'black', 'shape': 'o', 'size': 20, 'edgecolors': 'none', 'picker': False}
    REQUIRED_AES = {'x', 'y'}
    # ggplot aesthetic names -> matplotlib scatter() argument names.
    _aes_renames = {'size': 's', 'shape': 'marker', 'color': 'c'}
    DEFAULT_PARAMS = {'position': None}
    def plot(self, ax, data, _aes):
        """Draw the scatter layer onto the matplotlib axes `ax`."""
        (data, _aes) = self._update_data(data, _aes)
        params = self._get_plot_args(data, _aes)
        variables = _aes.data
        x = data[variables['x']]
        y = data[variables['y']]
        if 'colormap' in variables:
            params['cmap'] = variables['colormap']
        # NOTE(review): jitter is multiplicative (scales each value by +-10%),
        # not additive - values near zero barely move; confirm intended.
        if self.params.get("jitter"):
            x *= np.random.uniform(.9, 1.1, len(x))
            y *= np.random.uniform(.9, 1.1, len(y))
        if is_date(x.iloc[0]):
            # For date x-values: plot fractional ordinal numbers, then relabel
            # the ticks by converting the tick positions back to date objects.
            # NOTE(review): `dtype(i)` assumes the date class accepts a single
            # numeric argument (true for pandas Timestamp) - confirm for plain
            # datetime inputs.
            dtype = x.iloc[0].__class__
            x = np.array([_date_to_number(i) for i in x])
            ax.scatter(x, y, **params)
            new_ticks = [dtype(i) for i in ax.get_xticks()]
            ax.set_xticklabels(new_ticks)
        else:
            ax.scatter(x, y, **params)
1eaff9aada897c7025659c14730fbc6432148053 | 3,495 | py | Python | Problem_11.py | infdahai/PE_sol | 7764c639474df19f3f02ca5804819a1a74d8a723 | [
"MIT"
] | null | null | null | Problem_11.py | infdahai/PE_sol | 7764c639474df19f3f02ca5804819a1a74d8a723 | [
"MIT"
] | null | null | null | Problem_11.py | infdahai/PE_sol | 7764c639474df19f3f02ca5804819a1a74d8a723 | [
"MIT"
] | null | null | null | input_2 = '''08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48'''
input_1 = '''08 02 22 97
49 49 99 40
81 49 31 73
52 70 95 23'''
'''
TODO:
可以考虑根据分块大小,建立前缀积和后缀积,从而快速算出 水平或竖直方向的最大值。消耗是三次n乘法。
'''
# Parse the whitespace-separated 20x20 grid into a 2d list of ints.
input_list = input_2.split('\n')
int_list = []
for l_j in input_list:
    l_j = l_j.split(' ')
    l1 = list(map(int, l_j))
    int_list.append(l1)
# Best product found in each direction is collected here; the answer is their max.
max_list =[]
#max_val = 1788696
def leftToRight(a: list) -> int:
    """Return the largest product of four adjacent values in the row `a`.

    Windows containing a zero contribute 0; rows shorter than four values
    yield 0.
    """
    best = 0
    for start in range(len(a) - 3):
        window = a[start:start + 4]
        if 0 in window:
            # product would be zero, which can never beat a positive best
            continue
        product = window[0] * window[1] * window[2] * window[3]
        best = max(best, product)
    return best
# Best horizontal (left-to-right) product across all rows of the grid.
max_leftToRight = 0
for list_left in int_list:
    val = leftToRight(list_left)
    max_leftToRight = max(max_leftToRight, val)
max_list.append(max_leftToRight)
def upToDown(a: list) -> int: # a -> int_list
    """Return the largest product of four vertically adjacent cells in grid `a`.

    Bug fix: the running maximum previously started at 1 (`mul1 = 1`), so a
    grid whose every vertical window contained a zero returned 1 instead of 0.
    The maximum now starts at 0.
    """
    n_rows = len(a)
    n_cols = len(a[0])
    best = 0
    for i in range(n_rows - 3):
        for j in range(n_cols):
            product = a[i][j] * a[i + 1][j] * a[i + 2][j] * a[i + 3][j]
            best = max(best, product)
    return best
# Best vertical (top-to-bottom) product across the grid.
max_upToDown = upToDown(int_list)
max_list.append(max_upToDown)
def leftDiagonRight(a: list) -> int: # a -> int_list
    """Largest product of four cells on a top-left -> bottom-right diagonal.

    Assumes a square grid (uses len(a) for both dimensions, as before).
    Bug fix: the running maximum previously started at 1 (`mul1 = 1`),
    returning 1 instead of 0 when every diagonal window contained a zero.
    """
    size = len(a)
    best = 0
    for i in range(size - 3):
        for j in range(size - 3):
            product = (a[i][j] * a[i + 1][j + 1]
                       * a[i + 2][j + 2] * a[i + 3][j + 3])
            best = max(best, product)
    return best
def rightDiagonLeft(a: list) -> int:
    """Largest product of four cells on a top-right -> bottom-left diagonal.

    Assumes a square grid (uses len(a) for both dimensions, as before).
    Bug fix: the running maximum previously started at 1 (`mul1 = 1`),
    returning 1 instead of 0 when every diagonal window contained a zero.
    """
    size = len(a)
    best = 0
    for i in range(size - 3):
        for j in range(3, size):
            product = (a[i][j] * a[i + 1][j - 1]
                       * a[i + 2][j - 2] * a[i + 3][j - 3])
            best = max(best, product)
    return best
# Best products along both diagonal directions, then print the overall
# maximum across all four directions (the Project Euler 11 answer).
max_list.append(leftDiagonRight(int_list))
max_list.append(rightDiagonLeft(int_list))
print(max(max_list))
3aa36cafff536d8ddcf99750591af49d1416fe59 | 4,880 | py | Python | daq/docker_test.py | InBrewJ/daq | f3438146c7148bd570182821dec95f602a22c6c5 | [
"Apache-2.0"
] | null | null | null | daq/docker_test.py | InBrewJ/daq | f3438146c7148bd570182821dec95f602a22c6c5 | [
"Apache-2.0"
] | 20 | 2019-07-02T09:22:29.000Z | 2019-10-08T13:44:01.000Z | daq/docker_test.py | InBrewJ/daq | f3438146c7148bd570182821dec95f602a22c6c5 | [
"Apache-2.0"
] | 1 | 2019-08-19T12:38:28.000Z | 2019-08-19T12:38:28.000Z | """Module for running docker-container tests"""
import datetime
import logging
import os
from clib import docker_host
import wrappers
LOGGER = logging.getLogger('docker')
class DockerTest():
"""Class for running docker tests"""
IMAGE_NAME_FORMAT = 'daq/test_%s'
CONTAINER_PREFIX = 'daq'
def __init__(self, runner, target_port, tmpdir, test_name):
self.target_port = target_port
self.tmpdir = tmpdir
self.test_name = test_name
self.runner = runner
self.host_name = '%s%02d' % (test_name, self.target_port)
self.docker_log = None
self.docker_host = None
self.callback = None
self.start_time = None
def start(self, port, params, callback):
"""Start the docker test"""
LOGGER.debug('Target port %d starting docker test %s', self.target_port, self.test_name)
self.start_time = datetime.datetime.now()
self.callback = callback
env_vars = ["TARGET_NAME=" + self.host_name,
"TARGET_IP=" + params['target_ip'],
"TARGET_MAC=" + params['target_mac'],
"GATEWAY_IP=" + params['gateway_ip'],
"GATEWAY_MAC=" + params['gateway_mac']]
if 'local_ip' in params:
env_vars += ["LOCAL_IP=" + params['local_ip'],
"SWITCH_PORT=" + params['switch_port'],
"SWITCH_IP=" + params['switch_ip']]
vol_maps = [params['scan_base'] + ":/scans"]
self._map_if_exists(vol_maps, params, 'inst')
self._map_if_exists(vol_maps, params, 'port')
self._map_if_exists(vol_maps, params, 'device')
self._map_if_exists(vol_maps, params, 'type')
image = self.IMAGE_NAME_FORMAT % self.test_name
LOGGER.debug("Target port %d running docker test %s", self.target_port, image)
cls = docker_host.make_docker_host(image, prefix=self.CONTAINER_PREFIX)
try:
host = self.runner.add_host(self.host_name, port=port, cls=cls, env_vars=env_vars,
vol_maps=vol_maps, tmpdir=self.tmpdir)
self.docker_host = host
except Exception as e:
# pylint: disable=no-member
raise wrappers.DaqException(e)
try:
LOGGER.debug("Target port %d activating docker test %s", self.target_port, image)
host = self.docker_host
pipe = host.activate(log_name=None)
# Docker tests don't use DHCP, so manually set up DNS.
host.cmd('echo nameserver $GATEWAY_IP > /etc/resolv.conf')
self.docker_log = host.open_log()
self.runner.monitor_stream(self.host_name, pipe.stdout, copy_to=self.docker_log,
hangup=self._docker_complete,
error=self._docker_error)
except:
host.terminate()
self.runner.remove_host(host)
raise
LOGGER.info("Target port %d test %s running", self.target_port, self.test_name)
def _map_if_exists(self, vol_maps, params, kind):
base = params.get('%s_base' % kind)
if base and os.path.exists(base):
abs_base = os.path.abspath(base)
vol_maps += ['%s:/config/%s' % (abs_base, kind)]
def _docker_error(self, e):
LOGGER.error('Target port %d docker error: %s', self.target_port, e)
if self._docker_finalize() is None:
LOGGER.warning('Target port %d docker already terminated.', self.target_port)
else:
self.callback(exception=e)
def _docker_finalize(self):
    """Tear down the docker host and close its log.

    Returns the container's exit code, or None when the host was already
    finalized (idempotent on repeat calls).
    """
    if not self.docker_host:
        return None
    LOGGER.debug('Target port %d docker finalize', self.target_port)
    host = self.docker_host
    self.runner.remove_host(host)
    return_code = host.terminate()
    self.docker_host = None
    self.docker_log.close()
    self.docker_log = None
    return return_code
def _docker_complete(self):
    """Handle the docker test container's output stream hanging up (test done).

    Finalizes the container, logs pass/fail with the elapsed time, and
    reports the result through the completion callback.
    """
    try:
        return_code = self._docker_finalize()
        exception = None
    except Exception as e:
        # Finalization itself failed: record a synthetic failure code.
        return_code = -1
        exception = e
        LOGGER.exception(e)
    delay = (datetime.datetime.now() - self.start_time).total_seconds()
    # NOTE(review): _docker_finalize() returns None when the host was already
    # torn down; the %d below would then fail to format (logging swallows the
    # error, but this debug line would be lost) -- confirm intended.
    LOGGER.debug("Target port %d docker complete, return=%d (%s)",
                 self.target_port, return_code, exception)
    if return_code:
        LOGGER.info("Target port %d test %s failed %ss: %s %s",
                    self.target_port, self.test_name, delay, return_code, exception)
    else:
        # A zero (or None) return code counts as a pass.
        LOGGER.info("Target port %d test %s passed %ss",
                    self.target_port, self.test_name, delay)
    self.callback(return_code=return_code, exception=exception)
| 39.674797 | 96 | 0.592008 |
7abba1cbe6883605a668397055b009d178e23cba | 4,756 | py | Python | phosphodisco/utils.py | ruggleslab/phosphodisco | 9e0861dd31f07fc3a161e285777092759bfea26b | [
"MIT"
] | null | null | null | phosphodisco/utils.py | ruggleslab/phosphodisco | 9e0861dd31f07fc3a161e285777092759bfea26b | [
"MIT"
] | 5 | 2021-10-05T21:03:00.000Z | 2021-12-07T19:22:03.000Z | phosphodisco/utils.py | ruggleslab/phosphodisco | 9e0861dd31f07fc3a161e285777092759bfea26b | [
"MIT"
] | null | null | null | import warnings
from typing import Iterable, Optional
from pandas import Series
import numpy as np
import pandas as pd
from sklearn.linear_model import RidgeCV
from statsmodels.stats.multitest import multipletests
from scipy.stats import pearsonr, spearmanr
def norm_line_to_residuals(
    ph_line: Iterable,
    prot_line: Iterable,
    regularization_values: Optional[Iterable] = None,
    cv: Optional[int] = None,
    prevent_negative_coefficients: bool = True,
    **ridgecv_kwargs
) -> Series:
    """Uses CV and regularized linear regression to calculate residuals, representing
    protein-normalized phospho values for one line of data each.

    Args:
        ph_line: Vector of phosphorylation data.
        prot_line: Vector of protein data.
        regularization_values: Which regularization values should be tried during CV to
            define the coefficients. Defaults to powers of 5 from 5**-5 to 5**4.
        cv: The number of cross validation folds to use for picking the regularization
            value. Defaults to 3.
        prevent_negative_coefficients: If the linear coefficient between protein and
            phospho values is negative, something complicated is going on in that
            relationship. Set this to True to just return missing values in that case.
        **ridgecv_kwargs: Additional keyword args for sklearn.linear_model.RidgeCV.

    Returns: Series of residuals, representing protein abundance-normalized phospho data.
        All-NaN when there are too few paired observations or (optionally) when the
        fitted coefficient is non-positive.
    """
    if regularization_values is None:
        regularization_values = [5 ** i for i in range(-5, 5)]
    if cv is None:
        cv = 3
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # Use only positions where both the phospho and protein values are present.
    nonull = np.logical_and(~np.isnan(ph_line), ~np.isnan(prot_line))
    if sum(nonull) < cv:
        # Too few paired observations to cross-validate: return all-missing.
        # BUG FIX: np.empty() returns uninitialized memory, not NaNs, so callers
        # previously received arbitrary garbage values here.
        return pd.Series(np.full(len(ph_line), np.nan), index=ph_line.index)
    features = prot_line[nonull].values.reshape(-1, 1)
    labels = ph_line[nonull].values
    ridgecv_kwargs['alphas'] = regularization_values
    ridgecv_kwargs['cv'] = cv
    model = RidgeCV(**ridgecv_kwargs).fit(features, labels)
    if prevent_negative_coefficients and (model.coef_[0] <= 0):
        # Non-positive protein->phospho coefficient: treat as un-normalizable.
        # BUG FIX: same np.empty -> np.full(..., np.nan) correction as above.
        return pd.Series(np.full(len(ph_line), np.nan), index=ph_line.index)
    prediction = model.predict(features)
    residuals = labels - prediction
    return pd.Series(residuals, index=ph_line[nonull].index)
def multiple_tests_na(pvals: np.array, **multitest_kwargs):
    """Run statsmodels.stats.multitest.multipletests, passing NaNs through.

    Args:
        pvals: Vector of p-values to correct (may contain NaN/inf).
        **multitest_kwargs: Additional keyword args for multipletests.

    Returns: Vector of corrected p-values, NaN wherever the input was not finite.
    """
    finite_mask = np.isfinite(pvals)
    corrected = np.full(pvals.shape, np.nan)
    # multipletests returns (reject, pvals_corrected, ...); we keep index 1.
    corrected[finite_mask] = multipletests(pvals[finite_mask], **multitest_kwargs)[1]
    return corrected
def not_na(array):
    """Boolean mask of non-null entries for a pd.Series or np.array.

    Args:
        array: Vector of values.

    Returns: Vector of booleans, True where the value is present.
    """
    if isinstance(array, Series):
        return array.notna()
    return ~np.isnan(array)
def corr_na(array1, array2, corr_method: str = 'spearmanr', **addl_kws):
    """Correlation method that tolerates missing values. Can take pearsonr or spearmanr.

    Args:
        array1: Vector of values.
        array2: Vector of values.
        corr_method: Which method to use, pearsonr or spearmanr.
        **addl_kws: Additional keyword args to pass to scipy.stats corr methods.

    Returns: R and p-value from correlation of 2 vectors; (nan, nan) when fewer
        than 3 paired non-null observations are available.

    Raises:
        ValueError: If corr_method is not 'pearsonr' or 'spearmanr'.
    """
    # Dispatch table instead of eval(): eval on a caller-supplied string is
    # both slower and an injection hazard.
    corr_funcs = {'pearsonr': pearsonr, 'spearmanr': spearmanr}
    if corr_method not in corr_funcs:
        # BUG FIX: message previously read "is a valid correlation method".
        raise ValueError(
            'Method %s is not a valid correlation method, must be: %s'
            % (corr_method, ','.join(corr_funcs))
        )
    nonull = np.logical_and(not_na(array1), not_na(array2))
    if sum(nonull) > 2:
        return corr_funcs[corr_method](array1[nonull], array2[nonull], **addl_kws)
    return np.nan, np.nan
def zscore(df):
    """Row z-score a DataFrame; pandas mean/std skip np.nan by default.

    Args:
        df (DataFrame): DataFrame to z-score.

    Returns (DataFrame):
        Row-zscored DataFrame.
    """
    row_means = df.mean(axis=1)
    row_stds = df.std(axis=1)
    return df.sub(row_means, axis=0).div(row_stds, axis=0)
def missing_and_stdev_filter(df, na_frac_threshold=0.25, std_quantile_threshold = 0.5):
    """
    Filter ROWS of *df* by missingness and variability: drops rows with more
    than ``na_frac_threshold`` (default 25%) of their values missing, then
    keeps only rows whose standard deviation is above the
    ``std_quantile_threshold`` quantile (default median) of the remaining
    rows' standard deviations.

    Note: the aggregates use axis=1 (across columns), so the filtering is
    row-wise, not column-wise.
    """
    # Keep rows with an acceptable fraction of missing values.
    df_filt = df.loc[df.isnull().sum(axis=1)<df.shape[1]*na_frac_threshold]
    # Keep the more variable rows (upper quantile of per-row std).
    df_filt = df_filt.loc[df_filt.std(axis=1)>np.quantile(df_filt.std(axis=1), std_quantile_threshold)]
    return df_filt
| 35.22963 | 110 | 0.697435 |
fcc8cd3bceb2b8c5606cf3c4a1aa4371216b38b4 | 2,584 | py | Python | redux/mods/api/Twitch.py | PanjaCo/Redux-Bot | 15f4410b3cff137785028b0df4e27258ecad1a04 | [
"MIT"
] | 1 | 2018-02-18T04:05:18.000Z | 2018-02-18T04:05:18.000Z | redux/mods/api/Twitch.py | iPanja/Redux-Bot | 15f4410b3cff137785028b0df4e27258ecad1a04 | [
"MIT"
] | null | null | null | redux/mods/api/Twitch.py | iPanja/Redux-Bot | 15f4410b3cff137785028b0df4e27258ecad1a04 | [
"MIT"
] | null | null | null | """ DOES NOT WORK --- Will be fixed in a future update"""
import discord, requests, json
from discord.ext import commands
import config
from pprint import pprint
class Twitch:
    """Discord cog that looks up live streams via the Twitch Helix API."""

    def __init__(self, bot, config):
        self.bot = bot
        self.config = config
        # Helix authenticates requests via the Client-ID header.
        self.headers = {'Authorization' : 'Client-ID ' + config['client_id']}

    @commands.command(pass_context = True)
    async def tGameStreamers(self, ctx):
        """Reply with an embed listing the top three live streamers for a game.

        The game name is everything after the command word in the message.
        """
        query = "".join((ctx.message.content).split()[1:])
        game = self.getGame(query)
        www = self.config['url'] + 'streams?first=3&game_id=' + game.id
        r = requests.get(www, headers=self.headers)
        jsonDict = json.loads(json.dumps(r.json()))
        pprint(jsonDict)
        streamers = list()
        for s in jsonDict['data']:
            streamer = Streamer(s['id'], s['user_id'], s['type'], s['title'], s['viewer_count'])
            streamer = self.getName(streamer)
            # BUG FIX: the list was never populated, so the embed was always empty.
            streamers.append(streamer)
        embed = discord.Embed(title=game.name, description="Top streamers")
        for s in streamers:
            embed.add_field(name=s.name, value=s.type)
            embed.add_field(name="Title", value=s.title)
            embed.add_field(name="Viewers", value=s.viewer_count)
            # BUG FIX: add_field requires a value; a zero-width space keeps
            # the divider look without raising TypeError.
            embed.add_field(name="------------------------------", value="\u200b")
        await self.bot.send_message(ctx.message.channel, embed=embed)

    def getGame(self, gameName):
        """Look up a game by name and return it as a Game record."""
        www = self.config['url'] + 'games/?name=' + gameName
        r = requests.get(www, headers=self.headers)
        jsonDict = json.loads(json.dumps(r.json()))
        pprint(jsonDict)
        game = Game(jsonDict[0]['id'], jsonDict[0]['name'], jsonDict[0]['box_art_url'])
        return game

    def getName(self, streamer):
        """Fill in *streamer*'s display name from the Helix users endpoint."""
        www = self.config['url'] + 'users/?id=' + streamer.user_id
        r = requests.get(www, headers=self.headers)
        jsonDict = json.loads(json.dumps(r.json()))
        pprint(jsonDict)
        streamer.name = jsonDict[0]['display_name']
        return streamer
class Game:
    """Value object describing a Twitch game lookup result."""

    def __init__(self, id, name, box_art_url):
        # Keep the raw Helix fields as-is.
        self.box_art_url = box_art_url
        self.name = name
        self.id = id
class Streamer:
    """Record for one live stream; ``name`` is resolved later via the users API."""

    def __init__(self, id, user_id, type, title, viewer_count, name=None):
        self.name = name
        self.viewer_count = viewer_count
        self.title = title
        self.type = type
        self.user_id = user_id
        self.id = id
def setup(bot):
    """discord.py extension entry point: register the Twitch cog on *bot*."""
    try:
        bot.add_cog(Twitch(bot, config.twitch))
        print("[Twitch Module Loaded]")
    except Exception as e:
        # Swallow registration errors so one bad cog doesn't crash the bot.
        print(" >> Twitch Module: {0}".format(e))
36b1fcafa736d399b2782d92eff20ff9806e542c | 1,908 | py | Python | Database/djangodatabase/penpals/migrations/0001_initial.py | yichenesia/newhacks-your-name | a905a4a9b513d12ed1f40089c5cf4b8403d9a4bd | [
"MIT"
] | null | null | null | Database/djangodatabase/penpals/migrations/0001_initial.py | yichenesia/newhacks-your-name | a905a4a9b513d12ed1f40089c5cf4b8403d9a4bd | [
"MIT"
] | null | null | null | Database/djangodatabase/penpals/migrations/0001_initial.py | yichenesia/newhacks-your-name | a905a4a9b513d12ed1f40089c5cf4b8403d9a4bd | [
"MIT"
] | 2 | 2020-11-08T05:34:20.000Z | 2020-11-17T20:16:09.000Z | # Generated by Django 3.1.3 on 2020-11-08 04:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the penpals app.

    Auto-generated by Django (makemigrations); creates the Penpals profile
    table, the Letters table, and the Friend link table. Avoid hand-editing
    field definitions -- later migrations depend on this recorded state.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Pen-pal user profile; user_id is the surrogate primary key.
        migrations.CreateModel(
            name='Penpals',
            fields=[
                ('user_id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=25)),
                ('gender', models.CharField(max_length=50)),
                ('location', models.CharField(max_length=100)),
                ('bio', models.CharField(max_length=140)),
                ('password', models.CharField(max_length=25)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        # A letter from one pen pal ("sender"/writer) to another ("to"/receiver).
        migrations.CreateModel(
            name='Letters',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=100)),
                ('content', models.CharField(max_length=25000)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='writer', to='penpals.penpals')),
                ('to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='receiver', to='penpals.penpals')),
            ],
        ),
        # Friendship link between two Penpals rows.
        migrations.CreateModel(
            name='Friend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('id1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='friend1', to='penpals.penpals')),
                ('id2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='friend2', to='penpals.penpals')),
            ],
        ),
    ]
| 41.478261 | 136 | 0.587002 |
ef167393b1fce15c2c87f700fa03ccca4a9e8bdc | 4,938 | py | Python | scripts_Python/LSTM_mod_attention.py | ilariagnd/CardioICURIsk | 07aeddaa5a2140d279c57f273bdc4c5bf326e47f | [
"MIT"
] | 1 | 2021-07-19T15:58:25.000Z | 2021-07-19T15:58:25.000Z | scripts_Python/LSTM_mod_attention.py | ilariagnd/CardioICURisk | 07aeddaa5a2140d279c57f273bdc4c5bf326e47f | [
"MIT"
] | null | null | null | scripts_Python/LSTM_mod_attention.py | ilariagnd/CardioICURisk | 07aeddaa5a2140d279c57f273bdc4c5bf326e47f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
import scipy
from IPython.display import clear_output
from tensorflow.keras import activations, backend
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import os.path
import tensorflow.keras as keras
from tensorflow.keras import backend as K
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten,LSTM, TimeDistributed, Masking, Reshape, Lambda, RepeatVector, Permute, multiply
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import StratifiedKFold, GridSearchCV, RepeatedKFold
from sklearn.utils import resample
from sklearn.metrics import roc_curve,roc_auc_score, confusion_matrix
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import KFold
import shap as sh
from keras import backend as K
tf.compat.v1.disable_eager_execution()
# should be 2.1.0
tf.__version__
# function for attention layer
def attention(inputs, SHAPE):
    """Apply a learned softmax attention mask to *inputs* (element-wise multiply).

    *SHAPE* is the feature dimension size; the mask is produced by a Dense
    layer with softmax over the feature axis.
    """
    n_steps = int(inputs.shape[1])
    # NOTE(review): Permute((1, 2)) leaves the (non-batch) axes unchanged --
    # Keras Permute is 1-indexed -- so this is a no-op; attention examples
    # typically use Permute((2, 1)). Confirm intended.
    a = Permute((1, 2))(inputs)
    a = Reshape((n_steps, SHAPE))(a)
    # Softmax over the last (feature) axis yields the attention weights.
    a = Dense(SHAPE, activation='softmax', name='attention_vec')(a)
    output_attention_mul = multiply([inputs, a])
    return output_attention_mul
# function to extract activation weights
def get_activations(model, inputs, print_shape_only=False, layer_name=None, verbose=False):
    """Run *inputs* through *model* and collect per-layer output tensors.

    If *layer_name* is given, only layers with that exact name are evaluated;
    otherwise every layer's output is returned (in model order).
    """
    activations = []
    inp = model.input
    if layer_name is None:
        outputs = [layer.output for layer in model.layers]
    else:
        outputs = [layer.output for layer in model.layers if layer.name == layer_name]
    # One backend function per requested output; the extra input feeds
    # K.learning_phase() used by layers such as Dropout.
    funcs = [K.function([inp] + [K.learning_phase()], [out]) for out in outputs]
    # NOTE(review): 1. selects training-phase behavior; activation-extraction
    # snippets usually pass 0. (test phase) -- confirm intended.
    layer_outputs = [func([inputs, 1.])[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
        if verbose:
            if print_shape_only:
                print(layer_activations.shape)
            else:
                print(layer_activations)
    return activations
# Function that creates the model based on parameters
def create_model(optimizer="adam", dropout=0.2, init='uniform', dense_nparams1=128, lr=0.001, n_wind=10):
    """Build and compile the attention + LSTM binary classifier.

    Relies on the module-level ``n_features`` global being set before the call.

    NOTE(review): the ``optimizer`` and ``init`` parameters are currently
    unused -- the model always compiles with RMSprop(lr=lr). Confirm intended.
    """
    input_layer = Input(shape=(n_wind, n_features))
    # Attention mask over the raw inputs, then a single LSTM layer.
    x = attention(input_layer, n_features)
    x = LSTM(dense_nparams1, activation='tanh', return_sequences=False, recurrent_dropout = dropout)(x)
    # Single sigmoid unit -> probability for the positive class.
    preds = Dense(1, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=preds)
    RMS = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08)
    model.compile(optimizer=RMS, loss='binary_crossentropy', metrics=['acc'])
    return model
# Read training and test sets
train_x_path = "CardioICURisk/output/o4.x_train.csv"
test_x_path = "CardioICURisk/output/o4.x_test.csv"
train_y_path = "CardioICURisk/output/o4.y_train.csv"
test_y_path = "CardioICURisk/output/o4.y_test.csv"
# skiprows=1 drops the CSV header; usecols=1 keeps only the label column.
x_train=np.loadtxt(open(train_x_path, 'rt'), delimiter=",", skiprows=1)
y_train=np.loadtxt(open(train_y_path, 'rt'), delimiter=",", skiprows=1, usecols = 1)
x_test=np.loadtxt(open(test_x_path, 'rt'), delimiter=",", skiprows=1)
y_test=np.loadtxt(open(test_y_path, 'rt'), delimiter=",", skiprows=1, usecols = 1)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = x_train.shape[1]
n_wind = 10  # time windows per individual
n_ind_train = int(x_train.shape[0]/n_wind)
n_ind_test = int(x_test.shape[0]/n_wind)
x_train = x_train.reshape((n_ind_train, 10, n_features))
x_test = x_test.reshape((n_ind_test, 10, n_features))
x_train.shape, y_train.shape, x_test.shape, y_train.shape  # notebook-style shape echo
# select model's parameters based on best performance of 10-fold cross-validation
cv_res = pd.read_csv("CardioICURisk/output/o5.models_params.csv")
cv_res=cv_res.sort_values(by=['auc'], ascending=False)
# Hyperparameters taken from the top-AUC row.
dropout1= cv_res['dropout'].iloc[0]
unit_n1 = cv_res['unit_n'].iloc[0]
epoch_n1 = cv_res['epoch_n'].iloc[0]
lr1 = cv_res['lr'].iloc[0]
batch_n1 = cv_res['batch_n'].iloc[0]
# Create and train the model
K.clear_session()
model=create_model(optimizer="adam", dropout=dropout1, init='uniform', dense_nparams1=unit_n1, lr=lr1, n_wind=10)
model.fit(x_train, y_train, batch_size=batch_n1, epochs=epoch_n1,
          validation_split=0.2, verbose=0)
# save output files
model.save('CardioICURisk/output/o5.fin_model.h5')
y_test_prob=model.predict(x_test)
np.savetxt("CardioICURisk/output/o5.fin_model_pred.csv", y_test_prob, delimiter=',')
# Attention-layer activations per test sample, flattened to 2-D for CSV export.
activations = get_activations(model, x_test, print_shape_only=True, layer_name='attention_vec', verbose=True)[0]
act_2d=activations.transpose(0,2,1).reshape(x_test.shape[0], x_test.shape[2]*10)
np.savetxt("CardioICURisk/output/o5.fin_model_act.csv", act_2d, delimiter=',')
| 33.14094 | 162 | 0.739773 |
cb5c430bba5c7565a5408ce4432c0ed828f5e664 | 4,815 | py | Python | stdplugins/zombies.py | kaalhoonme/PepeBot | d1678f3c5e57adb8c9d2e1bc5a54568ad2938258 | [
"Apache-2.0"
] | null | null | null | stdplugins/zombies.py | kaalhoonme/PepeBot | d1678f3c5e57adb8c9d2e1bc5a54568ad2938258 | [
"Apache-2.0"
] | null | null | null | stdplugins/zombies.py | kaalhoonme/PepeBot | d1678f3c5e57adb8c9d2e1bc5a54568ad2938258 | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""".zombies
Usage: Searches for deleted accounts in a groups and channels.
Use .zombies clean to remove deleted accounts from the groups and channels.
\nPorted by ©[NIKITA](t.me/kirito6969) and ©[EYEPATCH](t.me/NeoMatrix90)"""
from telethon import events
import requests
from uniborg.util import admin_cmd
#
from asyncio import sleep
from os import remove
from telethon.errors import (BadRequestError, ChatAdminRequiredError,
ImageProcessFailedError, PhotoCropSizeSmallError,
UserAdminInvalidError)
from telethon.errors.rpcerrorlist import (UserIdInvalidError,
MessageTooLongError)
from telethon.tl.functions.channels import (EditAdminRequest,
EditBannedRequest,
EditPhotoRequest)
from telethon.tl.functions.messages import UpdatePinnedMessageRequest
from telethon.tl.types import (ChannelParticipantsAdmins, ChatAdminRights,
ChatBannedRights, MessageEntityMentionName,
MessageMediaPhoto)
# from uniborg.events import register
# =================== CONSTANT ===================
# Shared status strings (plugin boilerplate; most are not referenced in this file).
PP_TOO_SMOL = "`The image is too small`"
PP_ERROR = "`Failure while processing the image`"
NO_ADMIN = "`I am not an admin!`"
NO_PERM = "`I don't have sufficient permissions!`"
NO_SQL = "`Running on Non-SQL mode!`"
CHAT_PP_CHANGED = "`Chat Picture Changed`"
CHAT_PP_ERROR = "`Some issue with updating the pic,`" \
    "`maybe coz I'm not an admin,`" \
    "`or don't have enough rights.`"
INVALID_MEDIA = "`Invalid Extension`"

# Rights mask that revokes everything -- used to (briefly) ban an account.
BANNED_RIGHTS = ChatBannedRights(
    until_date=None,
    view_messages=True,
    send_messages=True,
    send_media=True,
    send_stickers=True,
    send_gifs=True,
    send_games=True,
    send_inline=True,
    embed_links=True,
)
# Rights mask that lifts every restriction again (None = no restriction).
UNBAN_RIGHTS = ChatBannedRights(
    until_date=None,
    send_messages=None,
    send_media=None,
    send_stickers=None,
    send_gifs=None,
    send_games=None,
    send_inline=None,
    embed_links=None,
)
# Mute/unmute only toggle the send_messages restriction.
MUTE_RIGHTS = ChatBannedRights(until_date=None, send_messages=True)
UNMUTE_RIGHTS = ChatBannedRights(until_date=None, send_messages=False)
# ================================================
@borg.on(events.NewMessage(pattern="^.zombies(?: |$)(.*)", outgoing=True))
async def rm_deletedacc(show):
    """ For .zombies command, list all the ghost/deleted/zombie accounts in a chat. """
    con = show.pattern_match.group(1).lower()
    del_u = 0
    del_status = "`No deleted accounts found, Group is clean`"
    if con != "clean":
        # Count-only mode: just report how many deleted accounts exist.
        await show.edit("`Searching for ghost/deleted/zombie accounts...`")
        async for user in show.client.iter_participants(show.chat_id):
            if user.deleted:
                del_u += 1
                # Small delay to stay under Telegram flood limits.
                await sleep(1)
        if del_u > 0:
            del_status = f"`Found` **{del_u}** `ghost/deleted/zombie account(s) in this group,\
\nclean them by using .zombies clean`"
        await show.edit(del_status)
        return
    # Here laying the sanity check
    chat = await show.get_chat()
    admin = chat.admin_rights
    creator = chat.creator
    # Well
    if not admin and not creator:
        await show.edit("`I am not an admin here!`")
        return
    await show.edit("`Deleting deleted accounts...\nOh I can do that?!?!`")
    del_u = 0
    del_a = 0
    async for user in show.client.iter_participants(show.chat_id):
        if user.deleted:
            try:
                # Ban+unban kicks the deleted account out of the chat.
                await show.client(
                    EditBannedRequest(show.chat_id, user.id, BANNED_RIGHTS))
            except ChatAdminRequiredError:
                await show.edit("`I don't have ban rights in this group`")
                return
            except UserAdminInvalidError:
                # Deleted account is an admin: cannot be removed, only counted.
                del_u -= 1
                del_a += 1
            await show.client(
                EditBannedRequest(show.chat_id, user.id, UNBAN_RIGHTS))
            del_u += 1
    if del_u > 0:
        del_status = f"Cleaned **{del_u}** deleted account(s)"
    if del_a > 0:
        del_status = f"Cleaned **{del_u}** deleted account(s) \
\n**{del_a}** deleted admin accounts are not removed"
    await show.edit(del_status)
    await sleep(2)
    await show.delete()
    # Optionally log the cleanup to the configured logger group.
    if Config.G_BAN_LOGGER_GROUP is not None:
        await show.client.send_message(
            Config.G_BAN_LOGGER_GROUP, "#CLEANUP\n"
            f"Cleaned **{del_u}** deleted account(s) !!\
\nCHAT: {show.chat.title}(`{show.chat_id}`)")
f164c97e9c0ad5b8af9351c8079778a5baabd520 | 634 | py | Python | web/apps/importer/context.py | exploratour/exploratour | b6d998a5aa471c1f9eba94bd093032d3e20f6d81 | [
"Zlib"
] | null | null | null | web/apps/importer/context.py | exploratour/exploratour | b6d998a5aa471c1f9eba94bd093032d3e20f6d81 | [
"Zlib"
] | null | null | null | web/apps/importer/context.py | exploratour/exploratour | b6d998a5aa471c1f9eba94bd093032d3e20f6d81 | [
"Zlib"
] | null | null | null | import threading
class IdAllocator(object):
    """Thread-safe allocator of sequential integer ids, starting at 1."""

    def __init__(self):
        # A Condition doubles as a plain lock here; only mutual exclusion
        # is needed for get().
        self.cond = threading.Condition()
        self.nextid = 1

    def get(self):
        """Return the next unused id (thread-safe)."""
        # `with` replaces the manual acquire/try/finally/release dance and
        # guarantees release even if an exception is raised.
        with self.cond:
            ret = self.nextid
            self.nextid += 1
            return ret
# Module-wide allocator shared by all ImportContext instances.
g_allocator = IdAllocator()
class ImportContext(object):
    """Holds state for a single import run (id, error, arbitrary attributes)."""

    # Defaults until setup()/set_error() are called.
    id = None
    error = None

    def set_error(self, msg):
        """Record an error message; returns self for chaining."""
        self.error = msg
        return self

    def setup(self, **kwargs):
        """Assign a fresh id from the global allocator and copy kwargs onto self."""
        self.id = g_allocator.get()
        # BUG FIX: .iteritems() is Python-2-only; .items() works on both 2 and 3.
        for k, v in kwargs.items():
            setattr(self, k, v)
| 20.451613 | 41 | 0.547319 |
eb15a41a244540f8b697a7ac2d8a5e20e26d3941 | 1,217 | py | Python | serializers/app_settings.py | CloudCIX/membership | a7a62918c7d7c65dd1bf2068431dbf2ec2573e4b | [
"Apache-2.0"
] | null | null | null | serializers/app_settings.py | CloudCIX/membership | a7a62918c7d7c65dd1bf2068431dbf2ec2573e4b | [
"Apache-2.0"
] | null | null | null | serializers/app_settings.py | CloudCIX/membership | a7a62918c7d7c65dd1bf2068431dbf2ec2573e4b | [
"Apache-2.0"
] | null | null | null | # libs
import serpy
class AppSettingsSerializer(serpy.Serializer):
    """
    created:
        description: The date that the App Settings entry was created.
        type: string
    id:
        description: The ID of the App Settings record.
        type: integer
    minio_access_key:
        description: Access key is like a user ID that uniquely identifies your MinIO account.
        type: string
    minio_secret_key:
        description: Secret key is the password to your MinIO account.
        type: string
    minio_url:
        description: The url for the MinIO instance for the COP.
        type: string
    updated:
        description: The date that the App Settings entry was last updated.
        type: string
    uri:
        description: |
            The absolute URL of the App Settings record that can be used to perform `Read`, `Update` and `Delete`.
        type: string
    """
    # Timestamps are serialized as ISO-8601 strings via the bound method.
    created = serpy.Field(attr='created.isoformat', call=True)
    id = serpy.Field()
    minio_access_key = serpy.Field()
    minio_secret_key = serpy.Field()
    minio_url = serpy.Field()
    updated = serpy.Field(attr='updated.isoformat', call=True)
    uri = serpy.Field(attr='get_absolute_url', call=True)
| 32.891892 | 114 | 0.657354 |
8815b0f7982bbfdbbccced36a85b0924f78b70d1 | 48,094 | py | Python | environ/lib/python3.8/site-packages/werkzeug/test.py | EsauKip/Blog-post | e5716af25139ab4e867767990f04f0749fc9bf40 | [
"MIT"
] | 1 | 2021-05-24T10:08:51.000Z | 2021-05-24T10:08:51.000Z | environ/lib/python3.8/site-packages/werkzeug/test.py | EsauKip/Blog-post | e5716af25139ab4e867767990f04f0749fc9bf40 | [
"MIT"
] | null | null | null | environ/lib/python3.8/site-packages/werkzeug/test.py | EsauKip/Blog-post | e5716af25139ab4e867767990f04f0749fc9bf40 | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | import mimetypes
import sys
import typing as t
import warnings
from collections import defaultdict
from datetime import datetime
from datetime import timedelta
from http.cookiejar import CookieJar
from io import BytesIO
from itertools import chain
from random import random
from tempfile import TemporaryFile
from time import time
from urllib.request import Request as _UrllibRequest
from ._internal import _get_environ
from ._internal import _make_encode_wrapper
from ._internal import _wsgi_decoding_dance
from ._internal import _wsgi_encoding_dance
from .datastructures import Authorization
from .datastructures import CallbackDict
from .datastructures import CombinedMultiDict
from .datastructures import EnvironHeaders
from .datastructures import FileMultiDict
from .datastructures import Headers
from .datastructures import MultiDict
from .http import dump_cookie
from .http import dump_options_header
from .http import parse_options_header
from .sansio.multipart import Data
from .sansio.multipart import Epilogue
from .sansio.multipart import Field
from .sansio.multipart import File
from .sansio.multipart import MultipartEncoder
from .sansio.multipart import Preamble
from .urls import iri_to_uri
from .urls import url_encode
from .urls import url_fix
from .urls import url_parse
from .urls import url_unparse
from .urls import url_unquote
from .utils import get_content_type
from .wrappers.request import Request
from .wrappers.response import Response
from .wsgi import ClosingIterator
from .wsgi import get_current_url
if t.TYPE_CHECKING:
from wsgiref.types import WSGIApplication
from wsgiref.types import WSGIEnvironment
def stream_encode_multipart(
data: t.Mapping[str, t.Any],
use_tempfile: bool = True,
threshold: int = 1024 * 500,
boundary: t.Optional[str] = None,
charset: str = "utf-8",
) -> t.Tuple[t.BinaryIO, int, str]:
"""Encode a dict of values (either strings or file descriptors or
:class:`FileStorage` objects.) into a multipart encoded string stored
in a file descriptor.
"""
if boundary is None:
boundary = f"---------------WerkzeugFormPart_{time()}{random()}"
stream: t.BinaryIO = BytesIO()
total_length = 0
on_disk = False
if use_tempfile:
def write_binary(s: bytes) -> int:
nonlocal stream, total_length, on_disk
if on_disk:
return stream.write(s)
else:
length = len(s)
if length + total_length <= threshold:
stream.write(s)
else:
new_stream = t.cast(t.BinaryIO, TemporaryFile("wb+"))
new_stream.write(stream.getvalue()) # type: ignore
new_stream.write(s)
stream = new_stream
on_disk = True
total_length += length
return length
else:
write_binary = stream.write
encoder = MultipartEncoder(boundary.encode())
write_binary(encoder.send_event(Preamble(data=b"")))
for key, value in _iter_data(data):
reader = getattr(value, "read", None)
if reader is not None:
filename = getattr(value, "filename", getattr(value, "name", None))
content_type = getattr(value, "content_type", None)
if content_type is None:
content_type = (
filename
and mimetypes.guess_type(filename)[0]
or "application/octet-stream"
)
headers = Headers([("Content-Type", content_type)])
if filename is None:
write_binary(encoder.send_event(Field(name=key, headers=headers)))
else:
write_binary(
encoder.send_event(
File(name=key, filename=filename, headers=headers)
)
)
while True:
chunk = reader(16384)
if not chunk:
break
write_binary(encoder.send_event(Data(data=chunk, more_data=True)))
else:
if not isinstance(value, str):
value = str(value)
write_binary(encoder.send_event(Field(name=key, headers=Headers())))
write_binary(
encoder.send_event(Data(data=value.encode(charset), more_data=False))
)
write_binary(encoder.send_event(Epilogue(data=b"")))
length = stream.tell()
stream.seek(0)
return stream, length, boundary
def encode_multipart(
values: t.Mapping[str, t.Any],
boundary: t.Optional[str] = None,
charset: str = "utf-8",
) -> t.Tuple[str, bytes]:
"""Like `stream_encode_multipart` but returns a tuple in the form
(``boundary``, ``data``) where data is bytes.
"""
stream, length, boundary = stream_encode_multipart(
values, use_tempfile=False, boundary=boundary, charset=charset
)
return boundary, stream.read()
class _TestCookieHeaders:
"""A headers adapter for cookielib"""
def __init__(self, headers: t.Union[Headers, t.List[t.Tuple[str, str]]]) -> None:
self.headers = headers
def getheaders(self, name: str) -> t.Iterable[str]:
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(
self, name: str, default: t.Optional[t.Iterable[str]] = None
) -> t.Iterable[str]:
headers = self.getheaders(name)
if not headers:
return default # type: ignore
return headers
class _TestCookieResponse:
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers: t.Union[Headers, t.List[t.Tuple[str, str]]]) -> None:
self.headers = _TestCookieHeaders(headers)
def info(self) -> _TestCookieHeaders:
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ: "WSGIEnvironment") -> None:
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = [f"{c.name}={c.value}" for c in self]
if cvals:
environ["HTTP_COOKIE"] = "; ".join(cvals)
else:
environ.pop("HTTP_COOKIE", None)
def extract_wsgi(
self,
environ: "WSGIEnvironment",
headers: t.Union[Headers, t.List[t.Tuple[str, str]]],
) -> None:
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers), # type: ignore
_UrllibRequest(get_current_url(environ)),
)
def _iter_data(data: t.Mapping[str, t.Any]) -> t.Iterator[t.Tuple[str, t.Any]]:
"""Iterate over a mapping that might have a list of values, yielding
all key, value pairs. Almost like iter_multi_items but only allows
lists, not tuples, of values so tuples can be used for files.
"""
if isinstance(data, MultiDict):
yield from data.items(multi=True)
else:
for key, value in data.items():
if isinstance(value, list):
for v in value:
yield key, v
else:
yield key, value
_TAnyMultiDict = t.TypeVar("_TAnyMultiDict", bound=MultiDict)
class EnvironBuilder:
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`Response.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str` or `bytes` object: The object is converted into an
:attr:`input_stream`, the :attr:`content_length` is set and you have to
provide a :attr:`content_type`.
- a `dict` or :class:`MultiDict`: The keys have to be strings. The values
have to be either any of the following objects, or a list of any of the
following objects:
- a :class:`file`-like object: These are converted into
:class:`FileStorage` objects automatically.
- a `tuple`: The :meth:`~FileMultiDict.add_file` method is called
with the key and the unpacked `tuple` items as positional
arguments.
- a `str`: The string is set as form data for the associated key.
- a file-like object: The object content is loaded in memory and then
handled like a regular `str` or a `bytes`.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data or a file-object.
See explanation above.
:param json: An object to be serialized and assigned to ``data``.
Defaults the content type to ``"application/json"``.
Serialized with the function assigned to :attr:`json_dumps`.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode string data.
:param auth: An authorization object to use for the
``Authorization`` header value. A ``(username, password)`` tuple
is a shortcut for ``Basic`` authorization.
.. versionchanged:: 2.0
``REQUEST_URI`` and ``RAW_URI`` is the full raw URI including
the query string, not only the path.
.. versionchanged:: 2.0
The default :attr:`request_class` is ``Request`` instead of
``BaseRequest``.
.. versionadded:: 2.0
Added the ``auth`` parameter.
.. versionadded:: 0.15
The ``json`` param and :meth:`json_dumps` method.
.. versionadded:: 0.15
The environ has keys ``REQUEST_URI`` and ``RAW_URI`` containing
the path before perecent-decoding. This is not part of the WSGI
PEP, but many WSGI servers include it.
.. versionchanged:: 0.6
``path`` and ``base_url`` can now be unicode strings that are
encoded with :func:`iri_to_uri`.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = "HTTP/1.1"
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: The default request class used by :meth:`get_request`.
request_class = Request
import json
#: The serialization function used when ``json`` is passed.
json_dumps = staticmethod(json.dumps)
del json
_args: t.Optional[MultiDict]
_query_string: t.Optional[str]
_input_stream: t.Optional[t.BinaryIO]
_form: t.Optional[MultiDict]
_files: t.Optional[FileMultiDict]
    def __init__(
        self,
        path: str = "/",
        base_url: t.Optional[str] = None,
        query_string: t.Optional[t.Union[t.Mapping[str, str], str]] = None,
        method: str = "GET",
        input_stream: t.Optional[t.BinaryIO] = None,
        content_type: t.Optional[str] = None,
        content_length: t.Optional[int] = None,
        errors_stream: t.Optional[t.TextIO] = None,
        multithread: bool = False,
        multiprocess: bool = False,
        run_once: bool = False,
        headers: t.Optional[t.Union[Headers, t.Iterable[t.Tuple[str, str]]]] = None,
        data: t.Optional[t.Union[t.BinaryIO, str, bytes, t.Mapping[str, t.Any]]] = None,
        environ_base: t.Optional[t.Mapping[str, t.Any]] = None,
        environ_overrides: t.Optional[t.Mapping[str, t.Any]] = None,
        charset: str = "utf-8",
        mimetype: t.Optional[str] = None,
        json: t.Optional[t.Mapping[str, t.Any]] = None,
        auth: t.Optional[t.Union[Authorization, t.Tuple[str, str]]] = None,
    ) -> None:
        """Initialize the builder. Parameters are documented on the class.

        NOTE: assignment order below matters — several attributes are
        properties whose setters invalidate each other (e.g. setting
        ``input_stream`` clears ``form``/``files``).
        """
        path_s = _make_encode_wrapper(path)
        # A query string may come from the path *or* the argument, never both.
        if query_string is not None and path_s("?") in path:
            raise ValueError("Query string is defined in the path and as an argument")
        request_uri = url_parse(path)
        if query_string is None and path_s("?") in path:
            query_string = request_uri.query
        self.charset = charset
        self.path = iri_to_uri(request_uri.path)
        self.request_uri = path
        if base_url is not None:
            base_url = url_fix(iri_to_uri(base_url, charset), charset)
        self.base_url = base_url  # type: ignore
        if isinstance(query_string, (bytes, str)):
            self.query_string = query_string
        else:
            # A mapping (or None) becomes a mutable MultiDict exposed as ``args``.
            if query_string is None:
                query_string = MultiDict()
            elif not isinstance(query_string, MultiDict):
                query_string = MultiDict(query_string)
            self.args = query_string
        self.method = method
        if headers is None:
            headers = Headers()
        elif not isinstance(headers, Headers):
            headers = Headers(headers)
        self.headers = headers
        if content_type is not None:
            self.content_type = content_type
        if errors_stream is None:
            errors_stream = sys.stderr
        self.errors_stream = errors_stream
        self.multithread = multithread
        self.multiprocess = multiprocess
        self.run_once = run_once
        self.environ_base = environ_base
        self.environ_overrides = environ_overrides
        self.input_stream = input_stream
        self.content_length = content_length
        self.closed = False
        if auth is not None:
            # A (username, password) tuple is shorthand for Basic auth.
            if isinstance(auth, tuple):
                auth = Authorization(
                    "basic", {"username": auth[0], "password": auth[1]}
                )
            self.headers.set("Authorization", auth.to_header())
        if json is not None:
            # ``json`` is serialized into ``data``; providing both is ambiguous.
            if data is not None:
                raise TypeError("can't provide both json and data")
            data = self.json_dumps(json)
            if self.content_type is None:
                self.content_type = "application/json"
        if data:
            if input_stream is not None:
                raise TypeError("can't provide input stream and data")
            if hasattr(data, "read"):
                data = data.read()  # type: ignore
            if isinstance(data, str):
                data = data.encode(self.charset)
            if isinstance(data, bytes):
                self.input_stream = BytesIO(data)
                if self.content_length is None:
                    self.content_length = len(data)
            else:
                # Mapping data: tuple/dict/file-like values become uploads,
                # everything else is collected as form fields.
                for key, value in _iter_data(data):  # type: ignore
                    if isinstance(value, (tuple, dict)) or hasattr(value, "read"):
                        self._add_file_from_data(key, value)
                    else:
                        self.form.setlistdefault(key).append(value)
        # Set last so it overrides any content type inferred from the data.
        if mimetype is not None:
            self.mimetype = mimetype
    @classmethod
    def from_environ(
        cls, environ: "WSGIEnvironment", **kwargs: t.Any
    ) -> "EnvironBuilder":
        """Turn an environ dict back into a builder. Any extra kwargs
        override the args extracted from the environ.
        .. versionchanged:: 2.0
            Path and query values are passed through the WSGI decoding
            dance to avoid double encoding.
        .. versionadded:: 0.15
        """
        headers = Headers(EnvironHeaders(environ))
        out = {
            "path": _wsgi_decoding_dance(environ["PATH_INFO"]),
            "base_url": cls._make_base_url(
                environ["wsgi.url_scheme"],
                headers.pop("Host"),
                _wsgi_decoding_dance(environ["SCRIPT_NAME"]),
            ),
            "query_string": _wsgi_decoding_dance(environ["QUERY_STRING"]),
            "method": environ["REQUEST_METHOD"],
            "input_stream": environ["wsgi.input"],
            # These are popped so they feed the dedicated constructor
            # arguments instead of being duplicated inside ``headers``.
            "content_type": headers.pop("Content-Type", None),
            "content_length": headers.pop("Content-Length", None),
            "errors_stream": environ["wsgi.errors"],
            "multithread": environ["wsgi.multithread"],
            "multiprocess": environ["wsgi.multiprocess"],
            "run_once": environ["wsgi.run_once"],
            "headers": headers,
        }
        # Caller-supplied kwargs take precedence over the extracted values.
        out.update(kwargs)
        return cls(**out)
def _add_file_from_data(
self,
key: str,
value: t.Union[
t.BinaryIO, t.Tuple[t.BinaryIO, str], t.Tuple[t.BinaryIO, str, str]
],
) -> None:
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
else:
self.files.add_file(key, value)
    @staticmethod
    def _make_base_url(scheme: str, host: str, script_root: str) -> str:
        # Reassemble scheme/host/root into a URL; trailing slashes are
        # normalized so the result always ends with exactly one "/".
        return url_unparse((scheme, host, script_root, "", "")).rstrip("/") + "/"
    @property
    def base_url(self) -> str:
        """The base URL is used to extract the URL scheme, host name,
        port, and root path.
        """
        return self._make_base_url(self.url_scheme, self.host, self.script_root)
@base_url.setter
def base_url(self, value: t.Optional[str]) -> None:
if value is None:
scheme = "http"
netloc = "localhost"
script_root = ""
else:
scheme, netloc, script_root, qs, anchor = url_parse(value)
if qs or anchor:
raise ValueError("base url must not contain a query string or fragment")
self.script_root = script_root.rstrip("/")
self.host = netloc
self.url_scheme = scheme
@property
def content_type(self) -> t.Optional[str]:
"""The content type for the request. Reflected from and to
the :attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.
"""
ct = self.headers.get("Content-Type")
if ct is None and not self._input_stream:
if self._files:
return "multipart/form-data"
if self._form:
return "application/x-www-form-urlencoded"
return None
return ct
@content_type.setter
def content_type(self, value: t.Optional[str]) -> None:
if value is None:
self.headers.pop("Content-Type", None)
else:
self.headers["Content-Type"] = value
@property
def mimetype(self) -> t.Optional[str]:
"""The mimetype (content type without charset etc.)
.. versionadded:: 0.14
"""
ct = self.content_type
return ct.split(";")[0].strip() if ct else None
@mimetype.setter
def mimetype(self, value: str) -> None:
self.content_type = get_content_type(value, self.charset)
    @property
    def mimetype_params(self) -> t.Mapping[str, str]:
        """The mimetype parameters as dict. For example if the
        content type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        .. versionadded:: 0.14
        """
        def on_update(d: t.Mapping[str, str]) -> None:
            # Write any mutation of the returned mapping back into the header.
            self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
        d = parse_options_header(self.headers.get("content-type", ""))[1]
        # CallbackDict keeps the returned mapping in sync with the header.
        return CallbackDict(d, on_update)
@property
def content_length(self) -> t.Optional[int]:
"""The content length as integer. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.
"""
return self.headers.get("Content-Length", type=int)
@content_length.setter
def content_length(self, value: t.Optional[int]) -> None:
if value is None:
self.headers.pop("Content-Length", None)
else:
self.headers["Content-Length"] = str(value)
    def _get_form(self, name: str, storage: t.Type[_TAnyMultiDict]) -> _TAnyMultiDict:
        """Common behavior for getting the :attr:`form` and
        :attr:`files` properties.
        :param name: Name of the internal cached attribute.
        :param storage: Storage class used for the data.
        """
        # A raw body and parsed form data are mutually exclusive.
        if self.input_stream is not None:
            raise AttributeError("an input stream is defined")
        rv = getattr(self, name)
        if rv is None:
            # Lazily create and cache an empty storage container.
            rv = storage()
            setattr(self, name, rv)
        return rv  # type: ignore
    def _set_form(self, name: str, value: MultiDict) -> None:
        """Common behavior for setting the :attr:`form` and
        :attr:`files` properties.
        :param name: Name of the internal cached attribute.
        :param value: Value to assign to the attribute.
        """
        # Assigning parsed data invalidates any raw input stream.
        self._input_stream = None
        setattr(self, name, value)
    @property
    def form(self) -> MultiDict:
        """A :class:`MultiDict` of form values."""
        return self._get_form("_form", MultiDict)
    @form.setter
    def form(self, value: MultiDict) -> None:
        self._set_form("_form", value)
    @property
    def files(self) -> FileMultiDict:
        """A :class:`FileMultiDict` of uploaded files. Use
        :meth:`~FileMultiDict.add_file` to add new files.
        """
        return self._get_form("_files", FileMultiDict)
    @files.setter
    def files(self, value: FileMultiDict) -> None:
        self._set_form("_files", value)
    @property
    def input_stream(self) -> t.Optional[t.BinaryIO]:
        """An optional input stream. This is mutually exclusive with
        setting :attr:`form` and :attr:`files`, setting it will clear
        those. Do not provide this if the method is not ``POST`` or
        another method that has a body.
        """
        return self._input_stream
    @input_stream.setter
    def input_stream(self, value: t.Optional[t.BinaryIO]) -> None:
        # A raw stream replaces any previously parsed form/file data.
        self._input_stream = value
        self._form = None
        self._files = None
@property
def query_string(self) -> str:
"""The query string. If you set this to a string
:attr:`args` will no longer be available.
"""
if self._query_string is None:
if self._args is not None:
return url_encode(self._args, charset=self.charset)
return ""
return self._query_string
@query_string.setter
def query_string(self, value: t.Optional[str]) -> None:
self._query_string = value
self._args = None
    @property
    def args(self) -> MultiDict:
        """The URL arguments as :class:`MultiDict`."""
        # ``args`` is only usable while no raw query string is set.
        if self._query_string is not None:
            raise AttributeError("a query string is defined")
        if self._args is None:
            self._args = MultiDict()
        return self._args
    @args.setter
    def args(self, value: t.Optional[MultiDict]) -> None:
        # Assigning parsed args drops any raw query string.
        self._query_string = None
        self._args = value
@property
def server_name(self) -> str:
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(":", 1)[0]
@property
def server_port(self) -> int:
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(":", 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
if self.url_scheme == "https":
return 443
return 80
    def __del__(self) -> None:
        # Best-effort cleanup; never raise from a destructor.
        try:
            self.close()
        except Exception:
            pass
    def close(self) -> None:
        """Closes all files. If you put real :class:`file` objects into the
        :attr:`files` dict you can call this method to automatically close
        them all in one go.
        """
        if self.closed:
            return
        try:
            files = self.files.values()
        except AttributeError:
            # ``files`` raises AttributeError when a raw input stream is set.
            files = ()  # type: ignore
        for f in files:
            try:
                f.close()
            except Exception:
                # Ignore individual close failures so every file is attempted.
                pass
        self.closed = True
    def get_environ(self) -> "WSGIEnvironment":
        """Return the built environ.
        .. versionchanged:: 0.15
            The content type and length headers are set based on
            input stream detection. Previously this only set the WSGI
            keys.
        """
        input_stream = self.input_stream
        content_length = self.content_length
        mimetype = self.mimetype
        content_type = self.content_type
        if input_stream is not None:
            # Measure the stream length without consuming it: seek to the
            # end, record the position, then restore the original offset.
            start_pos = input_stream.tell()
            input_stream.seek(0, 2)
            end_pos = input_stream.tell()
            input_stream.seek(start_pos)
            content_length = end_pos - start_pos
        elif mimetype == "multipart/form-data":
            # Encode form fields and files into a multipart body.
            input_stream, content_length, boundary = stream_encode_multipart(
                CombinedMultiDict([self.form, self.files]), charset=self.charset
            )
            content_type = f'{mimetype}; boundary="{boundary}"'
        elif mimetype == "application/x-www-form-urlencoded":
            form_encoded = url_encode(self.form, charset=self.charset).encode("ascii")
            content_length = len(form_encoded)
            input_stream = BytesIO(form_encoded)
        else:
            # No body at all: provide an empty stream.
            input_stream = BytesIO()
        result: "WSGIEnvironment" = {}
        if self.environ_base:
            result.update(self.environ_base)
        def _path_encode(x: str) -> str:
            return _wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
        raw_uri = _wsgi_encoding_dance(self.request_uri, self.charset)
        result.update(
            {
                "REQUEST_METHOD": self.method,
                "SCRIPT_NAME": _path_encode(self.script_root),
                "PATH_INFO": _path_encode(self.path),
                "QUERY_STRING": _wsgi_encoding_dance(self.query_string, self.charset),
                # Non-standard, added by mod_wsgi, uWSGI
                "REQUEST_URI": raw_uri,
                # Non-standard, added by gunicorn
                "RAW_URI": raw_uri,
                "SERVER_NAME": self.server_name,
                "SERVER_PORT": str(self.server_port),
                "HTTP_HOST": self.host,
                "SERVER_PROTOCOL": self.server_protocol,
                "wsgi.version": self.wsgi_version,
                "wsgi.url_scheme": self.url_scheme,
                "wsgi.input": input_stream,
                "wsgi.errors": self.errors_stream,
                "wsgi.multithread": self.multithread,
                "wsgi.multiprocess": self.multiprocess,
                "wsgi.run_once": self.run_once,
            }
        )
        headers = self.headers.copy()
        # Mirror the detected body metadata both as CGI keys and as headers.
        if content_type is not None:
            result["CONTENT_TYPE"] = content_type
            headers.set("Content-Type", content_type)
        if content_length is not None:
            result["CONTENT_LENGTH"] = str(content_length)
            headers.set("Content-Length", content_length)
        # Fold repeated headers into a single comma separated HTTP_* value.
        combined_headers = defaultdict(list)
        for key, value in headers.to_wsgi_list():
            combined_headers[f"HTTP_{key.upper().replace('-', '_')}"].append(value)
        for key, values in combined_headers.items():
            result[key] = ", ".join(values)
        # Overrides are applied last so they win over everything else.
        if self.environ_overrides:
            result.update(self.environ_overrides)
        return result
def get_request(self, cls: t.Optional[t.Type[Request]] = None) -> Request:
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
class ClientRedirectError(Exception):
    """If a redirect loop is detected when using follow_redirects=True with
    the :class:`Client`, then this exception is raised.
    """
class Client:
    """This class allows you to send requests to a wrapped application.
    The use_cookies parameter indicates whether cookies should be stored and
    sent for subsequent requests. This is True by default, but passing False
    will disable this behaviour.
    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True` as if not no external redirects
    are allowed.
    .. versionchanged:: 2.0
        ``response_wrapper`` is always a subclass of
        :class:``TestResponse``.
    .. versionchanged:: 0.5
        Added the ``use_cookies`` parameter.
    """

    def __init__(
        self,
        application: "WSGIApplication",
        response_wrapper: t.Optional[t.Type["Response"]] = None,
        use_cookies: bool = True,
        allow_subdomain_redirects: bool = False,
    ) -> None:
        self.application = application
        if response_wrapper in {None, Response}:
            response_wrapper = TestResponse
        # FIX: ``response_wrapper`` is a *class*, so it must be checked with
        # ``issubclass`` rather than ``isinstance``.  The previous
        # ``isinstance(response_wrapper, TestResponse)`` was always False,
        # so a wrapper that already derived from ``TestResponse`` was mixed
        # in again, producing ``(TestResponse, <subclass>)`` bases — an
        # inconsistent MRO that raises TypeError.
        elif not issubclass(response_wrapper, TestResponse):
            # Mix the custom wrapper with TestResponse so test attributes
            # (``request``, ``history``) are always available.
            response_wrapper = type(
                "WrapperTestResponse",
                (TestResponse, response_wrapper),  # type: ignore
                {},
            )
        self.response_wrapper = t.cast(t.Type["TestResponse"], response_wrapper)
        if use_cookies:
            self.cookie_jar: t.Optional[_TestCookieJar] = _TestCookieJar()
        else:
            self.cookie_jar = None
        self.allow_subdomain_redirects = allow_subdomain_redirects

    def set_cookie(
        self,
        server_name: str,
        key: str,
        value: str = "",
        max_age: t.Optional[t.Union[timedelta, int]] = None,
        expires: t.Optional[t.Union[str, datetime, int, float]] = None,
        path: str = "/",
        domain: t.Optional[str] = None,
        secure: bool = False,
        httponly: bool = False,
        samesite: t.Optional[str] = None,
        charset: str = "utf-8",
    ) -> None:
        """Sets a cookie in the client's cookie jar. The server name
        is required and has to match the one that is also passed to
        the open call.
        """
        assert self.cookie_jar is not None, "cookies disabled"
        # Serialize the cookie and feed it through a fake response so the
        # jar applies its normal extraction rules (domain/path matching).
        header = dump_cookie(
            key,
            value,
            max_age,
            expires,
            path,
            domain,
            secure,
            httponly,
            charset,
            samesite=samesite,
        )
        environ = create_environ(path, base_url=f"http://{server_name}")
        headers = [("Set-Cookie", header)]
        self.cookie_jar.extract_wsgi(environ, headers)

    def delete_cookie(
        self,
        server_name: str,
        key: str,
        path: str = "/",
        domain: t.Optional[str] = None,
        secure: bool = False,
        httponly: bool = False,
        samesite: t.Optional[str] = None,
    ) -> None:
        """Deletes a cookie in the test client."""
        # Deleting is setting an already-expired cookie.
        self.set_cookie(
            server_name,
            key,
            expires=0,
            max_age=0,
            path=path,
            domain=domain,
            secure=secure,
            httponly=httponly,
            samesite=samesite,
        )

    def run_wsgi_app(
        self, environ: "WSGIEnvironment", buffered: bool = False
    ) -> t.Tuple[t.Iterable[bytes], str, Headers]:
        """Runs the wrapped WSGI app with the given environment.
        :meta private:
        """
        # Inject stored cookies into the request, run the app, then pull
        # any Set-Cookie headers back into the jar.
        if self.cookie_jar is not None:
            self.cookie_jar.inject_wsgi(environ)
        rv = run_wsgi_app(self.application, environ, buffered=buffered)
        if self.cookie_jar is not None:
            self.cookie_jar.extract_wsgi(environ, rv[2])
        return rv

    def resolve_redirect(
        self, response: "TestResponse", buffered: bool = False
    ) -> "TestResponse":
        """Perform a new request to the location given by the redirect
        response to the previous request.
        :meta private:
        """
        scheme, netloc, path, qs, anchor = url_parse(response.location)
        builder = EnvironBuilder.from_environ(response.request.environ, query_string=qs)
        to_name_parts = netloc.split(":", 1)[0].split(".")
        from_name_parts = builder.server_name.split(".")
        if to_name_parts != [""]:
            # The new location has a host, use it for the base URL.
            builder.url_scheme = scheme
            builder.host = netloc
        else:
            # A local redirect with autocorrect_location_header=False
            # doesn't have a host, so use the request's host.
            to_name_parts = from_name_parts
        # Explain why a redirect to a different server name won't be followed.
        if to_name_parts != from_name_parts:
            if to_name_parts[-len(from_name_parts) :] == from_name_parts:
                if not self.allow_subdomain_redirects:
                    raise RuntimeError("Following subdomain redirects is not enabled.")
            else:
                raise RuntimeError("Following external redirects is not supported.")
        path_parts = path.split("/")
        root_parts = builder.script_root.split("/")
        if path_parts[: len(root_parts)] == root_parts:
            # Strip the script root from the path.
            builder.path = path[len(builder.script_root) :]
        else:
            # The new location is not under the script root, so use the
            # whole path and clear the previous root.
            builder.path = path
            builder.script_root = ""
        # Only 307 and 308 preserve all of the original request.
        if response.status_code not in {307, 308}:
            # HEAD is preserved, everything else becomes GET.
            if builder.method != "HEAD":
                builder.method = "GET"
            # Clear the body and the headers that describe it.
            if builder.input_stream is not None:
                builder.input_stream.close()
                builder.input_stream = None
            builder.content_type = None
            builder.content_length = None
            builder.headers.pop("Transfer-Encoding", None)
        return self.open(builder, buffered=buffered)

    def open(
        self,
        *args: t.Any,
        as_tuple: bool = False,
        buffered: bool = False,
        follow_redirects: bool = False,
        **kwargs: t.Any,
    ) -> "TestResponse":
        """Generate an environ dict from the given arguments, make a
        request to the application using it, and return the response.
        :param args: Passed to :class:`EnvironBuilder` to create the
            environ for the request. If a single arg is passed, it can
            be an existing :class:`EnvironBuilder` or an environ dict.
        :param buffered: Convert the iterator returned by the app into
            a list. If the iterator has a ``close()`` method, it is
            called automatically.
        :param follow_redirects: Make additional requests to follow HTTP
            redirects until a non-redirect status is returned.
            :attr:`TestResponse.history` lists the intermediate
            responses.
        .. versionchanged:: 2.0
            ``as_tuple`` is deprecated and will be removed in Werkzeug
            2.1. Use :attr:`TestResponse.request` and
            ``request.environ`` instead.
        .. versionchanged:: 2.0
            The request input stream is closed when calling
            ``response.close()``. Input streams for redirects are
            automatically closed.
        .. versionchanged:: 0.5
            If a dict is provided as file in the dict for the ``data``
            parameter the content type has to be called ``content_type``
            instead of ``mimetype``. This change was made for
            consistency with :class:`werkzeug.FileWrapper`.
        .. versionchanged:: 0.5
            Added the ``follow_redirects`` parameter.
        """
        # A single positional argument may already be a builder, an
        # environ dict, or a Request; otherwise build one from the args.
        request: t.Optional["Request"] = None
        if not kwargs and len(args) == 1:
            arg = args[0]
            if isinstance(arg, EnvironBuilder):
                request = arg.get_request()
            elif isinstance(arg, dict):
                request = EnvironBuilder.from_environ(arg).get_request()
            elif isinstance(arg, Request):
                request = arg
        if request is None:
            builder = EnvironBuilder(*args, **kwargs)
            try:
                request = builder.get_request()
            finally:
                builder.close()
        response = self.run_wsgi_app(request.environ, buffered=buffered)
        response = self.response_wrapper(*response, request=request)
        # Track (location, status) pairs already visited to detect loops.
        redirects = set()
        history: t.List["TestResponse"] = []
        while follow_redirects and response.status_code in {
            301,
            302,
            303,
            305,
            307,
            308,
        }:
            # Exhaust intermediate response bodies to ensure middleware
            # that returns an iterator runs any cleanup code.
            if not buffered:
                response.make_sequence()
                response.close()
            new_redirect_entry = (response.location, response.status_code)
            if new_redirect_entry in redirects:
                raise ClientRedirectError(
                    f"Loop detected: A {response.status_code} redirect"
                    f" to {response.location} was already made."
                )
            redirects.add(new_redirect_entry)
            response.history = tuple(history)
            history.append(response)
            response = self.resolve_redirect(response, buffered=buffered)
        else:
            # This is the final request after redirects, or not
            # following redirects.
            response.history = tuple(history)
            # Close the input stream when closing the response, in case
            # the input is an open temporary file.
            response.call_on_close(request.input_stream.close)
            if as_tuple:
                warnings.warn(
                    "'as_tuple' is deprecated and will be removed in"
                    " Werkzeug 2.1. Access 'response.request.environ'"
                    " instead.",
                    DeprecationWarning,
                    stacklevel=2,
                )
                return request.environ, response  # type: ignore
            return response

    def get(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
        """Call :meth:`open` with ``method`` set to ``GET``."""
        kw["method"] = "GET"
        return self.open(*args, **kw)

    def post(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
        """Call :meth:`open` with ``method`` set to ``POST``."""
        kw["method"] = "POST"
        return self.open(*args, **kw)

    def put(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
        """Call :meth:`open` with ``method`` set to ``PUT``."""
        kw["method"] = "PUT"
        return self.open(*args, **kw)

    def delete(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
        """Call :meth:`open` with ``method`` set to ``DELETE``."""
        kw["method"] = "DELETE"
        return self.open(*args, **kw)

    def patch(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
        """Call :meth:`open` with ``method`` set to ``PATCH``."""
        kw["method"] = "PATCH"
        return self.open(*args, **kw)

    def options(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
        """Call :meth:`open` with ``method`` set to ``OPTIONS``."""
        kw["method"] = "OPTIONS"
        return self.open(*args, **kw)

    def head(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
        """Call :meth:`open` with ``method`` set to ``HEAD``."""
        kw["method"] = "HEAD"
        return self.open(*args, **kw)

    def trace(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
        """Call :meth:`open` with ``method`` set to ``TRACE``."""
        kw["method"] = "TRACE"
        return self.open(*args, **kw)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.application!r}>"
def create_environ(*args: t.Any, **kwargs: t.Any) -> "WSGIEnvironment":
    """Create a new WSGI environ dict based on the values passed. The first
    parameter should be the path of the request which defaults to '/'. The
    second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc port and
    the path to the script.
    This accepts the same arguments as the :class:`EnvironBuilder`
    constructor.
    .. versionchanged:: 0.5
        This function is now a thin wrapper over :class:`EnvironBuilder` which
        was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
        and `charset` parameters were added.
    """
    env_builder = EnvironBuilder(*args, **kwargs)
    try:
        # Build the environ, then always release any files the builder opened.
        return env_builder.get_environ()
    finally:
        env_builder.close()
def run_wsgi_app(
    app: "WSGIApplication", environ: "WSGIEnvironment", buffered: bool = False
) -> t.Tuple[t.Iterable[bytes], str, Headers]:
    """Return a tuple in the form (app_iter, status, headers) of the
    application output. This works best if you pass it an application that
    returns an iterator all the time.
    Sometimes applications may use the `write()` callable returned
    by the `start_response` function. This tries to resolve such edge
    cases automatically. But if you don't get the expected output you
    should set `buffered` to `True` which enforces buffering.
    If passed an invalid WSGI application the behavior of this function is
    undefined. Never pass non-conforming WSGI applications to this function.
    :param app: the application to execute.
    :param buffered: set to `True` to enforce buffering.
    :return: tuple in the form ``(app_iter, status, headers)``
    """
    # Copy environ to ensure any mutations by the app (ProxyFix, for
    # example) don't affect subsequent requests (such as redirects).
    environ = _get_environ(environ).copy()
    status: str
    response: t.Optional[t.Tuple[str, t.List[t.Tuple[str, str]]]] = None
    buffer: t.List[bytes] = []
    def start_response(status, headers, exc_info=None):  # type: ignore
        # Standard WSGI start_response: record status/headers and hand the
        # app a ``write`` callable (the buffer's append method).
        nonlocal response
        if exc_info:
            try:
                raise exc_info[1].with_traceback(exc_info[2])
            finally:
                exc_info = None
        response = (status, headers)
        return buffer.append
    app_rv = app(environ, start_response)
    close_func = getattr(app_rv, "close", None)
    app_iter: t.Iterable[bytes] = iter(app_rv)
    # when buffering we emit the close call early and convert the
    # application iterator into a regular list
    if buffered:
        try:
            app_iter = list(app_iter)
        finally:
            if close_func is not None:
                close_func()
    # otherwise we iterate the application iter until we have a response, chain
    # the already received data with the already collected data and wrap it in
    # a new `ClosingIterator` if we need to restore a `close` callable from the
    # original return value.
    else:
        for item in app_iter:
            buffer.append(item)
            if response is not None:
                break
        if buffer:
            app_iter = chain(buffer, app_iter)
        if close_func is not None and app_iter is not app_rv:
            app_iter = ClosingIterator(app_iter, close_func)
    status, headers = response  # type: ignore
    return app_iter, status, Headers(headers)
class TestResponse(Response):
    """:class:`~werkzeug.wrappers.Response` subclass that provides extra
    information about requests made with the test :class:`Client`.
    Test client requests will always return an instance of this class.
    If a custom response class is passed to the client, it is
    subclassed along with this to support test information.
    If the test request included large files, or if the application is
    serving a file, call :meth:`close` to close any open files and
    prevent Python showing a ``ResourceWarning``.
    """
    request: Request
    """A request object with the environ used to make the request that
    resulted in this response.
    """
    history: t.Tuple["TestResponse", ...]
    """A list of intermediate responses. Populated when the test request
    is made with ``follow_redirects`` enabled.
    """
    def __init__(
        self,
        response: t.Iterable[bytes],
        status: str,
        headers: Headers,
        request: Request,
        history: t.Tuple["TestResponse"] = (),  # type: ignore
        **kwargs: t.Any,
    ) -> None:
        super().__init__(response, status, headers, **kwargs)
        self.request = request
        self.history = history
        # Kept only to back the deprecated tuple interface below.
        self._compat_tuple = response, status, headers
    def __iter__(self) -> t.Iterator:
        # Deprecated: iterating yields the old (response, status, headers) tuple.
        warnings.warn(
            (
                "The test client no longer returns a tuple, it returns"
                " a 'TestResponse'. Tuple unpacking is deprecated and"
                " will be removed in Werkzeug 2.1. Access the"
                " attributes 'data', 'status', and 'headers' instead."
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        return iter(self._compat_tuple)
    def __getitem__(self, item: int) -> t.Any:
        # Deprecated: index access into the old (response, status, headers) tuple.
        warnings.warn(
            (
                "The test client no longer returns a tuple, it returns"
                " a 'TestResponse'. Item indexing is deprecated and"
                " will be removed in Werkzeug 2.1. Access the"
                " attributes 'data', 'status', and 'headers' instead."
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        return self._compat_tuple[item]
| 36.297358 | 88 | 0.600345 |
593a87512de1f3c11689661f5eb7abcbe6e5c2bf | 3,273 | py | Python | TimeFreeze/main.py | Ashish013/Image-Filters | c49d98232d0b44c0a77a11693658d689dcf89457 | [
"MIT"
] | null | null | null | TimeFreeze/main.py | Ashish013/Image-Filters | c49d98232d0b44c0a77a11693658d689dcf89457 | [
"MIT"
] | null | null | null | TimeFreeze/main.py | Ashish013/Image-Filters | c49d98232d0b44c0a77a11693658d689dcf89457 | [
"MIT"
] | null | null | null | import cv2,time,argparse
import numpy as np
import matplotlib.pyplot as plt
from ssim_utils import generate_ssim_mask
from rcnn_utils import generate_rcnn_mask
from utils import files_downloader
# ---- CLI arguments ----
ap = argparse.ArgumentParser(description = "Filter that captures instances, every '-time' seconds")
ap.add_argument("-m","--method",required = True, help = "Method used for detection",choices = ["ssim","rcnn"], type = str)
ap.add_argument("-v","--vid_input",help = "Video input path",type = str)
ap.add_argument("-t","--time",help = "Time difference between 2 instances", default = 5, type = int)
ap.add_argument("-f","--font", help = "Font to display timer", default = "cv2.FONT_HERSHEY_COMPLEX", type = str)
args = vars(ap.parse_args())

# Fall back to the default webcam (device 0) when no video path is given.
if args["vid_input"] is not None:
    cap = cv2.VideoCapture(args["vid_input"])
else:
    cap = cv2.VideoCapture(0)

# Important args used in the script
method = args["method"]
buffer_time = args["time"]
# NOTE(security): eval() executes arbitrary Python from a CLI argument.
# Kept for backward compatibility; prefer getattr(cv2, name) for font lookup.
font = eval(args["font"])
start_time = time.time()

# Offset position of the timer from the ends of the frame
offset = 75

# Triggers used to control the program flow
prev_num = 1
first_snap = False
first_frame = False

while cap.isOpened():
    ret, frame = cap.read()
    # BUG FIX: the old check ``np.all(frame) == None`` compared a numpy bool
    # to None and therefore never triggered; test the frame itself instead.
    if not ret or frame is None:
        break
    # Until the first snap, the composite image is just the live frame.
    if not first_snap:
        stitched_img = frame
    if method == "ssim":
        if not first_frame:
            print("Capturing Backgound.........Completed !")
            # Captures the static background in the first frame,
            # which is used later for computing ssim
            bg = frame
            first_frame = True
        thresh = generate_ssim_mask(frame, bg)
        inv_thresh = cv2.bitwise_not(thresh)
    elif method == "rcnn":
        if not first_frame:
            # Downloads the required files for applying
            # Mask-RCNN, in the first frame
            rcnn_file_path = files_downloader()
            first_frame = True
        thresh = generate_rcnn_mask(frame, rcnn_file_path = rcnn_file_path)
        inv_thresh = cv2.bitwise_not(thresh)
    # Foreground comes from the live frame, background from the composite.
    fg_mask = cv2.bitwise_and(frame, frame, mask = thresh)
    bg_mask = cv2.bitwise_and(stitched_img, stitched_img, mask = inv_thresh)
    # The final image after masking is stored in temp which is copied to
    # stitched_img variable after every 'buffer_time' seconds
    temp = cv2.bitwise_or(fg_mask, bg_mask)

    time_diff = int(time.time() - start_time)
    if (time_diff % buffer_time == 0) and time_diff >= prev_num:
        if not first_snap:
            first_snap = True
        stitched_img = temp.copy()
        cv2.putText(temp,"Snap !",(temp.shape[1] - offset - 100, offset),fontFace = font,fontScale = 1,color = (255,255,255),thickness = 2)
        # Ensure this second's snap fires only once.
        prev_num = time_diff + 1
    else:
        # Show the countdown toward the next snap.
        cv2.putText(temp,str(time_diff % buffer_time),(temp.shape[1] - offset, offset),fontFace = font,fontScale = 1.5,color = (255,255,255),thickness = 2)

    val = cv2.waitKey(1)
    cv2.imshow("Image", temp)
    # Breaks out of the loop when ESC key is pressed !
    if val == 27:
        break

cv2.imwrite("TimeFreezeFilter.jpg", stitched_img)
cap.release()
cv2.destroyAllWindows()
e988813543914bfa8055c137d4d6a195235b6f59 | 24,856 | py | Python | ax/modelbridge/tests/test_base_modelbridge.py | mpolson64/Ax-1 | cf9e12cc1253efe0fc893f2620e99337e0927a26 | [
"MIT"
] | 1 | 2022-02-10T10:51:40.000Z | 2022-02-10T10:51:40.000Z | ax/modelbridge/tests/test_base_modelbridge.py | mpolson64/Ax-1 | cf9e12cc1253efe0fc893f2620e99337e0927a26 | [
"MIT"
] | null | null | null | ax/modelbridge/tests/test_base_modelbridge.py | mpolson64/Ax-1 | cf9e12cc1253efe0fc893f2620e99337e0927a26 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from unittest import mock
import numpy as np
import pandas as pd
from ax.core.arm import Arm
from ax.core.data import Data
from ax.core.metric import Metric
from ax.core.objective import Objective, ScalarizedObjective
from ax.core.observation import (
ObservationData,
ObservationFeatures,
observations_from_data,
)
from ax.core.optimization_config import OptimizationConfig
from ax.core.parameter import FixedParameter, ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.modelbridge.base import (
ModelBridge,
gen_arms,
unwrap_observation_data,
clamp_observation_features,
)
from ax.modelbridge.registry import Models
from ax.modelbridge.transforms.log import Log
from ax.models.base import Model
from ax.utils.common.constants import Keys
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import (
get_branin_experiment_with_multi_objective,
get_experiment,
get_non_monolithic_branin_moo_data,
)
from ax.utils.testing.core_stubs import (
get_experiment_with_repeated_arms,
get_optimization_config_no_constraints,
get_search_space_for_range_value,
get_search_space_for_range_values,
get_search_space_for_value,
)
from ax.utils.testing.modeling_stubs import (
get_experiment_for_value,
get_observation1,
get_observation1trans,
get_observation2,
get_observation2trans,
get_observation_status_quo0,
get_observation_status_quo1,
transform_1,
transform_2,
)
class BaseModelBridgeTest(TestCase):
@mock.patch(
"ax.modelbridge.base.observations_from_data",
autospec=True,
return_value=([get_observation1(), get_observation2()]),
)
@mock.patch(
"ax.modelbridge.base.gen_arms",
autospec=True,
return_value=([Arm(parameters={})], None),
)
@mock.patch("ax.modelbridge.base.ModelBridge._fit", autospec=True)
def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
# Test that on init transforms are stored and applied in the correct order
transforms = [transform_1, transform_2]
exp = get_experiment_for_value()
ss = get_search_space_for_value()
modelbridge = ModelBridge(
search_space=ss,
model=Model(),
transforms=transforms,
experiment=exp,
data=0,
)
self.assertFalse(
modelbridge._experiment_has_immutable_search_space_and_opt_config
)
self.assertEqual(
list(modelbridge.transforms.keys()), ["Cast", "transform_1", "transform_2"]
)
fit_args = mock_fit.mock_calls[0][2]
self.assertTrue(fit_args["search_space"] == get_search_space_for_value(8.0))
self.assertTrue(fit_args["observation_features"] == [])
self.assertTrue(fit_args["observation_data"] == [])
self.assertTrue(mock_observations_from_data.called)
# Test prediction on out of design features.
modelbridge._predict = mock.MagicMock(
"ax.modelbridge.base.ModelBridge._predict",
autospec=True,
side_effect=ValueError("Out of Design"),
)
# This point is in design, and thus failures in predict are legitimate.
with mock.patch.object(
ModelBridge, "model_space", return_value=get_search_space_for_range_values
):
with self.assertRaises(ValueError):
modelbridge.predict([get_observation2().features])
# This point is out of design, and not in training data.
with self.assertRaises(ValueError):
modelbridge.predict([get_observation_status_quo0().features])
# Now it's in the training data.
with mock.patch.object(
ModelBridge,
"get_training_data",
return_value=[get_observation_status_quo0()],
):
# Return raw training value.
self.assertEqual(
modelbridge.predict([get_observation_status_quo0().features]),
unwrap_observation_data([get_observation_status_quo0().data]),
)
# Test that transforms are applied correctly on predict
modelbridge._predict = mock.MagicMock(
"ax.modelbridge.base.ModelBridge._predict",
autospec=True,
return_value=[get_observation2trans().data],
)
modelbridge.predict([get_observation2().features])
# Observation features sent to _predict are un-transformed afterwards
modelbridge._predict.assert_called_with([get_observation2().features])
# Check that _single_predict is equivalent here.
modelbridge._single_predict([get_observation2().features])
# Observation features sent to _predict are un-transformed afterwards
modelbridge._predict.assert_called_with([get_observation2().features])
# Test transforms applied on gen
modelbridge._gen = mock.MagicMock(
"ax.modelbridge.base.ModelBridge._gen",
autospec=True,
return_value=([get_observation1trans().features], [2], None, {}),
)
oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
modelbridge._set_kwargs_to_save(
model_key="TestModel", model_kwargs={}, bridge_kwargs={}
)
gr = modelbridge.gen(
n=1,
search_space=get_search_space_for_value(),
optimization_config=oc,
pending_observations={"a": [get_observation2().features]},
fixed_features=ObservationFeatures({"x": 5}),
)
self.assertEqual(gr._model_key, "TestModel")
modelbridge._gen.assert_called_with(
n=1,
search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
optimization_config=oc,
pending_observations={"a": [get_observation2trans().features]},
fixed_features=ObservationFeatures({"x": 36}),
model_gen_options=None,
)
mock_gen_arms.assert_called_with(
arms_by_signature={}, observation_features=[get_observation1().features]
)
# Gen with no pending observations and no fixed features
modelbridge.gen(
n=1, search_space=get_search_space_for_value(), optimization_config=None
)
modelbridge._gen.assert_called_with(
n=1,
search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
optimization_config=None,
pending_observations={},
fixed_features=ObservationFeatures({}),
model_gen_options=None,
)
# Gen with multi-objective optimization config.
oc2 = OptimizationConfig(
objective=ScalarizedObjective(
metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
)
)
modelbridge.gen(
n=1, search_space=get_search_space_for_value(), optimization_config=oc2
)
modelbridge._gen.assert_called_with(
n=1,
search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
optimization_config=oc2,
pending_observations={},
fixed_features=ObservationFeatures({}),
model_gen_options=None,
)
# Test transforms applied on cross_validate
modelbridge._cross_validate = mock.MagicMock(
"ax.modelbridge.base.ModelBridge._cross_validate",
autospec=True,
return_value=[get_observation1trans().data],
)
cv_training_data = [get_observation2()]
cv_test_points = [get_observation1().features]
cv_predictions = modelbridge.cross_validate(
cv_training_data=cv_training_data, cv_test_points=cv_test_points
)
modelbridge._cross_validate.assert_called_with(
search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
obs_feats=[get_observation2trans().features],
obs_data=[get_observation2trans().data],
cv_test_points=[get_observation1().features], # untransformed after
)
self.assertTrue(cv_predictions == [get_observation1().data])
# Test stored training data
obs = modelbridge.get_training_data()
self.assertTrue(obs == [get_observation1(), get_observation2()])
self.assertEqual(modelbridge.metric_names, {"a", "b"})
self.assertIsNone(modelbridge.status_quo)
self.assertTrue(modelbridge.model_space == get_search_space_for_value())
self.assertEqual(modelbridge.training_in_design, [False, False])
with self.assertRaises(ValueError):
modelbridge.training_in_design = [True, True, False]
with self.assertRaises(ValueError):
modelbridge.training_in_design = [True, True, False]
# Test feature_importances
with self.assertRaises(NotImplementedError):
modelbridge.feature_importances("a")
# Test transform observation features
with mock.patch(
"ax.modelbridge.base.ModelBridge._transform_observation_features",
autospec=True,
) as mock_tr:
modelbridge.transform_observation_features([get_observation2().features])
mock_tr.assert_called_with(modelbridge, [get_observation2trans().features])
@mock.patch(
"ax.modelbridge.base.observations_from_data",
autospec=True,
return_value=([get_observation1(), get_observation2()]),
)
def test_ood_gen(self, _):
# Test fit_out_of_design by returning OOD candidats
exp = get_experiment_for_value()
ss = SearchSpace([RangeParameter("x", ParameterType.FLOAT, 0.0, 1.0)])
modelbridge = ModelBridge(
search_space=ss,
model=Model(),
transforms=[],
experiment=exp,
data=0,
fit_out_of_design=True,
)
obs = ObservationFeatures(parameters={"x": 3.0})
modelbridge._gen = mock.MagicMock(
"ax.modelbridge.base.ModelBridge._gen",
autospec=True,
return_value=([obs], [2], None, {}),
)
gr = modelbridge.gen(n=1)
self.assertEqual(gr.arms[0].parameters, obs.parameters)
# Test clamping arms by setting fit_out_of_design=False
modelbridge = ModelBridge(
search_space=ss,
model=Model(),
transforms=[],
experiment=exp,
data=0,
fit_out_of_design=False,
)
obs = ObservationFeatures(parameters={"x": 3.0})
modelbridge._gen = mock.MagicMock(
"ax.modelbridge.base.ModelBridge._gen",
autospec=True,
return_value=([obs], [2], None, {}),
)
gr = modelbridge.gen(n=1)
self.assertEqual(gr.arms[0].parameters, {"x": 1.0})
@mock.patch(
"ax.modelbridge.base.observations_from_data",
autospec=True,
return_value=([get_observation1()]),
)
@mock.patch("ax.modelbridge.base.ModelBridge._fit", autospec=True)
def testSetStatusQuo(self, mock_fit, mock_observations_from_data):
# NOTE: If empty data object is not passed, observations are not
# extracted, even with mock.
modelbridge = ModelBridge(
search_space=get_search_space_for_value(),
model=0,
experiment=get_experiment_for_value(),
data=Data(),
status_quo_name="1_1",
)
self.assertEqual(modelbridge.status_quo, get_observation1())
# Alternatively, we can specify by features
modelbridge = ModelBridge(
get_search_space_for_value(),
0,
[],
get_experiment_for_value(),
0,
status_quo_features=get_observation1().features,
)
self.assertEqual(modelbridge.status_quo, get_observation1())
# Alternatively, we can specify on experiment
# Put a dummy arm with SQ name 1_1 on the dummy experiment.
exp = get_experiment_for_value()
sq = Arm(name="1_1", parameters={"x": 3.0})
exp._status_quo = sq
# Check that we set SQ to arm 1_1
modelbridge = ModelBridge(get_search_space_for_value(), 0, [], exp, 0)
self.assertEqual(modelbridge.status_quo, get_observation1())
# Errors if features and name both specified
with self.assertRaises(ValueError):
modelbridge = ModelBridge(
get_search_space_for_value(),
0,
[],
exp,
0,
status_quo_features=get_observation1().features,
status_quo_name="1_1",
)
# Left as None if features or name don't exist
modelbridge = ModelBridge(
get_search_space_for_value(), 0, [], exp, 0, status_quo_name="1_0"
)
self.assertIsNone(modelbridge.status_quo)
modelbridge = ModelBridge(
get_search_space_for_value(),
0,
[],
get_experiment_for_value(),
0,
status_quo_features=ObservationFeatures(parameters={"x": 3.0, "y": 10.0}),
)
self.assertIsNone(modelbridge.status_quo)
@mock.patch(
"ax.modelbridge.base.ModelBridge._gen",
autospec=True,
)
def test_status_quo_for_non_monolithic_data(self, mock_gen):
mock_gen.return_value = (
[
ObservationFeatures(
parameters={"x1": float(i), "x2": float(i)}, trial_index=np.int64(1)
)
for i in range(5)
],
[1] * 5,
None,
{},
)
exp = get_branin_experiment_with_multi_objective(with_status_quo=True)
sobol = Models.SOBOL(search_space=exp.search_space)
exp.new_batch_trial(sobol.gen(5)).set_status_quo_and_optimize_power(
status_quo=exp.status_quo
).run()
# create data where metrics vary in start and end times
data = get_non_monolithic_branin_moo_data()
with warnings.catch_warnings(record=True) as ws:
bridge = ModelBridge(
experiment=exp,
data=data,
model=Model(),
search_space=exp.search_space,
)
# just testing it doesn't error
bridge.gen(5)
self.assertTrue(any("start_time" in str(w.message) for w in ws))
self.assertTrue(any("end_time" in str(w.message) for w in ws))
self.assertEqual(bridge.status_quo.arm_name, "status_quo")
@mock.patch(
"ax.modelbridge.base.observations_from_data",
autospec=True,
return_value=(
[
get_observation_status_quo0(),
get_observation_status_quo1(),
get_observation1(),
get_observation2(),
]
),
)
@mock.patch("ax.modelbridge.base.ModelBridge._fit", autospec=True)
def testSetStatusQuoMultipleObs(self, mock_fit, mock_observations_from_data):
exp = get_experiment_with_repeated_arms(2)
trial_index = 1
status_quo_features = ObservationFeatures(
parameters=exp.trials[trial_index].status_quo.parameters,
trial_index=trial_index,
)
modelbridge = ModelBridge(
get_search_space_for_value(),
0,
[],
exp,
0,
status_quo_features=status_quo_features,
)
# Check that for experiments with many trials the status quo is set
# to the value of the status quo of the last trial.
if len(exp.trials) >= 1:
self.assertEqual(modelbridge.status_quo, get_observation_status_quo1())
    @mock.patch(
        "ax.modelbridge.base.observations_from_data",
        autospec=True,
        return_value=([get_observation1(), get_observation1()]),
    )
    @mock.patch("ax.modelbridge.base.ModelBridge._fit", autospec=True)
    def testSetTrainingDataDupFeatures(self, mock_fit, mock_observations_from_data):
        # Two training observations with identical features are invalid input;
        # ModelBridge must reject them at construction time with ValueError.
        with self.assertRaises(ValueError):
            ModelBridge(
                get_search_space_for_value(),
                0,
                [],
                get_experiment_for_value(),
                0,
                status_quo_name="1_1",
            )
def testUnwrapObservationData(self):
observation_data = [get_observation1().data, get_observation2().data]
f, cov = unwrap_observation_data(observation_data)
self.assertEqual(f["a"], [2.0, 2.0])
self.assertEqual(f["b"], [4.0, 1.0])
self.assertEqual(cov["a"]["a"], [1.0, 2.0])
self.assertEqual(cov["b"]["b"], [4.0, 5.0])
self.assertEqual(cov["a"]["b"], [2.0, 3.0])
self.assertEqual(cov["b"]["a"], [3.0, 4.0])
# Check that errors if metric mismatch
od3 = ObservationData(
metric_names=["a"], means=np.array([2.0]), covariance=np.array([[4.0]])
)
with self.assertRaises(ValueError):
unwrap_observation_data(observation_data + [od3])
    def testGenArms(self):
        # Without arms_by_signature, gen_arms mints anonymous arms and returns
        # no candidate metadata.
        p1 = {"x": 0, "y": 1}
        p2 = {"x": 4, "y": 8}
        observation_features = [
            ObservationFeatures(parameters=p1),
            ObservationFeatures(parameters=p2),
        ]
        arms, candidate_metadata = gen_arms(observation_features=observation_features)
        self.assertEqual(arms[0].parameters, p1)
        self.assertIsNone(candidate_metadata)
        # A matching signature reuses the existing arm (keeping its name), and
        # per-feature metadata is returned keyed by arm signature.
        arm = Arm(name="1_1", parameters=p1)
        arms_by_signature = {arm.signature: arm}
        observation_features[0].metadata = {"some_key": "some_val_0"}
        observation_features[1].metadata = {"some_key": "some_val_1"}
        arms, candidate_metadata = gen_arms(
            observation_features=observation_features,
            arms_by_signature=arms_by_signature,
        )
        self.assertEqual(arms[0].name, "1_1")
        self.assertEqual(
            candidate_metadata,
            {
                arms[0].signature: {"some_key": "some_val_0"},
                arms[1].signature: {"some_key": "some_val_1"},
            },
        )
@mock.patch(
"ax.modelbridge.base.ModelBridge._gen",
autospec=True,
return_value=([get_observation1trans().features], [2], None, {}),
)
@mock.patch(
"ax.modelbridge.base.ModelBridge.predict", autospec=True, return_value=None
)
def testGenWithDefaults(self, _, mock_gen):
exp = get_experiment_for_value()
exp.optimization_config = get_optimization_config_no_constraints()
ss = get_search_space_for_range_value()
modelbridge = ModelBridge(
search_space=ss, model=Model(), transforms=[], experiment=exp
)
modelbridge.gen(1)
mock_gen.assert_called_with(
modelbridge,
n=1,
search_space=ss,
fixed_features=ObservationFeatures(parameters={}),
model_gen_options=None,
optimization_config=OptimizationConfig(
objective=Objective(metric=Metric("test_metric"), minimize=False),
outcome_constraints=[],
),
pending_observations={},
)
@mock.patch(
"ax.modelbridge.base.ModelBridge._gen",
autospec=True,
return_value=([get_observation1trans().features], [2], None, {}),
)
@mock.patch(
"ax.modelbridge.base.ModelBridge.predict", autospec=True, return_value=None
)
def test_gen_on_experiment_with_imm_ss_and_opt_conf(self, _, __):
exp = get_experiment_for_value()
exp._properties[Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF] = True
exp.optimization_config = get_optimization_config_no_constraints()
ss = get_search_space_for_range_value()
modelbridge = ModelBridge(
search_space=ss, model=Model(), transforms=[], experiment=exp
)
self.assertTrue(
modelbridge._experiment_has_immutable_search_space_and_opt_config
)
gr = modelbridge.gen(1)
self.assertIsNone(gr.optimization_config)
self.assertIsNone(gr.search_space)
@mock.patch(
"ax.modelbridge.base.ModelBridge._gen",
autospec=True,
side_effect=[
([get_observation1trans().features], [2], None, {}),
([get_observation2trans().features], [2], None, {}),
([get_observation2().features], [2], None, {}),
],
)
@mock.patch("ax.modelbridge.base.ModelBridge._update", autospec=True)
def test_update(self, _mock_update, _mock_gen):
exp = get_experiment_for_value()
exp.optimization_config = get_optimization_config_no_constraints()
ss = get_search_space_for_range_values()
exp.search_space = ss
modelbridge = ModelBridge(
search_space=ss, model=Model(), transforms=[Log], experiment=exp
)
exp.new_trial(generator_run=modelbridge.gen(1))
modelbridge._set_training_data(
observations_from_data(
data=Data(
pd.DataFrame(
[
{
"arm_name": "0_0",
"metric_name": "m1",
"mean": 3.0,
"sem": 1.0,
}
]
)
),
experiment=exp,
),
ss,
)
exp.new_trial(generator_run=modelbridge.gen(1))
modelbridge.update(
new_data=Data(
pd.DataFrame(
[{"arm_name": "1_0", "metric_name": "m1", "mean": 5.0, "sem": 0.0}]
)
),
experiment=exp,
)
exp.new_trial(generator_run=modelbridge.gen(1))
# Trying to update with unrecognised metric should error.
with self.assertRaisesRegex(ValueError, "Unrecognised metric"):
modelbridge.update(
new_data=Data(
pd.DataFrame(
[
{
"arm_name": "1_0",
"metric_name": "m2",
"mean": 5.0,
"sem": 0.0,
}
]
)
),
experiment=exp,
)
class testClampObservationFeatures(TestCase):
    def testClampObservationFeaturesNearBounds(self):
        """Out-of-bounds parameter values are clamped to the search-space
        bounds; in-bounds values pass through unchanged."""
        fixed = {"y": "foo", "z": True}

        def features(w, x):
            # Build an ObservationFeatures varying only the numeric params.
            return ObservationFeatures(parameters=dict(fixed, w=w, x=x))

        # ((input w, input x), (expected w, expected x)) — same five cases
        # as before, expressed as a table.
        cases = [
            ((1.0, 2), (1.0, 2)),    # already within bounds
            ((0.0, 2), (0.5, 2)),    # w below lower bound
            ((100.0, 2), (5.5, 2)),  # w above upper bound
            ((1.0, 0), (1.0, 1)),    # x below lower bound
            ((1.0, 11), (1.0, 10)),  # x above upper bound
        ]
        search_space = get_experiment().search_space
        for (w_in, x_in), (w_out, x_out) in cases:
            clamped = clamp_observation_features([features(w_in, x_in)], search_space)
            self.assertEqual(clamped[0], features(w_out, x_out))
| 38.006116 | 88 | 0.59036 |
6675be61a28513a86bc4498c766112733231cf3a | 5,068 | py | Python | seahub/api2/endpoints/admin/default_library.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 420 | 2015-01-03T11:34:46.000Z | 2022-03-10T07:15:41.000Z | seahub/api2/endpoints/admin/default_library.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 735 | 2015-01-04T21:22:51.000Z | 2022-03-31T09:26:07.000Z | seahub/api2/endpoints/admin/default_library.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 379 | 2015-01-05T17:08:03.000Z | 2022-03-06T00:11:50.000Z | # Copyright (c) 2012-2016 Seafile Ltd.
import logging
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.utils.translation import ugettext as _
from seaserv import seafile_api
from seahub.options.models import UserOptions
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.base.accounts import User
from seahub.views import get_system_default_repo_id
logger = logging.getLogger(__name__)
class AdminDefaultLibrary(APIView):
    """Admin-only endpoint to inspect or create a user's default library."""

    authentication_classes = (TokenAuthentication, SessionAuthentication)
    throttle_classes = (UserRateThrottle,)
    permission_classes = (IsAdminUser,)

    def create_default_repo(self, username):
        """Create a default library for ``username`` seeded from the system
        template library.

        Returns the new repo id, or None when the system template library is
        missing (in which case nothing is created).
        """
        # Bug fix: check the template *before* creating the user's library,
        # so a missing template no longer leaves behind an orphan repo that
        # was never registered as the user's default.
        sys_repo_id = get_system_default_repo_id()
        if not sys_repo_id or not seafile_api.get_repo(sys_repo_id):
            return None

        default_repo_id = seafile_api.create_repo(name=_("My Library"),
                desc=_("My Library"), username=username)

        # Seed the new library with every top-level entry of the template.
        for dirent in seafile_api.list_dir_by_path(sys_repo_id, '/'):
            obj_name = dirent.obj_name
            seafile_api.copy_file(sys_repo_id, '/', obj_name,
                    default_repo_id, '/', obj_name, username, 0)

        UserOptions.objects.set_default_repo(username, default_repo_id)
        return default_repo_id

    def get(self, request):
        """ Get info of common user's default library.

        Permission checking:
        1. only admin can perform this action.
        """
        if not request.user.admin_permissions.can_manage_library():
            return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')

        # argument check
        user_email = request.GET.get('user_email', None)
        if not user_email:
            error_msg = 'user_email invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

        try:
            User.objects.get(email=user_email)
        except User.DoesNotExist:
            error_msg = 'User %s not found.' % user_email
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        # get default library info
        try:
            default_repo_id = UserOptions.objects.get_default_repo(user_email)
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)

        default_repo_info = {}
        default_repo_info['user_email'] = user_email
        # "exists" is only true when the recorded default repo still exists.
        if default_repo_id and seafile_api.get_repo(default_repo_id) is not None:
            default_repo_info['exists'] = True
            default_repo_info['repo_id'] = default_repo_id
        else:
            default_repo_info['exists'] = False

        return Response(default_repo_info)

    def post(self, request):
        """ Create a default library for a common user.

        Permission checking:
        1. only admin can perform this action.
        """
        if not request.user.admin_permissions.can_manage_library():
            return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')

        # argument check
        user_email = request.POST.get('user_email', None)
        if not user_email:
            error_msg = 'user_email invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

        try:
            common_user = User.objects.get(email=user_email)
        except User.DoesNotExist:
            error_msg = 'User %s not found.' % user_email
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        # permission check: the target user must be allowed to own libraries
        if not common_user.permissions.can_add_repo():
            error_msg = 'Permission denied, %s can not create library.' % user_email
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)

        # reuse the existing default library if it is still valid, otherwise
        # create a new one from the system template
        try:
            default_repo_id = UserOptions.objects.get_default_repo(user_email)
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)

        try:
            if not (default_repo_id and
                    seafile_api.get_repo(default_repo_id) is not None):
                default_repo_id = self.create_default_repo(user_email)
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)

        # Bug fix: previously 'exists' was hard-coded to True even when
        # creation failed (create_default_repo returns None when the system
        # template library is missing). Report the actual outcome instead.
        default_repo_info = {
            'user_email': user_email,
            'exists': default_repo_id is not None,
            'repo_id': default_repo_id,
        }
        return Response(default_repo_info)
| 36.460432 | 85 | 0.676796 |
5847ea24d943806b27ff7bdd4735cb6381eea6b8 | 5,457 | py | Python | proto/py_out/p4/v1/p4runtime_pb2_grpc.py | miyachu/PI | ece01da781dfa9d9b3d4d77920cc70ba7df4d479 | [
"Apache-2.0"
] | null | null | null | proto/py_out/p4/v1/p4runtime_pb2_grpc.py | miyachu/PI | ece01da781dfa9d9b3d4d77920cc70ba7df4d479 | [
"Apache-2.0"
] | null | null | null | proto/py_out/p4/v1/p4runtime_pb2_grpc.py | miyachu/PI | ece01da781dfa9d9b3d4d77920cc70ba7df4d479 | [
"Apache-2.0"
] | 1 | 2021-11-11T07:05:45.000Z | 2021-11-11T07:05:45.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import p4.v1.p4runtime_pb2 as p4_dot_v1_dot_p4runtime__pb2
class P4RuntimeStub(object):
    """Client-side stub for the p4.v1.P4Runtime gRPC service.

    NOTE: this module is emitted by the gRPC protocol compiler plugin
    ("DO NOT EDIT"); manual changes will be lost on regeneration.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        self.Write = channel.unary_unary(
            '/p4.v1.P4Runtime/Write',
            request_serializer=p4_dot_v1_dot_p4runtime__pb2.WriteRequest.SerializeToString,
            response_deserializer=p4_dot_v1_dot_p4runtime__pb2.WriteResponse.FromString,
        )
        # Read is server-streaming: one request, a stream of ReadResponses.
        self.Read = channel.unary_stream(
            '/p4.v1.P4Runtime/Read',
            request_serializer=p4_dot_v1_dot_p4runtime__pb2.ReadRequest.SerializeToString,
            response_deserializer=p4_dot_v1_dot_p4runtime__pb2.ReadResponse.FromString,
        )
        self.SetForwardingPipelineConfig = channel.unary_unary(
            '/p4.v1.P4Runtime/SetForwardingPipelineConfig',
            request_serializer=p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigRequest.SerializeToString,
            response_deserializer=p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigResponse.FromString,
        )
        self.GetForwardingPipelineConfig = channel.unary_unary(
            '/p4.v1.P4Runtime/GetForwardingPipelineConfig',
            request_serializer=p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigRequest.SerializeToString,
            response_deserializer=p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigResponse.FromString,
        )
        # StreamChannel is bidirectional streaming (arbitration, packet I/O).
        self.StreamChannel = channel.stream_stream(
            '/p4.v1.P4Runtime/StreamChannel',
            request_serializer=p4_dot_v1_dot_p4runtime__pb2.StreamMessageRequest.SerializeToString,
            response_deserializer=p4_dot_v1_dot_p4runtime__pb2.StreamMessageResponse.FromString,
        )
class P4RuntimeServicer(object):
    """Server-side skeleton for p4.v1.P4Runtime: subclass and override the
    RPC methods; every method defaults to returning UNIMPLEMENTED.

    Generated by the gRPC protocol compiler plugin -- do not edit by hand.
    """

    def Write(self, request, context):
        """Update one or more P4 entities on the target.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Read(self, request, context):
        """Read one or more P4 entities from the target.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SetForwardingPipelineConfig(self, request, context):
        """Sets the P4 forwarding-pipeline config.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetForwardingPipelineConfig(self, request, context):
        """Gets the current P4 forwarding-pipeline config.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def StreamChannel(self, request_iterator, context):
        """Represents the bidirectional stream between the controller and the
        switch (initiated by the controller), and is managed for the following
        purposes:
        - connection initiation through master arbitration
        - indicating switch session liveness: the session is live when switch
        sends a positive master arbitration update to the controller, and is
        considered dead when either the stream breaks or the switch sends a
        negative update for master arbitration
        - the controller sending/receiving packets to/from the switch
        - streaming of notifications from the switch
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_P4RuntimeServicer_to_server(servicer, server):
    """Register ``servicer``'s RPC handlers on ``server`` under the
    'p4.v1.P4Runtime' service name. (Generated code -- do not edit.)"""
    rpc_method_handlers = {
        'Write': grpc.unary_unary_rpc_method_handler(
            servicer.Write,
            request_deserializer=p4_dot_v1_dot_p4runtime__pb2.WriteRequest.FromString,
            response_serializer=p4_dot_v1_dot_p4runtime__pb2.WriteResponse.SerializeToString,
        ),
        'Read': grpc.unary_stream_rpc_method_handler(
            servicer.Read,
            request_deserializer=p4_dot_v1_dot_p4runtime__pb2.ReadRequest.FromString,
            response_serializer=p4_dot_v1_dot_p4runtime__pb2.ReadResponse.SerializeToString,
        ),
        'SetForwardingPipelineConfig': grpc.unary_unary_rpc_method_handler(
            servicer.SetForwardingPipelineConfig,
            request_deserializer=p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigRequest.FromString,
            response_serializer=p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigResponse.SerializeToString,
        ),
        'GetForwardingPipelineConfig': grpc.unary_unary_rpc_method_handler(
            servicer.GetForwardingPipelineConfig,
            request_deserializer=p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigRequest.FromString,
            response_serializer=p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigResponse.SerializeToString,
        ),
        'StreamChannel': grpc.stream_stream_rpc_method_handler(
            servicer.StreamChannel,
            request_deserializer=p4_dot_v1_dot_p4runtime__pb2.StreamMessageRequest.FromString,
            response_serializer=p4_dot_v1_dot_p4runtime__pb2.StreamMessageResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'p4.v1.P4Runtime', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 45.475 | 113 | 0.770203 |
e89eea51a01e0f8103c024c6ab660044f6d8f93e | 993 | py | Python | venv/Lib/site-packages/tests/test_130_PrepExecuteSelectStmt.py | shehzadulislam/Assignment4 | a9cced70be6ae5d2685027d68032d5849f638301 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/tests/test_130_PrepExecuteSelectStmt.py | shehzadulislam/Assignment4 | a9cced70be6ae5d2685027d68032d5849f638301 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/tests/test_130_PrepExecuteSelectStmt.py | shehzadulislam/Assignment4 | a9cced70be6ae5d2685027d68032d5849f638301 | [
"Apache-2.0"
] | null | null | null | #
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
import unittest, sys
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
    """Exercises prepare/execute of a simple SELECT against the animals table."""

    def test_130_PrepExecuteSelectStmt(self):
        # assert_expect compares captured stdout against the __*_EXPECTED__
        # blocks at the bottom of this file.
        IbmDbTestFunctions().assert_expect(self.run_test_130)

    def run_test_130(self):
        conn = ibm_db.connect(config.database, config.user, config.password)
        if not conn:
            print("Connection failed.")
            return
        stmt = ibm_db.prepare(
            conn, "SELECT id, breed, name, weight FROM animals WHERE id = 0")
        if ibm_db.execute(stmt):
            # fetch_tuple returns a falsy value once the result set is done
            while True:
                row = ibm_db.fetch_tuple(stmt)
                if not row:
                    break
                for col in row:
                    print(col)
#__END__
#__LUW_EXPECTED__
#0
#cat
#Pook
#3.20
#__ZOS_EXPECTED__
#0
#cat
#Pook
#3.20
#__SYSTEMI_EXPECTED__
#0
#cat
#Pook
#3.20
#__IDS_EXPECTED__
#0
#cat
#Pook
#3.20
| 18.388889 | 93 | 0.642497 |
64edb6a189abc5f78fe4741c40bf9384d9cded94 | 8,797 | py | Python | tests/test_futures.py | hoodmane/loky | 00fbd9d5e8ebc8f9427096a0f64d7d7ad51b9f9b | [
"BSD-3-Clause"
] | 248 | 2016-10-05T21:28:32.000Z | 2022-03-12T01:55:33.000Z | tests/test_futures.py | hoodmane/loky | 00fbd9d5e8ebc8f9427096a0f64d7d7ad51b9f9b | [
"BSD-3-Clause"
] | 169 | 2016-10-03T17:31:33.000Z | 2020-01-15T13:53:39.000Z | tests/test_futures.py | hoodmane/loky | 00fbd9d5e8ebc8f9427096a0f64d7d7ad51b9f9b | [
"BSD-3-Clause"
] | 27 | 2016-10-01T14:08:09.000Z | 2021-01-13T18:24:02.000Z | import sys
import time
import pytest
import threading
from loky._base import (PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED,
FINISHED, Future)
from .utils import captured_stderr
if sys.version_info[:2] < (3, 3):
import loky._base as futures
else:
# This makes sure of the compatibility of the Error raised by loky with
# the ones from concurrent.futures
from concurrent import futures
def create_future(state=PENDING, exception=None, result=None):
    """Test helper: build a Future forced into a given internal state,
    optionally carrying a preset exception or result."""
    future = Future()
    future._state, future._exception, future._result = state, exception, result
    return future
# One pre-built future per lifecycle state, shared as read-only fixtures
# by the test cases below.
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
class TestsFuture:
    """Unit tests for loky's Future: callbacks, state queries and waiting."""
    def test_done_callback_with_result(self):
        """Callback added before completion receives the result."""
        callback_result = [None]
        def fn(callback_future):
            callback_result[0] = callback_future.result()
        f = Future()
        f.add_done_callback(fn)
        f.set_result(5)
        assert 5 == callback_result[0]
    def test_done_callback_with_exception(self):
        """Callback added before completion sees the stored exception."""
        callback_exception = [None]
        def fn(callback_future):
            callback_exception[0] = callback_future.exception()
        f = Future()
        f.add_done_callback(fn)
        f.set_exception(Exception('test'))
        assert ('test',) == callback_exception[0].args
    def test_done_callback_with_cancel(self):
        """Callback fires on cancellation and observes the cancelled state."""
        was_cancelled = [None]
        def fn(callback_future):
            was_cancelled[0] = callback_future.cancelled()
        f = Future()
        f.add_done_callback(fn)
        assert f.cancel()
        assert was_cancelled[0]
    def test_done_callback_raises(self):
        """A raising callback is logged and does not block later callbacks."""
        with captured_stderr() as stderr:
            import logging
            log = logging.getLogger("concurrent.futures")
            log.addHandler(logging.StreamHandler())
            raising_was_called = [False]
            fn_was_called = [False]
            def raising_fn(callback_future):
                raising_was_called[0] = True
                raise Exception('foobar')
            def fn(callback_future):
                fn_was_called[0] = True
            f = Future()
            f.add_done_callback(raising_fn)
            f.add_done_callback(fn)
            f.set_result(5)
            assert raising_was_called[0]
            assert fn_was_called[0]
            assert 'Exception: foobar' in stderr.getvalue()
            del log.handlers[:]
    def test_done_callback_already_successful(self):
        """Callback added after set_result runs immediately with the result."""
        callback_result = [None]
        def fn(callback_future):
            callback_result[0] = callback_future.result()
        f = Future()
        f.set_result(5)
        f.add_done_callback(fn)
        assert 5 == callback_result[0]
    def test_done_callback_already_failed(self):
        """Callback added after set_exception runs immediately."""
        callback_exception = [None]
        def fn(callback_future):
            callback_exception[0] = callback_future.exception()
        f = Future()
        f.set_exception(Exception('test'))
        f.add_done_callback(fn)
        assert ('test',) == callback_exception[0].args
    def test_done_callback_already_cancelled(self):
        """Callback added after cancel() runs immediately."""
        was_cancelled = [None]
        def fn(callback_future):
            was_cancelled[0] = callback_future.cancelled()
        f = Future()
        assert f.cancel()
        f.add_done_callback(fn)
        assert was_cancelled[0]
    def test_repr(self):
        """repr() reflects the future's state (and outcome when finished)."""
        import re
        assert re.match('<Future at 0x[0-9a-f]+ state=pending>',
                        repr(PENDING_FUTURE)).pos > -1
        assert re.match('<Future at 0x[0-9a-f]+ state=running>',
                        repr(RUNNING_FUTURE)).pos > -1
        assert re.match('<Future at 0x[0-9a-f]+ state=cancelled>',
                        repr(CANCELLED_FUTURE)).pos > -1
        assert re.match('<Future at 0x[0-9a-f]+ state=cancelled>',
                        repr(CANCELLED_AND_NOTIFIED_FUTURE)).pos > -1
        assert re.match('<Future at 0x[0-9a-f]+ state=finished raised '
                        'OSError>', repr(EXCEPTION_FUTURE)).pos > -1
        assert re.match('<Future at 0x[0-9a-f]+ state=finished returned int>',
                        repr(SUCCESSFUL_FUTURE)).pos > -1
    def test_cancel(self):
        """cancel() succeeds only for pending/cancelled futures."""
        f1 = create_future(state=PENDING)
        f2 = create_future(state=RUNNING)
        f3 = create_future(state=CANCELLED)
        f4 = create_future(state=CANCELLED_AND_NOTIFIED)
        f5 = create_future(state=FINISHED, exception=OSError())
        f6 = create_future(state=FINISHED, result=5)
        assert f1.cancel()
        assert f1._state == CANCELLED
        assert not f2.cancel()
        assert f2._state == RUNNING
        assert f3.cancel()
        assert f3._state == CANCELLED
        assert f4.cancel()
        assert f4._state == CANCELLED_AND_NOTIFIED
        assert not f5.cancel()
        assert f5._state == FINISHED
        assert not f6.cancel()
        assert f6._state == FINISHED
    def test_cancelled(self):
        """cancelled() is True only in the two cancelled states."""
        assert not PENDING_FUTURE.cancelled()
        assert not RUNNING_FUTURE.cancelled()
        assert CANCELLED_FUTURE.cancelled()
        assert CANCELLED_AND_NOTIFIED_FUTURE.cancelled()
        assert not EXCEPTION_FUTURE.cancelled()
        assert not SUCCESSFUL_FUTURE.cancelled()
    def test_done(self):
        """done() is True for cancelled and finished futures."""
        assert not PENDING_FUTURE.done()
        assert not RUNNING_FUTURE.done()
        assert CANCELLED_FUTURE.done()
        assert CANCELLED_AND_NOTIFIED_FUTURE.done()
        assert EXCEPTION_FUTURE.done()
        assert SUCCESSFUL_FUTURE.done()
    def test_running(self):
        """running() is True only in the RUNNING state."""
        assert not PENDING_FUTURE.running()
        assert RUNNING_FUTURE.running()
        assert not CANCELLED_FUTURE.running()
        assert not CANCELLED_AND_NOTIFIED_FUTURE.running()
        assert not EXCEPTION_FUTURE.running()
        assert not SUCCESSFUL_FUTURE.running()
    def test_result_with_timeout(self):
        """result(timeout=0) raises/returns according to current state."""
        with pytest.raises(futures.TimeoutError):
            PENDING_FUTURE.result(timeout=0)
        with pytest.raises(futures.TimeoutError):
            RUNNING_FUTURE.result(timeout=0)
        with pytest.raises(futures.CancelledError):
            CANCELLED_FUTURE.result(timeout=0)
        with pytest.raises(futures.CancelledError):
            CANCELLED_AND_NOTIFIED_FUTURE.result(timeout=0)
        with pytest.raises(OSError):
            EXCEPTION_FUTURE.result(timeout=0)
        assert SUCCESSFUL_FUTURE.result(timeout=0) == 42
    def test_result_with_success(self):
        """result() wakes up when another thread sets the result."""
        def notification(ready):
            # Wait until the main thread is waiting for the result.
            ready.wait(1)
            time.sleep(.1)
            f1.set_result(42)
        ready = threading.Event()
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification, args=(ready,))
        t.start()
        ready.set()
        assert f1.result(timeout=5) == 42
    def test_result_with_cancel(self):
        """result() raises CancelledError when another thread cancels."""
        def notification(ready):
            # Wait until the main thread is waiting for the result.
            ready.wait(1)
            time.sleep(.1)
            f1.cancel()
        ready = threading.Event()
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification, args=(ready,))
        t.start()
        ready.set()
        with pytest.raises(futures.CancelledError):
            f1.result(timeout=5)
    def test_exception_with_timeout(self):
        """exception(timeout=0) raises/returns according to current state."""
        with pytest.raises(futures.TimeoutError):
            PENDING_FUTURE.exception(timeout=0)
        with pytest.raises(futures.TimeoutError):
            RUNNING_FUTURE.exception(timeout=0)
        with pytest.raises(futures.CancelledError):
            CANCELLED_FUTURE.exception(timeout=0)
        with pytest.raises(futures.CancelledError):
            CANCELLED_AND_NOTIFIED_FUTURE.exception(timeout=0)
        assert isinstance(EXCEPTION_FUTURE.exception(timeout=0), OSError)
        assert SUCCESSFUL_FUTURE.exception(timeout=0) is None
    def test_exception_with_success(self):
        """exception() wakes up when another thread stores an exception."""
        def notification(ready):
            # Wait until the main thread is waiting for the exception.
            ready.wait(1)
            time.sleep(.1)
            with f1._condition:
                f1._state = FINISHED
                f1._exception = OSError()
                f1._condition.notify_all()
        ready = threading.Event()
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification, args=(ready,))
        t.start()
        ready.set()
        assert isinstance(f1.exception(timeout=5), OSError)
| 33.448669 | 78 | 0.631011 |
0f11ecf2903f26aed2c8507506c45844f6ec6532 | 2,238 | py | Python | task/common/MyLogFileHandler.py | withcouragetol/codebee-10l | 2636b8fc1b456a85201b868201cf9c147d739031 | [
"Apache-2.0"
] | 6 | 2018-04-13T09:48:26.000Z | 2020-06-22T13:42:10.000Z | task/common/MyLogFileHandler.py | withcouragetol/codebee-10l | 2636b8fc1b456a85201b868201cf9c147d739031 | [
"Apache-2.0"
] | null | null | null | task/common/MyLogFileHandler.py | withcouragetol/codebee-10l | 2636b8fc1b456a85201b868201cf9c147d739031 | [
"Apache-2.0"
] | 2 | 2018-09-04T07:09:50.000Z | 2019-08-18T15:11:00.000Z | from logging import FileHandler
import os
import time
try:
import codecs
except ImportError:
codecs = None
class SafeFileHandler(FileHandler):
    """A FileHandler that writes to a date-stamped file and rolls it daily.

    On every emit the handler compares the cached date suffix against the
    current local date and reopens the stream under a fresh
    ``<basename>.<YYYY-MM-DD>`` name whenever the day has changed or the
    current dated file has disappeared.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging
        """
        if codecs is None:
            encoding = None
        FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # strftime pattern for the date suffix and the suffix last applied.
        self.suffix = "%Y-%m-%d"
        self.suffix_time = ""
    def emit(self, record):
        """
        Emit a record.
        Rolls over to a fresh dated file first when the date check says so.
        """
        try:
            if self.check_baseFilename(record):
                self.build_baseFilename()
            FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
    def check_baseFilename(self, record):
        """
        Return 1 when a rollover is due, else 0.
        record is unused; it is accepted so the signature matches the
        rollover-check convention of the logging handlers.
        """
        now = time.localtime()
        date_changed = self.suffix_time != time.strftime(self.suffix, now)
        if date_changed or not os.path.exists(
                self.baseFilename + '.' + self.suffix_time):
            return 1
        return 0
    def build_baseFilename(self):
        """
        Perform the rollover: swap the old date suffix on baseFilename for
        today's date and reopen the stream (lazily when delay is set).
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # Strip the previous ".<date>" suffix, if one was ever appended.
        if self.suffix_time:
            cut = self.baseFilename.find("." + self.suffix_time)
            if cut == -1:
                cut = self.baseFilename.rfind(".")
            self.baseFilename = self.baseFilename[:cut]
        # Append the current date and switch to append mode.
        self.suffix_time = time.strftime(self.suffix, time.localtime())
        self.baseFilename = self.baseFilename + "." + self.suffix_time
        self.mode = 'a'
        if not self.delay:
            self.stream = self._open()
2b1681dfc5a88bc3779005951258d36988d8c73a | 423 | py | Python | apps/blog/migrations/0006_post_slug.py | Almazishe/portfolio | a69deded88e761ae06bf817646609c5237afb045 | [
"MIT"
] | null | null | null | apps/blog/migrations/0006_post_slug.py | Almazishe/portfolio | a69deded88e761ae06bf817646609c5237afb045 | [
"MIT"
] | null | null | null | apps/blog/migrations/0006_post_slug.py | Almazishe/portfolio | a69deded88e761ae06bf817646609c5237afb045 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-16 04:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable, unique ``slug`` field to the blog ``Post`` model."""
    dependencies = [
        ('blog', '0005_auto_20210116_0726'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='slug',
            # null=True so existing rows need no value; unique for lookups.
            field=models.SlugField(max_length=255, null=True, unique=True, verbose_name='Slug'),
        ),
    ]
| 22.263158 | 96 | 0.607565 |
3fbcf798b1a2a73fb05c7178cfd5984de7ffd6c5 | 220,725 | py | Python | pywikibot/page.py | npdoty/pywikibot | d0687ce7035401ffb63f4a95d225c27ed9ba722b | [
"MIT"
] | null | null | null | pywikibot/page.py | npdoty/pywikibot | d0687ce7035401ffb63f4a95d225c27ed9ba722b | [
"MIT"
] | null | null | null | pywikibot/page.py | npdoty/pywikibot | d0687ce7035401ffb63f4a95d225c27ed9ba722b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Objects representing various types of MediaWiki, including Wikibase, pages.
This module also includes objects:
* Property: a type of semantic data.
* Claim: an instance of a semantic assertion.
* Revision: a single change to a wiki page.
* FileInfo: a structure holding imageinfo of latest rev. of FilePage
* Link: an internal or interwiki link in wikitext.
"""
#
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 3fbcf798b1a2a73fb05c7178cfd5984de7ffd6c5 $'
#
import hashlib
import logging
import os.path
import re
import sys
try:
import unicodedata2 as unicodedata
except ImportError:
import unicodedata
from collections import defaultdict, namedtuple
from warnings import warn
from pywikibot.tools import PY2
if not PY2:
unicode = basestring = str
long = int
from html import entities as htmlentitydefs
from urllib.parse import quote_from_bytes, unquote_to_bytes
else:
if __debug__ and not PY2:
unichr = NotImplemented # pyflakes workaround
chr = unichr
import htmlentitydefs
from urllib import quote as quote_from_bytes, unquote as unquote_to_bytes
import pywikibot
from pywikibot import config
from pywikibot import textlib
from pywikibot.comms import http
from pywikibot.exceptions import (
AutoblockUser,
NotEmailableError,
SiteDefinitionError,
UserRightsError,
)
from pywikibot.data.api import APIError
from pywikibot.family import Family
from pywikibot.site import DataSite, Namespace, need_version
from pywikibot.tools import (
compute_file_hash,
PYTHON_VERSION,
MediaWikiVersion, UnicodeMixin, ComparableMixin, DotReadableDict,
deprecated, deprecate_arg, deprecated_args, issue_deprecation_warning,
add_full_name, manage_wrapping,
ModuleDeprecationWrapper as _ModuleDeprecationWrapper,
first_upper, redirect_func, remove_last_args, _NotImplementedWarning,
OrderedDict, Counter,
)
from pywikibot.tools.ip import ip_regexp
from pywikibot.tools.ip import is_IP
__all__ = (
'BasePage',
'Page',
'FilePage',
'Category',
'User',
'WikibasePage',
'ItemPage',
'Property',
'PropertyPage',
'Claim',
'Revision',
'FileInfo',
'Link',
'html2unicode',
'UnicodeToAsciiHtml',
'unicode2html',
'url2unicode',
'ip_regexp', # unused & deprecated
)
logger = logging.getLogger("pywiki.wiki.page")
@add_full_name
def allow_asynchronous(func):
    """
    Decorator to make it possible to run a BasePage method asynchronously.

    This is done when the method is called with kwarg asynchronous=True.
    Optionally, you can also provide kwarg callback, which, if provided, is
    a callable that gets the page as the first and a possible exception that
    occurred during saving in the second thread or None as the second argument.
    """
    def handle(func, self, *args, **kwargs):
        # Pop the decorator-only kwargs before delegating to the real method.
        do_async = kwargs.pop('asynchronous', False)
        callback = kwargs.pop('callback', None)
        err = None
        try:
            func(self, *args, **kwargs)
        # TODO: other "expected" error types to catch?
        except pywikibot.Error as edit_err:
            err = edit_err  # edit_err will be deleted in the end of the scope
            link = self.title(asLink=True)
            pywikibot.log('Error saving page %s (%s)\n' % (link, err),
                          exc_info=True)
            # BUGFIX: this re-raise must only happen when an error actually
            # occurred; previously it sat outside the except clause, so a
            # successful synchronous call without a callback raised
            # OtherPageSaveError(self, None).
            if not callback and not do_async:
                if isinstance(err, pywikibot.PageSaveRelatedError):
                    raise err
                raise pywikibot.OtherPageSaveError(self, err)
        # Asynchronous or callback-style callers always get the callback,
        # with err=None on success.
        if callback:
            callback(self, err)
    def wrapper(self, *args, **kwargs):
        if kwargs.get('asynchronous'):
            pywikibot.async_request(handle, func, self, *args, **kwargs)
        else:
            handle(func, self, *args, **kwargs)
    manage_wrapping(wrapper, func)
    return wrapper
# Note: Link objects (defined later on) represent a wiki-page's title, while
# Page objects (defined here) represent the page itself, including its contents.
class BasePage(UnicodeMixin, ComparableMixin):
"""
BasePage: Base object for a MediaWiki page.
This object only implements internally methods that do not require
reading from or writing to the wiki. All other methods are delegated
to the Site object.
Will be subclassed by Page, WikibasePage, and FlowPage.
"""
_cache_attrs = (
'_text', '_pageid', '_catinfo', '_templates', '_protection',
'_contentmodel', '_langlinks', '_isredir', '_coords',
'_preloadedtext', '_timestamp', '_applicable_protections',
'_flowinfo', '_quality', '_pageprops', '_revid', '_quality_text',
'_pageimage'
)
    def __init__(self, source, title=u"", ns=0):
        """
        Instantiate a Page object.
        Three calling formats are supported:
          - If the first argument is a Page, create a copy of that object.
            This can be used to convert an existing Page into a subclass
            object, such as Category or FilePage. (If the title is also
            given as the second argument, creates a copy with that title;
            this is used when pages are moved.)
          - If the first argument is a Site, create a Page on that Site
            using the second argument as the title (may include a section),
            and the third as the namespace number. The namespace number is
            mandatory, even if the title includes the namespace prefix. This
            is the preferred syntax when using an already-normalized title
            obtained from api.php or a database dump. WARNING: may produce
            invalid objects if page title isn't in normal form!
          - If the first argument is a Link, create a Page from that link.
            This is the preferred syntax when using a title scraped from
            wikitext, URLs, or another non-normalized source.
        @param source: the source of the page
        @type source: Link, Page (or subclass), or Site
        @param title: normalized title of the page; required if source is a
            Site, ignored otherwise
        @type title: unicode
        @param ns: namespace number; required if source is a Site, ignored
            otherwise
        @type ns: int
        """
        if title is None:
            raise ValueError(u'Title cannot be None.')
        if isinstance(source, pywikibot.site.BaseSite):
            self._link = Link(title, source=source, defaultNamespace=ns)
            self._revisions = {}
        elif isinstance(source, Page):
            # copy all of source's attributes to this object
            # without overwriting non-None values
            self.__dict__.update((k, v) for k, v in source.__dict__.items()
                                 if k not in self.__dict__ or
                                 self.__dict__[k] is None)
            if title:
                # overwrite title
                self._link = Link(title, source=source.site,
                                  defaultNamespace=ns)
            # When no title is given, the copied _link (and caches) from
            # the source page are kept as-is.
        elif isinstance(source, Link):
            self._link = source
            self._revisions = {}
        else:
            raise pywikibot.Error(
                "Invalid argument type '%s' in Page constructor: %s"
                % (type(source), source))
@property
def site(self):
"""Return the Site object for the wiki on which this Page resides."""
return self._link.site
def version(self):
"""
Return MediaWiki version number of the page site.
This is needed to use @need_version() decorator for methods of
Page objects.
"""
return self.site.version()
@property
def image_repository(self):
"""Return the Site object for the image repository."""
return self.site.image_repository()
@property
def data_repository(self):
"""Return the Site object for the data repository."""
return self.site.data_repository()
def namespace(self):
"""
Return the number of the namespace of the page.
@return: namespace of the page
@rtype: int
"""
return self._link.namespace
@property
def _namespace_obj(self):
"""Return the namespace object of the page."""
# TODO: T104864: Temporary until Page.namespace() is consistent
return self.site.namespaces[self.namespace()]
@property
def content_model(self):
"""
Return the content model for this page.
If it cannot be reliably determined via the API,
None is returned.
"""
# TODO: T102735: Add a sane default of 'wikitext' and others for <1.21
if not hasattr(self, '_contentmodel'):
self.site.loadpageinfo(self)
return self._contentmodel
@property
def depth(self):
"""Return the depth/subpage level of the page."""
if not hasattr(self, '_depth'):
# Check if the namespace allows subpages
if self._namespace_obj.subpages:
_depth = self.title().count('/')
else:
# Does not allow subpages, which means depth is always 0
_depth = 0
return _depth
@property
def pageid(self):
"""
Return pageid of the page.
@return: pageid or 0 if page does not exist
@rtype: int
"""
if not hasattr(self, '_pageid'):
self.site.loadpageinfo(self)
return self._pageid
    @deprecated_args(decode=None, savetitle="asUrl")
    def title(self, underscore=False, withNamespace=True,
              withSection=True, asUrl=False, asLink=False,
              allowInterwiki=True, forceInterwiki=False, textlink=False,
              as_filename=False, insite=None):
        """
        Return the title of this Page, as a Unicode string.
        @param underscore: (not used with asLink) if true, replace all ' '
            characters with '_'
        @param withNamespace: if false, omit the namespace prefix. If this
            option is false and used together with asLink return a labeled
            link like [[link|label]]
        @param withSection: if false, omit the section
        @param asUrl: (not used with asLink) if true, quote title as if in an
            URL
        @param asLink: if true, return the title in the form of a wikilink
        @param allowInterwiki: (only used if asLink is true) if true, format
            the link as an interwiki link if necessary
        @param forceInterwiki: (only used if asLink is true) if true, always
            format the link as an interwiki link
        @param textlink: (only used if asLink is true) if true, place a ':'
            before Category: and Image: links
        @param as_filename: (not used with asLink) if true, replace any
            characters that are unsafe in filenames
        @param insite: (only used if asLink is true) a site object where the
            title is to be shown. default is the current family/lang given by
            -family and -lang option i.e. config.family and config.mylang
        @rtype: unicode
        """
        # canonical title includes the namespace; label is the bare title.
        title = self._link.canonical_title()
        label = self._link.title
        if withSection and self._link.section:
            section = u"#" + self._link.section
        else:
            section = u''
        if asLink:
            # Decide whether an interwiki prefix is needed relative to the
            # target site (insite, or the configured default site).
            if insite:
                target_code = insite.code
                target_family = insite.family.name
            else:
                target_code = config.mylang
                target_family = config.family
            if forceInterwiki or \
               (allowInterwiki and
                (self.site.family.name != target_family or
                 self.site.code != target_code)):
                if self.site.family.name != target_family \
                   and self.site.family.name != self.site.code:
                    title = u'%s:%s:%s' % (self.site.family.name,
                                           self.site.code,
                                           title)
                else:
                    # use this form for sites like commons, where the
                    # code is the same as the family name
                    title = u'%s:%s' % (self.site.code, title)
            elif textlink and (self.is_filepage() or self.is_categorypage()):
                # leading ':' makes a text link instead of embedding/categorizing
                title = u':%s' % title
            elif self.namespace() == 0 and not section:
                # in main namespace a label would be redundant
                withNamespace = True
            if withNamespace:
                return u'[[%s%s]]' % (title, section)
            else:
                return u'[[%s%s|%s]]' % (title, section, label)
        if not withNamespace and self.namespace() != 0:
            title = label + section
        else:
            title += section
        if underscore or asUrl:
            title = title.replace(u' ', u'_')
        if asUrl:
            encodedTitle = title.encode(self.site.encoding())
            title = quote_from_bytes(encodedTitle)
        if as_filename:
            # Replace characters that are not possible in file names on some
            # systems.
            # Spaces are possible on most systems, but are bad for URLs.
            for forbidden in ':*?/\\ ':
                title = title.replace(forbidden, '_')
        return title
@remove_last_args(('decode', 'underscore'))
def section(self):
"""
Return the name of the section this Page refers to.
The section is the part of the title following a '#' character, if
any. If no section is present, return None.
@rtype: unicode
"""
return self._link.section
def __unicode__(self):
"""Return a unicode string representation."""
return self.title(asLink=True, forceInterwiki=True)
    def __repr__(self):
        """Return a more complete string representation."""
        if not PY2:
            # Python 3: repr() of the str title is directly printable.
            title = repr(self.title())
        else:
            # Python 2: produce a byte string the console can display.
            try:
                title = self.title().encode(config.console_encoding)
            except UnicodeEncodeError:
                # okay console encoding didn't work, at least try something
                title = self.title().encode('unicode_escape')
        return str('{0}({1})').format(self.__class__.__name__, title)
def _cmpkey(self):
"""
Key for comparison of Page objects.
Page objects are "equal" if and only if they are on the same site
and have the same normalized title, including section if any.
Page objects are sortable by site, namespace then title.
"""
return (self.site, self.namespace(), self.title())
def __hash__(self):
"""
A stable identifier to be used as a key in hash-tables.
This relies on the fact that the string
representation of an instance can not change after the construction.
"""
return hash(unicode(self))
def full_url(self):
"""Return the full URL."""
return self.site.base_url(self.site.article_path +
self.title(asUrl=True))
def autoFormat(self):
"""
Return L{date.getAutoFormat} dictName and value, if any.
Value can be a year, date, etc., and dictName is 'YearBC',
'Year_December', or another dictionary name. Please note that two
entries may have exactly the same autoFormat, but be in two
different namespaces, as some sites have categories with the
same names. Regular titles return (None, None).
"""
if not hasattr(self, '_autoFormat'):
from pywikibot import date
self._autoFormat = date.getAutoFormat(
self.site.code,
self.title(withNamespace=False)
)
return self._autoFormat
def isAutoTitle(self):
"""Return True if title of this Page is in the autoFormat dictionary."""
return self.autoFormat()[0] is not None
    @deprecated_args(throttle=None,
                     change_edit_time=None,
                     expandtemplates=None)
    def get(self, force=False, get_redirect=False, sysop=False):
        """
        Return the wiki-text of the page.
        This will retrieve the page from the server if it has not been
        retrieved yet, or if force is True. This can raise the following
        exceptions that should be caught by the calling code:
        @exception NoPage: The page does not exist
        @exception IsRedirectPage: The page is a redirect. The argument of the
                                   exception is the title of the page it
                                   redirects to.
        @exception SectionError: The section does not exist on a page with
                                 a # link
        @param force: reload all page attributes, including errors.
        @param get_redirect: return the redirect text, do not follow the
            redirect, do not raise an exception.
        @param sysop: if the user has a sysop account, use it to
            retrieve this page
        @rtype: unicode
        """
        if force:
            # Invalidate the cached revision id (plus stale exceptions and
            # redirect target) via the latest_revision_id deleter.
            del self.latest_revision_id
        try:
            self._getInternals(sysop)
        except pywikibot.IsRedirectPage:
            if not get_redirect:
                raise
        return self.latest_revision.text
def _latest_cached_revision(self):
"""Get the latest revision if cached and has text, otherwise None."""
if (hasattr(self, '_revid') and self._revid in self._revisions and
self._revisions[self._revid].text is not None):
return self._revisions[self._revid]
else:
return None
    def _getInternals(self, sysop):
        """
        Helper function for get().
        Ensures the latest revision (with text) is stored on self,
        fetching it from the site if needed.
        * Raises exceptions from previous runs.
        * Stores new exceptions in _getexception and raises them.
        """
        # Raise exceptions from previous runs
        if hasattr(self, '_getexception'):
            raise self._getexception
        # If not already stored, fetch revision
        if self._latest_cached_revision() is None:
            try:
                self.site.loadrevisions(self, getText=True, sysop=sysop)
            except (pywikibot.NoPage, pywikibot.SectionError) as e:
                # Remember the failure so later get() calls re-raise it
                # without hitting the server again.
                self._getexception = e
                raise
        # self._isredir is set by loadrevisions
        if self._isredir:
            self._getexception = pywikibot.IsRedirectPage(self)
            raise self._getexception
@deprecated_args(throttle=None, change_edit_time=None)
def getOldVersion(self, oldid, force=False, get_redirect=False,
sysop=False):
"""
Return text of an old revision of this page; same options as get().
@param oldid: The revid of the revision desired.
@rtype: unicode
"""
if force or oldid not in self._revisions \
or self._revisions[oldid].text is None:
self.site.loadrevisions(self,
getText=True,
revids=oldid,
sysop=sysop)
# TODO: what about redirects, errors?
return self._revisions[oldid].text
def permalink(self, oldid=None, percent_encoded=True):
"""Return the permalink URL of an old revision of this page.
@param oldid: The revid of the revision desired.
@rtype: unicode
"""
if percent_encoded:
title = self.title(asUrl=True)
else:
title = self.title(asUrl=False).replace(' ', '_')
return "//%s%s/index.php?title=%s&oldid=%s" \
% (self.site.hostname(),
self.site.scriptpath(),
title,
(oldid if oldid is not None else self.latest_revision_id))
    @property
    def latest_revision_id(self):
        """Return the current revision id for this page."""
        # NOTE(review): self is passed as the first positional argument to
        # revisions() here; loading revisions sets self._revid as a side
        # effect -- confirm the extra argument is intentional.
        if not hasattr(self, '_revid'):
            self.revisions(self)
        return self._revid
    @latest_revision_id.deleter
    def latest_revision_id(self):
        """
        Remove the latest revision id set for this Page.
        All internal cached values specifically for the latest revision
        of this page are cleared.
        The following cached values are not cleared:
        - text property
        - page properties, and page coordinates
        - lastNonBotUser
        - isDisambig and isCategoryRedirect status
        - langlinks, templates and deleted revisions
        """
        # When forcing, we retry the page no matter what:
        # * Old exceptions do not apply any more
        # * Deleting _revid to force reload
        # * Deleting _redirtarget, that info is now obsolete.
        for attr in ['_redirtarget', '_getexception', '_revid']:
            if hasattr(self, attr):
                delattr(self, attr)
    @latest_revision_id.setter
    def latest_revision_id(self, value):
        """Set the latest revision for this Page."""
        # Clear stale per-revision caches first (via the deleter above).
        del self.latest_revision_id
        self._revid = value
@deprecated('latest_revision_id')
def latestRevision(self):
"""Return the current revision id for this page."""
return self.latest_revision_id
@deprecated('latest_revision_id')
def pageAPInfo(self):
"""Return the current revision id for this page."""
if self.isRedirectPage():
raise pywikibot.IsRedirectPage(self)
return self.latest_revision_id
@property
def latest_revision(self):
"""Return the current revision for this page."""
rev = self._latest_cached_revision()
if rev is not None:
return rev
return next(self.revisions(content=True, total=1))
    @property
    def text(self):
        """
        Return the current (edited) wikitext, loading it if necessary.
        @return: text of the page
        @rtype: unicode
        """
        if not hasattr(self, '_text') or self._text is None:
            try:
                self._text = self.get(get_redirect=True)
            except pywikibot.NoPage:
                # TODO: what other exceptions might be returned?
                self._text = u""
        return self._text
    @text.setter
    def text(self, value):
        """
        Update the current (edited) wikitext.
        @param value: New value or None
        @type value: basestring or None
        """
        self._text = None if value is None else unicode(value)
        # The template cache is derived from the text, so drop it.
        if hasattr(self, '_raw_extracted_templates'):
            del self._raw_extracted_templates
    @text.deleter
    def text(self):
        """Delete the current (edited) wikitext."""
        # Also drop caches that were derived from the deleted text.
        if hasattr(self, "_text"):
            del self._text
        if hasattr(self, '_expanded_text'):
            del self._expanded_text
        if hasattr(self, '_raw_extracted_templates'):
            del self._raw_extracted_templates
def preloadText(self):
"""
The text returned by EditFormPreloadText.
See API module "info".
Application: on Wikisource wikis, text can be preloaded even if
a page does not exist, if an Index page is present.
@rtype: unicode
"""
self.site.loadpageinfo(self, preload=True)
return self._preloadedtext
def _get_parsed_page(self):
"""Retrieve parsed text (via action=parse) and cache it."""
# Get (cached) parsed text.
if not hasattr(self, '_parsed_text'):
self._parsed_text = self.site.get_parsed_page(self)
return self._parsed_text
def properties(self, force=False):
"""
Return the properties of the page.
@param force: force updating from the live site
@rtype: dict
"""
if not hasattr(self, '_pageprops') or force:
self._pageprops = {} # page may not have pageprops (see bug T56868)
self.site.loadpageprops(self)
return self._pageprops
def defaultsort(self, force=False):
"""
Extract value of the {{DEFAULTSORT:}} magic word from the page.
@param force: force updating from the live site
@rtype: unicode or None
"""
return self.properties(force=force).get('defaultsort')
@deprecate_arg('refresh', 'force')
def expand_text(self, force=False, includecomments=False):
"""Return the page text with all templates and parser words expanded.
@param force: force updating from the live site
@param includecomments: Also strip comments if includecomments
parameter is not True.
@rtype unicode or None
"""
if not hasattr(self, '_expanded_text') or (
self._expanded_text is None) or force:
if not self.text:
self._expanded_text = ''
return ''
self._expanded_text = self.site.expand_text(
self.text,
title=self.title(withSection=False),
includecomments=includecomments)
return self._expanded_text
def userName(self):
"""
Return name or IP address of last user to edit page.
@rtype: unicode
"""
return self.latest_revision.user
def isIpEdit(self):
"""
Return True if last editor was unregistered.
@rtype: bool
"""
return self.latest_revision.anon
def lastNonBotUser(self):
"""
Return name or IP address of last human/non-bot user to edit page.
Determine the most recent human editor out of the last revisions.
If it was not able to retrieve a human user, returns None.
If the edit was done by a bot which is no longer flagged as 'bot',
i.e. which is not returned by Site.botusers(), it will be returned
as a non-bot edit.
@rtype: unicode
"""
if hasattr(self, '_lastNonBotUser'):
return self._lastNonBotUser
self._lastNonBotUser = None
for entry in self.revisions():
if entry.user and (not self.site.isBot(entry.user)):
self._lastNonBotUser = entry.user
break
return self._lastNonBotUser
@remove_last_args(('datetime', ))
def editTime(self):
"""Return timestamp of last revision to page.
@rtype: pywikibot.Timestamp
"""
return self.latest_revision.timestamp
@property
@deprecated('latest_revision.parent_id (0 instead of -1 when no parent)')
def previous_revision_id(self):
"""
Return the revision id for the previous revision of this Page.
If the page has only one revision, it shall return -1.
@rtype: long
@raise AssertionError: Use on MediaWiki prior to v1.16.
"""
return self.latest_revision.parent_id or -1
@deprecated('latest_revision.parent_id (0 instead of -1 when no parent)')
def previousRevision(self):
"""
Return the revision id for the previous revision.
DEPRECATED: Use latest_revision.parent_id instead.
@rtype: long
@raise AssertionError: Use on MediaWiki prior to v1.16.
"""
return self.latest_revision.parent_id or -1
def exists(self):
"""Return True if page exists on the wiki, even if it's a redirect.
If the title includes a section, return False if this section isn't
found.
@rtype: bool
"""
return self.site.page_exists(self)
@property
def oldest_revision(self):
"""
Return the first revision of this page.
@rtype: L{Revision}
"""
return next(self.revisions(reverse=True, total=1))
def isRedirectPage(self):
"""Return True if this is a redirect, False if not or not existing."""
return self.site.page_isredirect(self)
def isStaticRedirect(self, force=False):
"""
Determine whether the page is a static redirect.
A static redirect must be a valid redirect, and contain the magic word
__STATICREDIRECT__.
@param force: Bypass local caching
@type force: bool
@rtype: bool
"""
found = False
if self.isRedirectPage():
staticKeys = self.site.getmagicwords('staticredirect')
text = self.get(get_redirect=True, force=force)
if staticKeys:
for key in staticKeys:
if key in text:
found = True
break
return found
    def isCategoryRedirect(self):
        """
        Return True if this is a category redirect page, False otherwise.
        @rtype: bool
        """
        if not self.is_categorypage():
            return False
        # _catredirect caches either the target category title (truthy)
        # or False when this category is not a redirect.
        if not hasattr(self, "_catredirect"):
            catredirs = self.site.category_redirects()
            for (template, args) in self.templatesWithParams():
                if template.title(withNamespace=False) in catredirs:
                    # Get target (first template argument)
                    try:
                        p = pywikibot.Page(self.site, args[0].strip(), ns=14)
                        if p.namespace() == 14:
                            self._catredirect = p.title()
                        else:
                            pywikibot.warning(
                                u"Target %s on %s is not a category"
                                % (p.title(asLink=True),
                                   self.title(asLink=True)))
                            self._catredirect = False
                    except IndexError:
                        # redirect template used without a target argument
                        pywikibot.warning(
                            u"No target for category redirect on %s"
                            % self.title(asLink=True))
                        self._catredirect = False
                    break
            else:
                # no category-redirect template found on the page
                self._catredirect = False
        return bool(self._catredirect)
def getCategoryRedirectTarget(self):
"""
If this is a category redirect, return the target category title.
@rtype: Category
"""
if self.isCategoryRedirect():
return Category(Link(self._catredirect, self.site))
raise pywikibot.IsNotRedirectPage(self)
    @deprecated("interwiki.page_empty_check(page)")
    def isEmpty(self):
        """
        Return True if the page text has less than 4 characters.
        Character count ignores language links and category links.
        Can raise the same exceptions as get().
        @rtype: bool
        """
        txt = self.get()
        # Strip language and category links before counting characters.
        txt = textlib.removeLanguageLinks(txt, site=self.site)
        txt = textlib.removeCategoryLinks(txt, site=self.site)
        return len(txt) < 4
def isTalkPage(self):
"""Return True if this page is in any talk namespace."""
ns = self.namespace()
return ns >= 0 and ns % 2 == 1
def toggleTalkPage(self):
"""
Return other member of the article-talk page pair for this Page.
If self is a talk page, returns the associated content page;
otherwise, returns the associated talk page. The returned page need
not actually exist on the wiki.
@return: Page or None if self is a special page.
@rtype: Page or None
"""
ns = self.namespace()
if ns < 0: # Special page
return
if self.isTalkPage():
if self.namespace() == 1:
return Page(self.site, self.title(withNamespace=False))
else:
return Page(self.site,
"%s:%s" % (self.site.namespace(ns - 1),
self.title(withNamespace=False)))
else:
return Page(self.site,
"%s:%s" % (self.site.namespace(ns + 1),
self.title(withNamespace=False)))
def is_categorypage(self):
"""Return True if the page is a Category, False otherwise."""
return self.namespace() == 14
    @deprecated('is_categorypage')
    def isCategory(self):
        """DEPRECATED: use is_categorypage instead."""
        # Kept as a thin alias for backward compatibility.
        return self.is_categorypage()
def is_filepage(self):
"""Return True if this is an file description page, False otherwise."""
return self.namespace() == 6
    @deprecated('is_filepage')
    def isImage(self):
        """DEPRECATED: use is_filepage instead."""
        # Kept as a thin alias for backward compatibility.
        return self.is_filepage()
    @remove_last_args(('get_Index', ))
    def isDisambig(self):
        """
        Return True if this is a disambiguation page, False otherwise.
        By default, it uses the Disambiguator extension's result. The
        identification relies on the presence of the __DISAMBIG__ magic word
        which may also be transcluded.
        If the Disambiguator extension isn't activated for the given site,
        the identification relies on the presence of specific templates.
        First load a list of template names from the Family file;
        if the value in the Family file is None or no entry was made, look for
        the list on [[MediaWiki:Disambiguationspage]]. If this page does not
        exist, take the MediaWiki message. 'Template:Disambig' is always
        assumed to be default, and will be appended regardless of its existence.
        @rtype: bool
        """
        if self.site.has_extension('Disambiguator'):
            # If the Disambiguator extension is loaded, use it
            return 'disambiguation' in self.properties()
        # Fallback: compare this page's templates against a per-site cached
        # set of known disambiguation template titles.
        if not hasattr(self.site, "_disambigtemplates"):
            try:
                default = set(self.site.family.disambig('_default'))
            except KeyError:
                default = set([u'Disambig'])
            try:
                distl = self.site.family.disambig(self.site.code,
                                                  fallback=False)
            except KeyError:
                # No per-language list in the Family file.
                distl = None
            if distl is None:
                disambigpages = Page(self.site,
                                     "MediaWiki:Disambiguationspage")
                if disambigpages.exists():
                    # Template-namespace (10) links on that page are the
                    # disambiguation templates.
                    disambigs = set(link.title(withNamespace=False)
                                    for link in disambigpages.linkedPages()
                                    if link.namespace() == 10)
                elif self.site.has_mediawiki_message('disambiguationspage'):
                    message = self.site.mediawiki_message(
                        'disambiguationspage').split(':', 1)[1]
                    # add the default template(s) for default mw message
                    # only
                    disambigs = set([first_upper(message)]) | default
                else:
                    disambigs = default
                self.site._disambigtemplates = disambigs
            else:
                # Normalize template capitalization
                self.site._disambigtemplates = set(
                    first_upper(t) for t in distl
                )
        templates = set(tl.title(withNamespace=False)
                        for tl in self.templates())
        disambigs = set()
        # always use cached disambig templates
        disambigs.update(self.site._disambigtemplates)
        # see if any template on this page is in the set of disambigs
        disambigInPage = disambigs.intersection(templates)
        # Pages in the Template namespace (10) never count as disambigs.
        return self.namespace() != 10 and len(disambigInPage) > 0
    @deprecated_args(step=None)
    def getReferences(self, follow_redirects=True, withTemplateInclusion=True,
                      onlyTemplateInclusion=False, redirectsOnly=False,
                      namespaces=None, total=None, content=False):
        """
        Return an iterator all pages that refer to or embed the page.
        If you need a full list of referring pages, use
        C{pages = list(s.getReferences())}
        @param follow_redirects: if True, also iterate pages that link to a
            redirect pointing to the page.
        @param withTemplateInclusion: if True, also iterate pages where self
            is used as a template.
        @param onlyTemplateInclusion: if True, only iterate pages where self
            is used as a template.
        @param redirectsOnly: if True, only iterate redirects to self.
        @param namespaces: only iterate pages in these namespaces
        @param total: iterate no more than this number of pages in total
        @param content: if True, retrieve the content of the current version
            of each referring page (default False)
        """
        # N.B.: this method intentionally overlaps with backlinks() and
        # embeddedin(). Depending on the interface, it may be more efficient
        # to implement those methods in the site interface and then combine
        # the results for this method, or to implement this method and then
        # split up the results for the others.
        return self.site.pagereferences(
            self,
            followRedirects=follow_redirects,
            filterRedirects=redirectsOnly,
            withTemplateInclusion=withTemplateInclusion,
            onlyTemplateInclusion=onlyTemplateInclusion,
            namespaces=namespaces,
            total=total,
            content=content
        )
    @deprecated_args(step=None)
    def backlinks(self, followRedirects=True, filterRedirects=None,
                  namespaces=None, total=None, content=False):
        """
        Return an iterator for pages that link to this page.
        @param followRedirects: if True, also iterate pages that link to a
            redirect pointing to the page.
        @param filterRedirects: if True, only iterate redirects; if False,
            omit redirects; if None, do not filter
        @param namespaces: only iterate pages in these namespaces
        @param total: iterate no more than this number of pages in total
        @param content: if True, retrieve the content of the current version
            of each referring page (default False)
        """
        # Thin wrapper; all filtering happens in the Site object.
        return self.site.pagebacklinks(
            self,
            followRedirects=followRedirects,
            filterRedirects=filterRedirects,
            namespaces=namespaces,
            total=total,
            content=content
        )
    @deprecated_args(step=None)
    def embeddedin(self, filter_redirects=None, namespaces=None,
                   total=None, content=False):
        """
        Return an iterator for pages that embed this page as a template.
        @param filter_redirects: if True, only iterate redirects; if False,
            omit redirects; if None, do not filter
        @param namespaces: only iterate pages in these namespaces
        @param total: iterate no more than this number of pages in total
        @param content: if True, retrieve the content of the current version
            of each embedding page (default False)
        """
        # Thin wrapper; all filtering happens in the Site object.
        return self.site.page_embeddedin(
            self,
            filterRedirects=filter_redirects,
            namespaces=namespaces,
            total=total,
            content=content
        )
    def protection(self):
        """
        Return a dictionary reflecting page protections.
        @rtype: dict
        """
        # Protection data is fetched (and possibly cached) by the Site.
        return self.site.page_restrictions(self)
    def applicable_protections(self):
        """
        Return the protection types allowed for that page.
        If the page doesn't exist it only returns "create". Otherwise it
        returns all protection types provided by the site, except "create".
        It also removes "upload" if that page is not in the File namespace.
        It is possible, that it returns an empty set, but only if original
        protection types were removed.
        @return: set of unicode
        @rtype: set
        """
        # New API since commit 32083235eb332c419df2063cf966b3400be7ee8a
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion('1.25wmf14'):
            # Recent MediaWiki reports applicable protections directly.
            self.site.loadpageinfo(self)
            return self._applicable_protections
        # Older MediaWiki: derive the set from the generic protection types.
        p_types = set(self.site.protection_types())
        if not self.exists():
            return set(['create']) if 'create' in p_types else set()
        else:
            p_types.remove('create')  # no existing page allows that
            if not self.is_filepage():  # only file pages allow upload
                p_types.remove('upload')
            return p_types
    def canBeEdited(self):
        """
        Determine whether the page may be edited.
        This returns True if and only if:
          - page is unprotected, and bot has an account for this site, or
          - page is protected, and bot has a sysop account for this site.
        @rtype: bool
        """
        # The decision is made by the Site, which knows account rights.
        return self.site.page_can_be_edited(self)
    def botMayEdit(self):
        """
        Determine whether the active bot is allowed to edit the page.
        This will be True if the page doesn't contain {{bots}} or
        {{nobots}}, or it contains them and the active bot is allowed to
        edit this page. (This method is only useful on those sites that
        recognize the bot-exclusion protocol; on other sites, it will always
        return True.)
        The framework enforces this restriction by default. It is possible
        to override this by setting ignore_bot_templates=True in
        user-config.py, or using page.put(force=True).
        @rtype: bool
        """
        # TODO: move this to Site object?
        # FIXME: templatesWithParams is defined in Page only.
        if not hasattr(self, 'templatesWithParams'):
            return True
        if config.ignore_bot_templates:  # Check the "master ignore switch"
            return True
        username = self.site.user()
        try:
            templates = self.templatesWithParams()
        except (pywikibot.NoPage,
                pywikibot.IsRedirectPage,
                pywikibot.SectionError):
            # Pages we cannot inspect are treated as editable.
            return True
        # go through all templates and look for any restriction
        # multiple bots/nobots templates are allowed
        # NOTE: the first {{bots}}/{{nobots}} template whose parameters
        # match (or an empty {{nobots}}/{{bots}}) decides the result.
        for template in templates:
            title = template[0].title(withNamespace=False)
            if title == 'Nobots':
                if len(template[1]) == 0:
                    # Bare {{nobots}} forbids all bots.
                    return False
                else:
                    bots = template[1][0].split(',')
                    if 'all' in bots or pywikibot.calledModuleName() in bots \
                       or username in bots:
                        return False
            elif title == 'Bots':
                if len(template[1]) == 0:
                    # Bare {{bots}} allows all bots.
                    return True
                else:
                    (ttype, bots) = template[1][0].split('=', 1)
                    bots = bots.split(',')
                    if ttype == 'allow':
                        return 'all' in bots or username in bots
                    if ttype == 'deny':
                        return not ('all' in bots or username in bots)
                    if ttype == 'allowscript':
                        return 'all' in bots or pywikibot.calledModuleName() in bots
                    if ttype == 'denyscript':
                        return not ('all' in bots or pywikibot.calledModuleName() in bots)
        # no restricting template found
        return True
    @deprecate_arg('async', 'asynchronous')  # T106230
    @deprecated_args(comment='summary', sysop=None)
    def save(self, summary=None, watch=None, minor=True, botflag=None,
             force=False, asynchronous=False, callback=None,
             apply_cosmetic_changes=None, quiet=False, **kwargs):
        """
        Save the current contents of page's text to the wiki.
        @param summary: The edit summary for the modification (optional, but
            most wikis strongly encourage its use)
        @type summary: unicode
        @param watch: Specify how the watchlist is affected by this edit, set
            to one of "watch", "unwatch", "preferences", "nochange":
            * watch: add the page to the watchlist
            * unwatch: remove the page from the watchlist
            * preferences: use the preference settings (Default)
            * nochange: don't change the watchlist
            If None (default), follow bot account's default settings
            For backward compatibility watch parameter may also be boolean:
            if True, add or if False, remove this Page to/from bot
            user's watchlist.
        @type watch: string, bool (deprecated) or None
        @param minor: if True, mark this edit as minor
        @type minor: bool
        @param botflag: if True, mark this edit as made by a bot (default:
            True if user has bot status, False if not)
        @param force: if True, ignore botMayEdit() setting
        @type force: bool
        @param asynchronous: if True, launch a separate thread to save
            asynchronously
        @param callback: a callable object that will be called after the
            page put operation. This object must take two arguments: (1) a
            Page object, and (2) an exception instance, which will be None
            if the page was saved successfully. The callback is intended for
            use by bots that need to keep track of which saves were
            successful.
        @param apply_cosmetic_changes: Overwrites the cosmetic_changes
            configuration value to this value unless it's None.
        @type apply_cosmetic_changes: bool or None
        @param quiet: enable/disable successful save operation message;
            defaults to False.
            In asynchronous mode, if True, it is up to the calling bot to
            manage the output e.g. via callback.
        @type quiet: bool
        """
        if not summary:
            summary = config.default_edit_summary
        # Map the deprecated boolean watch values onto API strings.
        if watch is True:
            watch = 'watch'
        elif watch is False:
            watch = 'unwatch'
        # Respect the {{bots}}/{{nobots}} exclusion protocol unless forced.
        if not force and not self.botMayEdit():
            raise pywikibot.OtherPageSaveError(
                self, "Editing restricted by {{bots}} template")
        self._save(summary=summary, watch=watch, minor=minor, botflag=botflag,
                   asynchronous=asynchronous, callback=callback,
                   cc=apply_cosmetic_changes, quiet=quiet, **kwargs)
    @allow_asynchronous
    def _save(self, summary=None, watch=None, minor=True, botflag=None,
              cc=None, quiet=False, **kwargs):
        """Helper function for save()."""
        link = self.title(asLink=True)
        # cc=None means "fall back to the config.cosmetic_changes default".
        if cc or cc is None and config.cosmetic_changes:
            summary = self._cosmetic_changes_hook(summary)
        done = self.site.editpage(self, summary=summary, minor=minor,
                                  watch=watch, bot=botflag, **kwargs)
        if not done:
            if not quiet:
                pywikibot.warning('Page %s not saved' % link)
            raise pywikibot.PageNotSaved(self)
        if not quiet:
            pywikibot.output('Page %s saved' % link)
    def _cosmetic_changes_hook(self, summary):
        """The cosmetic changes hook.
        Applies the cosmetic changes toolkit to self.text (if enabled for
        this site) and extends the edit summary when the text changed.
        @param summary: The current edit summary.
        @type summary: str
        @return: Modified edit summary if cosmetic changes has been done,
            else the old edit summary.
        @rtype: str
        """
        # Never apply cosmetic changes on talk pages or from scripts that
        # are explicitly denied in the configuration.
        if self.isTalkPage() or \
           pywikibot.calledModuleName() in config.cosmetic_changes_deny_script:
            return summary
        family = self.site.family.name
        # Determine whether cosmetic changes are enabled for this site,
        # honoring the mylang-only switch and the explicit enable list ...
        if config.cosmetic_changes_mylang_only:
            cc = ((family == config.family and
                   self.site.lang == config.mylang) or
                  family in list(config.cosmetic_changes_enable.keys()) and
                  self.site.lang in config.cosmetic_changes_enable[family])
        else:
            cc = True
        # ... and the explicit disable list.
        cc = (cc and not
              (family in list(config.cosmetic_changes_disable.keys()) and
               self.site.lang in config.cosmetic_changes_disable[family]))
        if not cc:
            return summary
        old = self.text
        pywikibot.log(u'Cosmetic changes for %s-%s enabled.'
                      % (family, self.site.lang))
        # cc depends on page directly and via several other imports
        from pywikibot.cosmetic_changes import (
            CANCEL_MATCH, CosmeticChangesToolkit)  # noqa
        ccToolkit = CosmeticChangesToolkit(self.site,
                                           namespace=self.namespace(),
                                           pageTitle=self.title(),
                                           ignore=CANCEL_MATCH)
        self.text = ccToolkit.change(old)
        # Only flag the summary if the change is more than line-ending noise.
        if summary and old.strip().replace(
                '\r\n', '\n') != self.text.strip().replace('\r\n', '\n'):
            from pywikibot import i18n
            summary += i18n.twtranslate(self.site, 'cosmetic_changes-append')
        return summary
    @deprecate_arg('async', 'asynchronous')  # T106230
    @deprecated_args(comment='summary')
    def put(self, newtext, summary=u'', watchArticle=None, minorEdit=True,
            botflag=None, force=False, asynchronous=False, callback=None, **kwargs):
        """
        Save the page with the contents of the first argument as the text.
        This method is maintained primarily for backwards-compatibility.
        For new code, using Page.save() is preferred. See save() method
        docs for all parameters not listed here.
        @param newtext: The complete text of the revised page.
        @type newtext: unicode
        """
        # Assign the new text, then delegate all the work to save().
        self.text = newtext
        self.save(summary=summary, watch=watchArticle, minor=minorEdit,
                  botflag=botflag, force=force, asynchronous=asynchronous,
                  callback=callback, **kwargs)
    @deprecated_args(comment='summary')
    def put_async(self, newtext, summary=u'', watchArticle=None,
                  minorEdit=True, botflag=None, force=False, callback=None,
                  **kwargs):
        """
        Put page on queue to be saved to wiki asynchronously.
        Asynchronous version of put (takes the same arguments), which places
        pages on a queue to be saved by a daemon thread. All arguments are
        the same as for .put(). This version is maintained solely for
        backwards-compatibility.
        """
        # Same as put() but with asynchronous saving forced on.
        self.put(newtext, summary=summary, watchArticle=watchArticle,
                 minorEdit=minorEdit, botflag=botflag, force=force,
                 asynchronous=True, callback=callback, **kwargs)
    def watch(self, unwatch=False):
        """
        Add or remove this page to/from bot account's watchlist.
        @param unwatch: True to unwatch, False (default) to watch.
        @type unwatch: bool
        @return: True if successful, False otherwise.
        @rtype: bool
        """
        # The watchlist update is performed by the Site object.
        return self.site.watch(self, unwatch)
def clear_cache(self):
"""Clear the cached attributes of the page."""
self._revisions = {}
for attr in self._cache_attrs:
try:
delattr(self, attr)
except AttributeError:
pass
    def purge(self, **kwargs):
        """
        Purge the server's cache for this page.
        @rtype: bool
        """
        # Drop the local caches as well, so they are refetched after purge.
        self.clear_cache()
        return self.site.purgepages([self], **kwargs)
def touch(self, callback=None, botflag=False, **kwargs):
"""
Make a touch edit for this page.
See save() method docs for all parameters.
The following parameters will be overridden by this method:
- summary, watch, minor, force, asynchronous
Parameter botflag is False by default.
minor and botflag parameters are set to False which prevents hiding
the edit when it becomes a real edit due to a bug.
"""
if self.exists():
# ensure always get the page text and not to change it.
del self.text
self.save(summary='Pywikibot touch edit', watch='nochange',
minor=False, botflag=botflag, force=True,
asynchronous=False, callback=callback,
apply_cosmetic_changes=False, **kwargs)
else:
raise pywikibot.NoPage(self)
    @deprecated_args(step=None)
    def linkedPages(self, namespaces=None, total=None,
                    content=False):
        """
        Iterate Pages that this Page links to.
        Only returns pages from "normal" internal links. Image and category
        links are omitted unless prefixed with ":". Embedded templates are
        omitted (but links within them are returned). All interwiki and
        external links are omitted.
        @param namespaces: only iterate links in these namespaces
        @param namespaces: int, or list of ints
        @param total: iterate no more than this number of pages in total
        @type total: int
        @param content: if True, retrieve the content of the current version
            of each linked page (default False)
        @type content: bool
        @return: a generator that yields Page objects.
        @rtype: generator
        """
        # Link extraction is delegated to the Site object.
        return self.site.pagelinks(self, namespaces=namespaces,
                                   total=total, content=content)
    def interwiki(self, expand=True):
        """
        Iterate interwiki links in the page text, excluding language links.
        @param expand: if True (default), include interwiki links found in
            templates transcluded onto this page; if False, only iterate
            interwiki links found in this page's own wikitext
        @type expand: bool
        @return: a generator that yields Link objects
        @rtype: generator
        """
        # This function does not exist in the API, so it has to be
        # implemented by screen-scraping
        if expand:
            text = self.expand_text()
        else:
            text = self.text
        # Scan the wikitext (with <nowiki>/comment parts removed) for links.
        for linkmatch in pywikibot.link_regex.finditer(
                textlib.removeDisabledParts(text)):
            linktitle = linkmatch.group("title")
            link = Link(linktitle, self.site)
            # only yield links that are to a different site and that
            # are not language links
            try:
                if link.site != self.site:
                    if linktitle.lstrip().startswith(":"):
                        # initial ":" indicates not a language link
                        yield link
                    elif link.site.family != self.site.family:
                        # link to a different family is not a language link
                        yield link
            except pywikibot.Error:
                # ignore any links with invalid contents
                continue
def langlinks(self, include_obsolete=False):
"""
Return a list of all inter-language Links on this page.
@param include_obsolete: if true, return even Link objects whose site
is obsolete
@type include_obsolete: bool
@return: list of Link objects.
@rtype: list
"""
# Note: We preload a list of *all* langlinks, including links to
# obsolete sites, and store that in self._langlinks. We then filter
# this list if the method was called with include_obsolete=False
# (which is the default)
if not hasattr(self, '_langlinks'):
self._langlinks = list(self.iterlanglinks(include_obsolete=True))
if include_obsolete:
return self._langlinks
else:
return [i for i in self._langlinks if not i.site.obsolete]
    @deprecated_args(step=None)
    def iterlanglinks(self, total=None, include_obsolete=False):
        """
        Iterate all inter-language links on this page.
        @param total: iterate no more than this number of pages in total
        @param include_obsolete: if true, yield even Link object whose site
            is obsolete
        @type include_obsolete: bool
        @return: a generator that yields Link objects.
        @rtype: generator
        """
        # Serve from the cache populated by langlinks() when available.
        if hasattr(self, '_langlinks'):
            return iter(self.langlinks(include_obsolete=include_obsolete))
        # XXX We might want to fill _langlinks when the Site
        # method is called. If we do this, we'll have to think
        # about what will happen if the generator is not completely
        # iterated upon.
        return self.site.pagelanglinks(self, total=total,
                                       include_obsolete=include_obsolete)
    def data_item(self):
        """
        Convenience function to get the Wikibase item of a page.
        @rtype: ItemPage
        """
        # Resolution of the connected item is handled by ItemPage.
        return ItemPage.fromPage(self)
    @deprecate_arg('tllimit', None)
    @deprecated("Page.templates()")
    def getTemplates(self):
        """DEPRECATED. Use templates()."""
        # Kept as a thin alias for backward compatibility.
        return self.templates()
def templates(self, content=False):
"""
Return a list of Page objects for templates used on this Page.
Template parameters are ignored. This method only returns embedded
templates, not template pages that happen to be referenced through
a normal link.
@param content: if True, retrieve the content of the current version
of each template (default False)
@param content: bool
"""
# Data might have been preloaded
if not hasattr(self, '_templates'):
self._templates = list(self.itertemplates(content=content))
return self._templates
    @deprecated_args(step=None)
    def itertemplates(self, total=None, content=False):
        """
        Iterate Page objects for templates used on this Page.
        Template parameters are ignored. This method only returns embedded
        templates, not template pages that happen to be referenced through
        a normal link.
        @param total: iterate no more than this number of pages in total
        @param content: if True, retrieve the content of the current version
            of each template (default False)
        @param content: bool
        """
        # Serve from the cache populated by templates() when available.
        if hasattr(self, '_templates'):
            return iter(self._templates)
        return self.site.pagetemplates(self, total=total, content=content)
    @deprecated_args(followRedirects=None, loose=None, step=None)
    def imagelinks(self, total=None, content=False):
        """
        Iterate FilePage objects for images displayed on this Page.
        @param total: iterate no more than this number of pages in total
        @param content: if True, retrieve the content of the current version
            of each image description page (default False)
        @return: a generator that yields FilePage objects.
        """
        # Image enumeration is delegated to the Site object.
        return self.site.pageimages(self, total=total, content=content)
    @deprecated_args(nofollow_redirects=None, get_redirect=None, step=None)
    def categories(self, withSortKey=False, total=None, content=False):
        """
        Iterate categories that the article is in.
        @param withSortKey: if True, include the sort key in each Category.
        @param total: iterate no more than this number of pages in total
        @param content: if True, retrieve the content of the current version
            of each category description page (default False)
        @return: a generator that yields Category objects.
        @rtype: generator
        """
        # FIXME: bug T75561: withSortKey is ignored by Site.pagecategories
        if withSortKey:
            raise NotImplementedError('withSortKey is not implemented')
        return self.site.pagecategories(self, total=total, content=content)
    @deprecated_args(step=None)
    def extlinks(self, total=None):
        """
        Iterate all external URLs (not interwiki links) from this page.
        @param total: iterate no more than this number of pages in total
        @return: a generator that yields unicode objects containing URLs.
        @rtype: generator
        """
        # External link enumeration is delegated to the Site object.
        return self.site.page_extlinks(self, total=total)
def coordinates(self, primary_only=False):
"""
Return a list of Coordinate objects for points on the page.
Uses the MediaWiki extension GeoData.
@param primary_only: Only return the coordinate indicated to be primary
@return: A list of Coordinate objects
@rtype: list
"""
if not hasattr(self, '_coords'):
self._coords = []
self.site.loadcoordinfo(self)
if primary_only:
return self._coords[0] if len(self._coords) > 0 else None
else:
return self._coords
    @need_version('1.20')
    def page_image(self):
        """
        Return the most appropriate image on the page.
        Uses the MediaWiki extension PageImages.
        @return: A FilePage object
        @rtype: FilePage
        """
        # Fetch and cache the page image on first access; stays None when
        # the site reports no page image.
        if not hasattr(self, '_pageimage'):
            self._pageimage = None
            self.site.loadpageimage(self)
        return self._pageimage
    def getRedirectTarget(self):
        """
        Return a Page object for the target this Page redirects to.
        If this page is not a redirect page, will raise an IsNotRedirectPage
        exception. This method also can raise a NoPage exception.
        @rtype: pywikibot.Page
        """
        # Target resolution is delegated to the Site object.
        return self.site.getredirtarget(self)
    @deprecated('moved_target()')
    def getMovedTarget(self):
        """
        Return a Page object for the target this Page was moved to.
        DEPRECATED: Use Page.moved_target().
        If this page was not moved, it will raise a NoPage exception.
        This method also works if the source was already deleted.
        @rtype: pywikibot.Page
        @raises NoPage: this page was not moved
        """
        # Translate the modern NoMoveTarget into the historic NoPage.
        try:
            return self.moved_target()
        except pywikibot.NoMoveTarget:
            raise pywikibot.NoPage(self)
def moved_target(self):
"""
Return a Page object for the target this Page was moved to.
If this page was not moved, it will raise a NoMoveTarget exception.
This method also works if the source was already deleted.
@rtype: pywikibot.Page
@raises NoMoveTarget: this page was not moved
"""
gen = iter(self.site.logevents(logtype='move', page=self, total=1))
try:
lastmove = next(gen)
except StopIteration:
raise pywikibot.NoMoveTarget(self)
else:
return lastmove.target_page
    @deprecated_args(getText='content', reverseOrder='reverse', step=None)
    def revisions(self, reverse=False, total=None, content=False,
                  rollback=False, starttime=None, endtime=None):
        """Generator which loads the version history as Revision instances."""
        # TODO: Only request uncached revisions
        self.site.loadrevisions(self, getText=content, rvdir=reverse,
                                starttime=starttime, endtime=endtime,
                                total=total, rollback=rollback)
        # _revisions is keyed by revision id; sort ids to get newest first
        # (or oldest first when reverse=True) and honor the total limit.
        return (self._revisions[rev] for rev in
                sorted(self._revisions, reverse=not reverse)[:total])
# BREAKING CHANGE: in old framework, default value for getVersionHistory
# returned no more than 500 revisions; now, it iterates
# all revisions unless 'total' argument is used
    @deprecated('Page.revisions()')
    @deprecated_args(forceReload=None, revCount='total', step=None,
                     getAll=None, reverseOrder='reverse')
    def getVersionHistory(self, reverse=False, total=None):
        """
        Load the version history page and return history information.
        Return value is a list of tuples, where each tuple represents one
        edit and is built of revision id, edit date/time, user name, and
        edit summary. Starts with the most current revision, unless
        reverse is True.
        @param total: iterate no more than this number of revisions in total
        """
        # Each Revision provides a compact tuple via hist_entry().
        return [rev.hist_entry()
                for rev in self.revisions(reverse=reverse, total=total)
                ]
@deprecated_args(forceReload=None, reverseOrder='reverse', step=None)
def getVersionHistoryTable(self, reverse=False, total=None):
"""Return the version history as a wiki table."""
result = '{| class="wikitable"\n'
result += '! oldid || date/time || username || edit summary\n'
for entry in self.revisions(reverse=reverse, total=total):
result += '|----\n'
result += ('| {r.revid} || {r.timestamp} || {r.user} || '
'<nowiki>{r.comment}</nowiki>\n'.format(r=entry))
result += '|}\n'
return result
    @deprecated("Page.revisions(content=True)")
    @deprecated_args(reverseOrder='reverse', rollback=None, step=None)
    def fullVersionHistory(self, reverse=False, total=None):
        """Iterate previous versions including wikitext.
        Takes same arguments as getVersionHistory.
        """
        # Like getVersionHistory() but each entry also carries the text.
        return [rev.full_hist_entry()
                for rev in self.revisions(content=True, reverse=reverse,
                                          total=total)
                ]
    @deprecated_args(step=None)
    def contributors(self, total=None, starttime=None, endtime=None):
        """
        Compile contributors of this page with edit counts.
        @param total: iterate no more than this number of revisions in total
        @param starttime: retrieve revisions starting at this Timestamp
        @param endtime: retrieve revisions ending at this Timestamp
        @return: number of edits for each username
        @rtype: L{collections.Counter}
        """
        # Counter maps each revision author to their number of edits.
        return Counter(rev.user for rev in
                       self.revisions(total=total,
                                      starttime=starttime, endtime=endtime))
    @deprecated('contributors()')
    @deprecated_args(step=None)
    def contributingUsers(self, total=None):
        """
        Return a set of usernames (or IPs) of users who edited this page.
        @param total: iterate no more than this number of revisions in total
        @rtype: set
        """
        # The Counter keys are exactly the distinct usernames.
        return self.contributors(total=total).keys()
def revision_count(self, contributors=None):
"""
Determine number of edits from a set of contributors.
@param contributors: contributor usernames
@type contributors: iterable of str
@return: number of edits for all provided usernames
@rtype: int
"""
if not contributors:
return len(list(self.revisions()))
cnt = self.contributors()
return sum(cnt[username] for username in contributors)
    @deprecated('oldest_revision')
    def getCreator(self):
        """
        Get the first revision of the page.
        DEPRECATED: Use Page.oldest_revision.
        @rtype: tuple(username, Timestamp)
        """
        # Unpack the oldest revision into the historic tuple form.
        result = self.oldest_revision
        return result.user, unicode(result.timestamp.isoformat())
    @deprecated('contributors() or revisions()')
    @deprecated_args(limit="total")
    def getLatestEditors(self, total=1):
        """
        Get a list of revision information of the last total edits.
        DEPRECATED: Use Page.revisions.
        @param total: iterate no more than this number of revisions in total
        @return: list of dict, each dict containing the username and Timestamp
        @rtype: list
        """
        # Newest revisions come first in self.revisions().
        return [{'user': rev.user, 'timestamp': unicode(rev.timestamp.isoformat())}
                for rev in self.revisions(total=total)]
    def merge_history(self, dest, timestamp=None, reason=None):
        """
        Merge revisions from this page into another page.
        See L{APISite.merge_history} for details.
        @param dest: Destination page to which revisions will be merged
        @type dest: pywikibot.Page
        @param timestamp: Revisions from this page dating up to this timestamp
            will be merged into the destination page (if not given or False,
            all revisions will be merged)
        @type timestamp: pywikibot.Timestamp
        @param reason: Optional reason for the history merge
        @type reason: str
        """
        # The actual merge is performed server-side via the Site object.
        self.site.merge_history(self, dest, timestamp, reason)
    @deprecate_arg("throttle", None)
    def move(self, newtitle, reason=None, movetalkpage=True, sysop=False,
             deleteAndMove=False, safe=True):
        """
        Move this page to a new title.
        @param newtitle: The new page title.
        @param reason: The edit summary for the move.
        @param movetalkpage: If true, move this page's talk page (if it exists)
        @param sysop: Try to move using sysop account, if available
        @param deleteAndMove: if move succeeds, delete the old page
            (usually requires sysop privileges, depending on wiki settings)
        @param safe: If false, attempt to delete existing page at newtitle
            (if there is one) and then move this page to that title
        """
        # Ask the operator interactively when no reason was supplied.
        if reason is None:
            pywikibot.output(u'Moving %s to [[%s]].'
                             % (self.title(asLink=True), newtitle))
            reason = pywikibot.input(u'Please enter a reason for the move:')
        # TODO: implement "safe" parameter (Is this necessary ?)
        # TODO: implement "sysop" parameter
        return self.site.movepage(self, newtitle, reason,
                                  movetalk=movetalkpage,
                                  noredirect=deleteAndMove)
    @deprecate_arg("throttle", None)
    def delete(self, reason=None, prompt=True, mark=False, quit=False):
        """
        Delete the page from the wiki. Requires administrator status.
        @param reason: The edit summary for the deletion, or rationale
            for deletion if requesting. If None, ask for it.
        @param prompt: If true, prompt user for confirmation before deleting.
        @param mark: If true, and user does not have sysop rights, place a
            speedy-deletion request on the page instead. If false, non-sysops
            will be asked before marking pages for deletion.
        @param quit: show also the quit option, when asking for confirmation.
        """
        # Ask the operator interactively when no reason was supplied.
        if reason is None:
            pywikibot.output(u'Deleting %s.' % (self.title(asLink=True)))
            reason = pywikibot.input(u'Please enter a reason for the deletion:')
        # If user is a sysop, delete the page
        if self.site.username(sysop=True):
            answer = u'y'
            if prompt and not hasattr(self.site, '_noDeletePrompt'):
                answer = pywikibot.input_choice(
                    u'Do you want to delete %s?' % self.title(
                        asLink=True, forceInterwiki=True),
                    [('Yes', 'y'), ('No', 'n'), ('All', 'a')],
                    'n', automatic_quit=quit)
            if answer == 'a':
                # 'All' suppresses the prompt for the rest of this run.
                answer = 'y'
                self.site._noDeletePrompt = True
            if answer == 'y':
                return self.site.deletepage(self, reason)
        else:  # Otherwise mark it for deletion
            if mark or hasattr(self.site, '_noMarkDeletePrompt'):
                answer = 'y'
            else:
                answer = pywikibot.input_choice(
                    u"Can't delete %s; do you want to mark it "
                    "for deletion instead?" % self.title(asLink=True,
                                                         forceInterwiki=True),
                    [('Yes', 'y'), ('No', 'n'), ('All', 'a')],
                    'n', automatic_quit=False)
            if answer == 'a':
                # 'All' suppresses the prompt for the rest of this run.
                answer = 'y'
                self.site._noMarkDeletePrompt = True
            if answer == 'y':
                # Prepend a speedy-deletion request template to the page.
                template = '{{delete|1=%s}}\n' % reason
                self.text = template + self.text
                return self.save(summary=reason)
@deprecated_args(step=None)
def loadDeletedRevisions(self, total=None):
"""
Retrieve deleted revisions for this Page.
Stores all revisions' timestamps, dates, editors and comments in
self._deletedRevs attribute.
@return: iterator of timestamps (which can be used to retrieve
revisions later on).
@rtype: generator
"""
if not hasattr(self, "_deletedRevs"):
self._deletedRevs = {}
for item in self.site.deletedrevs(self, total=total):
for rev in item.get("revisions", []):
self._deletedRevs[rev['timestamp']] = rev
yield rev['timestamp']
    def getDeletedRevision(self, timestamp, retrieveText=False):
        """
        Return a particular deleted revision by timestamp.

        NOTE(review): despite the historical description below, the value
        actually returned is the API revision dict (or None when the
        timestamp is not found) — verify against callers before changing.

        @param timestamp: timestamp of the deleted revision to fetch
        @param retrieveText: if True, also fetch the revision content
        @return: a list of [date, editor, comment, text, restoration
            marker]. text will be None, unless retrieveText is True (or has
            been retrieved earlier). If timestamp is not found, returns
            None.
        @rtype: list
        """
        # Serve from the cache filled by loadDeletedRevisions() when
        # possible; only hit the API when text is needed but missing.
        if hasattr(self, "_deletedRevs"):
            if timestamp in self._deletedRevs and (
                    (not retrieveText) or
                    'content' in self._deletedRevs[timestamp]):
                return self._deletedRevs[timestamp]
        for item in self.site.deletedrevs(self, start=timestamp,
                                          get_text=retrieveText, total=1):
            # should only be one item with one revision
            if item['title'] == self.title:
                if "revisions" in item:
                    return item["revisions"][0]
def markDeletedRevision(self, timestamp, undelete=True):
"""
Mark the revision identified by timestamp for undeletion.
@param undelete: if False, mark the revision to remain deleted.
@type undelete: bool
"""
if not hasattr(self, "_deletedRevs"):
self.loadDeletedRevisions()
if timestamp not in self._deletedRevs:
raise ValueError(u'Timestamp %d is not a deleted revision' % timestamp)
self._deletedRevs[timestamp]['marked'] = undelete
    @deprecated_args(comment='reason', throttle=None)
    def undelete(self, reason=None):
        """
        Undelete revisions based on the markers set by previous calls.

        If no calls have been made since loadDeletedRevisions(), everything
        will be restored.

        Simplest case::

            Page(...).undelete('This will restore all revisions')

        More complex::

            pg = Page(...)
            revs = pg.loadDeletedRevisions()
            for rev in revs:
                if ... #decide whether to undelete a revision
                    pg.markDeletedRevision(rev) #mark for undeletion
            pg.undelete('This will restore only selected revisions.')

        @param reason: Reason for the action.
        @type reason: basestring
        """
        # An empty list tells the site to restore every deleted revision;
        # otherwise only the timestamps explicitly marked are restored.
        if hasattr(self, "_deletedRevs"):
            undelete_revs = [ts for ts, rev in self._deletedRevs.items()
                             if 'marked' in rev and rev['marked']]
        else:
            undelete_revs = []
        if reason is None:
            warn('Not passing a reason for undelete() is deprecated.',
                 DeprecationWarning)
            pywikibot.output(u'Undeleting %s.' % (self.title(asLink=True)))
            reason = pywikibot.input(u'Please enter a reason for the undeletion:')
        self.site.undelete_page(self, reason, undelete_revs)
    @deprecate_arg("throttle", None)
    def protect(self, edit=False, move=False, create=None, upload=None,
                unprotect=False, reason=None, prompt=None, protections=None,
                **kwargs):
        """
        Protect or unprotect a wiki page. Requires administrator status.

        Valid protection levels (in MediaWiki 1.12) are '' (equivalent to
        'none'), 'autoconfirmed', and 'sysop'. If None is given, however,
        that protection will be skipped.

        @param protections: A dict mapping type of protection to protection
            level of that type.
        @type protections: dict
        @param reason: Reason for the action
        @type reason: basestring
        @param prompt: Whether to ask user for confirmation (deprecated).
            Defaults to protections is None
        @type prompt: bool
        """
        def process_deprecated_arg(value, arg_name):
            # Translate one legacy per-type keyword (edit/move/create/upload)
            # into an entry of the modern "protections" dict.
            # if protections was set and value is None, don't interpret that
            # argument. But otherwise warn that the parameter was set
            # (even implicit)
            if called_using_deprecated_arg:
                if value is False:  # explicit test for False (don't use not)
                    value = "sysop"
                if value == "none":  # 'none' doesn't seem do be accepted
                    value = ""
                if value is not None:  # empty string is allowed
                    protections[arg_name] = value
                    warn(u'"protections" argument of protect() replaces "{0}"'
                         .format(arg_name),
                         DeprecationWarning)
            else:
                if value:
                    warn(u'"protections" argument of protect() replaces "{0}";'
                         u' cannot use both.'.format(arg_name),
                         RuntimeWarning)
        # buffer that, because it might get changed
        called_using_deprecated_arg = protections is None
        if called_using_deprecated_arg:
            protections = {}
            process_deprecated_arg(edit, "edit")
            process_deprecated_arg(move, "move")
            process_deprecated_arg(create, "create")
            process_deprecated_arg(upload, "upload")
        if reason is None:
            pywikibot.output(u'Preparing to protection change of %s.'
                             % (self.title(asLink=True)))
            reason = pywikibot.input(u'Please enter a reason for the action:')
        if unprotect:
            warn(u'"unprotect" argument of protect() is deprecated',
                 DeprecationWarning, 2)
            # Unprotecting means setting every applicable type to level ''.
            protections = dict(
                (p_type, "") for p_type in self.applicable_protections())
        answer = 'y'
        if called_using_deprecated_arg and prompt is None:
            prompt = True
        if prompt:
            warn(u'"prompt" argument of protect() is deprecated',
                 DeprecationWarning, 2)
        if prompt and not hasattr(self.site, '_noProtectPrompt'):
            answer = pywikibot.input_choice(
                u'Do you want to change the protection level of %s?'
                % self.title(asLink=True, forceInterwiki=True),
                [('Yes', 'y'), ('No', 'n'), ('All', 'a')],
                'n', automatic_quit=False)
            if answer == 'a':
                # 'All' suppresses further prompts for this site.
                answer = 'y'
                self.site._noProtectPrompt = True
        if answer == 'y':
            return self.site.protect(self, protections, reason, **kwargs)
@deprecated_args(comment='summary')
def change_category(self, oldCat, newCat, summary=None, sortKey=None,
inPlace=True, include=[]):
"""
Remove page from oldCat and add it to newCat.
@param oldCat: category to be removed
@type oldCat: Category
@param newCat: category to be added, if any
@type newCat: Category or None
@param summary: string to use as an edit summary
@param sortKey: sortKey to use for the added category.
Unused if newCat is None, or if inPlace=True
If sortKey=True, the sortKey used for oldCat will be used.
@param inPlace: if True, change categories in place rather than
rearranging them.
@param include: list of tags not to be disabled by default in relevant
textlib functions, where CategoryLinks can be searched.
@type include: list
@return: True if page was saved changed, otherwise False.
@rtype: bool
"""
# get list of Category objects the article is in and remove possible
# duplicates
cats = []
for cat in textlib.getCategoryLinks(self.text, site=self.site,
include=include):
if cat not in cats:
cats.append(cat)
if not self.canBeEdited():
pywikibot.output(u"Can't edit %s, skipping it..."
% self.title(asLink=True))
return False
if oldCat not in cats:
pywikibot.error(u'%s is not in category %s!'
% (self.title(asLink=True), oldCat.title()))
return False
# This prevents the bot from adding newCat if it is already present.
if newCat in cats:
newCat = None
oldtext = self.text
if inPlace or self.namespace() == 10:
newtext = textlib.replaceCategoryInPlace(oldtext, oldCat, newCat,
site=self.site)
else:
old_cat_pos = cats.index(oldCat)
if newCat:
if sortKey is True:
# Fetch sortKey from oldCat in current page.
sortKey = cats[old_cat_pos].sortKey
cats[old_cat_pos] = Category(self.site, newCat.title(),
sortKey=sortKey)
else:
cats.pop(old_cat_pos)
try:
newtext = textlib.replaceCategoryLinks(oldtext, cats)
except ValueError:
# Make sure that the only way replaceCategoryLinks() can return
# a ValueError is in the case of interwiki links to self.
pywikibot.output(u'Skipping %s because of interwiki link to '
u'self' % self.title())
return False
if oldtext != newtext:
try:
self.put(newtext, summary)
return True
except pywikibot.PageSaveRelatedError as error:
pywikibot.output(u'Page %s not saved: %s'
% (self.title(asLink=True),
error))
except pywikibot.NoUsername:
pywikibot.output(u'Page %s not saved; sysop privileges '
u'required.' % self.title(asLink=True))
return False
    @deprecated('Page.is_flow_page()')
    def isFlowPage(self):
        """DEPRECATED: use self.is_flow_page instead."""
        # Backwards-compatible alias kept for old scripts.
        return self.is_flow_page()
    def is_flow_page(self):
        """
        Whether a page is a Flow page.

        @rtype: bool
        """
        # A Flow board is identified purely by its content model.
        return self.content_model == 'flow-board'
# ####### DEPRECATED METHODS ########
    @deprecated("Site.encoding()")
    def encoding(self):
        """DEPRECATED: use self.site.encoding instead."""
        # Kept only for backward compatibility with old scripts.
        return self.site.encoding()
    @deprecated("Page.title(withNamespace=False)")
    def titleWithoutNamespace(self, underscore=False):
        """DEPRECATED: use self.title(withNamespace=False) instead."""
        # Also strips any section anchor, matching the historical behaviour.
        return self.title(underscore=underscore, withNamespace=False,
                          withSection=False)
    @deprecated("Page.title(as_filename=True)")
    def titleForFilename(self):
        """DEPRECATED: use self.title(as_filename=True) instead."""
        # Backwards-compatible alias kept for old scripts.
        return self.title(as_filename=True)
    @deprecated("Page.title(withSection=False)")
    def sectionFreeTitle(self, underscore=False):
        """DEPRECATED: use self.title(withSection=False) instead."""
        # Backwards-compatible alias kept for old scripts.
        return self.title(underscore=underscore, withSection=False)
    @deprecated("Page.title(asLink=True)")
    def aslink(self, forceInterwiki=False, textlink=False, noInterwiki=False):
        """DEPRECATED: use self.title(asLink=True) instead."""
        # Note the inverted sense: noInterwiki maps to allowInterwiki.
        return self.title(asLink=True, forceInterwiki=forceInterwiki,
                          allowInterwiki=not noInterwiki, textlink=textlink)
    @deprecated("Page.title(asUrl=True)")
    def urlname(self):
        """Return the Page title encoded for use in an URL.

        DEPRECATED: use self.title(asUrl=True) instead.
        """
        return self.title(asUrl=True)
@deprecated('Page.protection()')
def getRestrictions(self):
"""DEPRECATED. Use self.protection() instead."""
restrictions = self.protection()
return dict((k, list(restrictions[k])) for k in restrictions)
# ###### DISABLED METHODS (warnings provided) ######
# these methods are easily replaced by editing the page's text using
# textlib methods and then using put() on the result.
    def removeImage(self, image, put=False, summary=None, safe=True):
        """Old method to remove all instances of an image from page."""
        # Intentionally a no-op: only emits a not-implemented warning.
        warn('Page.removeImage() is no longer supported.',
             _NotImplementedWarning, 2)
    def replaceImage(self, image, replacement=None, put=False, summary=None,
                     safe=True):
        """Old method to replace all instances of an image with another."""
        # Intentionally a no-op: only emits a not-implemented warning.
        warn('Page.replaceImage() is no longer supported.',
             _NotImplementedWarning, 2)
class Page(BasePage):
    """Page: A MediaWiki page."""
    @deprecated_args(defaultNamespace='ns', insite=None)
    def __init__(self, source, title=u"", ns=0):
        """Instantiate a Page object."""
        if isinstance(source, pywikibot.site.BaseSite):
            if not title:
                raise ValueError(u'Title must be specified and not empty '
                                 'if source is a Site.')
        super(Page, self).__init__(source, title, ns)
    @property
    def raw_extracted_templates(self):
        """
        Extract templates using L{textlib.extract_templates_and_params}.

        Disabled parts and whitespace are stripped, except for
        whitespace in anonymous positional arguments.
        This value is cached.

        @rtype: list of (str, OrderedDict)
        """
        if not hasattr(self, '_raw_extracted_templates'):
            templates = textlib.extract_templates_and_params(
                self.text, True, True)
            self._raw_extracted_templates = templates
        return self._raw_extracted_templates
    @deprecate_arg("get_redirect", None)
    def templatesWithParams(self):
        """
        Return templates used on this Page.

        The templates are extracted by L{textlib.extract_templates_and_params},
        with positional arguments placed first in order, and each named
        argument appearing as 'name=value'.

        All parameter keys and values for each template are stripped of
        whitespace.

        @return: a list of tuples with one tuple for each template invocation
            in the page, with the template Page as the first entry and a list of
            parameters as the second entry.
        @rtype: list of (Page, list)
        """
        # WARNING: may not return all templates used in particularly
        # intricate cases such as template substitution
        titles = [t.title() for t in self.templates()]
        templates = self.raw_extracted_templates
        # backwards-compatibility: convert the dict returned as the second
        # element into a list in the format used by old scripts
        result = []
        for template in templates:
            try:
                link = pywikibot.Link(template[0], self.site,
                                      defaultNamespace=10)
                # Skip anything that self.templates() does not recognise
                # as a template actually used on the page.
                if link.canonical_title() not in titles:
                    continue
            except pywikibot.Error:
                # this is a parser function or magic word, not template name
                # the template name might also contain invalid parts
                continue
            args = template[1]
            intkeys = {}
            named = {}
            positional = []
            # Split arguments into integer-keyed (candidate positional)
            # and genuinely named parameters.
            for key in sorted(args):
                try:
                    intkeys[int(key)] = args[key]
                except ValueError:
                    named[key] = args[key]
            for i in range(1, len(intkeys) + 1):
                # only those args with consecutive integer keys can be
                # treated as positional; an integer could also be used
                # (out of order) as the key for a named argument
                # example: {{tmp|one|two|5=five|three}}
                if i in intkeys:
                    positional.append(intkeys[i])
                else:
                    for k in intkeys:
                        if k < 1 or k >= i:
                            named[str(k)] = intkeys[k]
                    break
            for name in named:
                positional.append("%s=%s" % (name, named[name]))
            result.append((pywikibot.Page(link, self.site), positional))
        return result
    def set_redirect_target(self, target_page, create=False, force=False,
                            keep_section=False, save=True, **kwargs):
        """
        Change the page's text to point to the redirect page.

        @param target_page: target of the redirect, this argument is required.
        @type target_page: pywikibot.Page or string
        @param create: if true, it creates the redirect even if the page
            doesn't exist.
        @type create: bool
        @param force: if true, it set the redirect target even the page
            doesn't exist or it's not redirect.
        @type force: bool
        @param keep_section: if the old redirect links to a section
            and the new one doesn't it uses the old redirect's section.
        @type keep_section: bool
        @param save: if true, it saves the page immediately.
        @type save: bool
        @param kwargs: Arguments which are used for saving the page directly
            afterwards, like 'summary' for edit summary.
        """
        if isinstance(target_page, basestring):
            target_page = pywikibot.Page(self.site, target_page)
        elif self.site != target_page.site:
            raise pywikibot.InterwikiRedirectPage(self, target_page)
        if not self.exists() and not (create or force):
            raise pywikibot.NoPage(self)
        if self.exists() and not self.isRedirectPage() and not force:
            raise pywikibot.IsNotRedirectPage(self)
        redirect_regex = self.site.redirectRegex()
        if self.exists():
            old_text = self.get(get_redirect=True)
        else:
            old_text = u''
        result = redirect_regex.search(old_text)
        if result:
            oldlink = result.group(1)
            # Carry over the old redirect's section anchor if the new
            # target does not specify one.
            if keep_section and '#' in oldlink and target_page.section() is None:
                sectionlink = oldlink[oldlink.index('#'):]
                target_page = pywikibot.Page(
                    self.site,
                    target_page.title() + sectionlink
                )
            # Preserve any text surrounding the old redirect directive.
            prefix = self.text[:result.start()]
            suffix = self.text[result.end():]
        else:
            prefix = ''
            suffix = ''
        target_link = target_page.title(asLink=True, textlink=True,
                                        allowInterwiki=False)
        target_link = u'#{0} {1}'.format(self.site.redirect(), target_link)
        self.text = prefix + target_link + suffix
        if save:
            self.save(**kwargs)
class FilePage(Page):
    """
    A subclass of Page representing a file description page.

    Supports the same interface as Page, with some added methods.
    """
    @deprecate_arg("insite", None)
    def __init__(self, source, title=u""):
        """Constructor."""
        self._file_revisions = {}  # dictionary to cache File history.
        super(FilePage, self).__init__(source, title, 6)
        if self.namespace() != 6:
            raise ValueError(u"'%s' is not in the file namespace!" % title)
    def _load_file_revisions(self, imageinfo):
        """Store an iterable of imageinfo entries in the revision cache."""
        for file_rev in imageinfo:
            file_revision = FileInfo(file_rev)
            self._file_revisions[file_revision.timestamp] = file_revision
    @property
    def latest_file_info(self):
        """
        Retrieve and store information of latest Image rev. of FilePage.

        At the same time, the whole history of Image is fetched and cached in
        self._file_revisions

        @return: instance of FileInfo()
        """
        if not len(self._file_revisions):
            self.site.loadimageinfo(self, history=True)
        latest_ts = max(self._file_revisions)
        return self._file_revisions[latest_ts]
    @property
    def oldest_file_info(self):
        """
        Retrieve and store information of oldest Image rev. of FilePage.

        At the same time, the whole history of Image is fetched and cached in
        self._file_revisions

        @return: instance of FileInfo()
        """
        if not len(self._file_revisions):
            self.site.loadimageinfo(self, history=True)
        oldest_ts = min(self._file_revisions)
        return self._file_revisions[oldest_ts]
    def get_file_history(self):
        """
        Return the file's version history.

        @return: dictionary with:
            key: timestamp of the entry
            value: instance of FileInfo()
        @rtype: dict
        """
        if not len(self._file_revisions):
            self.site.loadimageinfo(self, history=True)
        return self._file_revisions
    def getImagePageHtml(self):
        """
        Download the file page, and return the HTML, as a unicode string.

        Caches the HTML code, so that if you run this method twice on the
        same FilePage object, the page will only be downloaded once.
        """
        if not hasattr(self, '_imagePageHtml'):
            path = "%s/index.php?title=%s" \
                   % (self.site.scriptpath(), self.title(asUrl=True))
            self._imagePageHtml = http.request(self.site, path)
        return self._imagePageHtml
    @deprecated('get_file_url')
    def fileUrl(self):
        """Return the URL for the file described on this page."""
        return self.latest_file_info.url
    def get_file_url(self, url_width=None, url_height=None, url_param=None):
        """
        Return the url or the thumburl of the file described on this page.

        Fetch the information if not available.

        Once retrieved, thumburl information will also be accessible as
        latest_file_info attributes, named as in [1]:
        - url, thumburl, thumbwidth and thumbheight

        Parameters correspond to iiprops in:
        [1] U{https://www.mediawiki.org/wiki/API:Imageinfo}

        Parameters validation and error handling left to the API call.

        @param url_width: see iiurlwidth in [1]
        @param url_height: see iiurlheight in [1]
        @param url_param: see iiurlparam in [1]
        @return: latest file url or thumburl
        @rtype: unicode
        """
        # Plain url is requested.
        if url_width is None and url_height is None and url_param is None:
            return self.latest_file_info.url
        # Thumburl is requested.
        self.site.loadimageinfo(self, history=not self._file_revisions,
                                url_width=url_width, url_height=url_height,
                                url_param=url_param)
        return self.latest_file_info.thumburl
    @deprecated("fileIsShared")
    def fileIsOnCommons(self):
        """
        DEPRECATED. Check if the image is stored on Wikimedia Commons.

        @rtype: bool
        """
        return self.fileIsShared()
    def fileIsShared(self):
        """
        Check if the file is stored on any known shared repository.

        @rtype: bool
        """
        # as of now, the only known repositories are commons and wikitravel
        # TODO: put the URLs to family file
        if not self.site.has_image_repository:
            return False
        elif 'wikitravel_shared' in self.site.shared_image_repository():
            return self.latest_file_info.url.startswith(
                u'http://wikitravel.org/upload/shared/')
        else:
            return self.latest_file_info.url.startswith(
                'https://upload.wikimedia.org/wikipedia/commons/')
    @deprecated("FilePage.latest_file_info.sha1")
    def getFileMd5Sum(self):
        """Return image file's MD5 checksum."""
        # TODO: check whether this needs a User-Agent header added
        req = http.fetch(self.fileUrl())
        h = hashlib.md5()
        h.update(req.raw)
        md5Checksum = h.hexdigest()
        return md5Checksum
    @deprecated("FilePage.latest_file_info.sha1")
    def getFileSHA1Sum(self):
        """Return the file's SHA1 checksum."""
        return self.latest_file_info.sha1
    @deprecated("FilePage.oldest_file_info.user")
    def getFirstUploader(self):
        """
        Return a list with first uploader of the FilePage and timestamp.

        For compatibility with compat only.
        """
        return [self.oldest_file_info.user,
                unicode(self.oldest_file_info.timestamp.isoformat())]
    @deprecated("FilePage.latest_file_info.user")
    def getLatestUploader(self):
        """
        Return a list with latest uploader of the FilePage and timestamp.

        For compatibility with compat only.
        """
        return [self.latest_file_info.user,
                unicode(self.latest_file_info.timestamp.isoformat())]
    @deprecated('FilePage.get_file_history()')
    def getFileVersionHistory(self):
        """
        Return the file's version history.

        @return: A list of dictionaries with the following keys:
            [comment, sha1, url, timestamp, metadata,
            height, width, mime, user, descriptionurl, size]
        @rtype: list
        """
        return self.site.loadimageinfo(self, history=True)
    def getFileVersionHistoryTable(self):
        """Return the version history in the form of a wiki table."""
        lines = []
        for info in self.getFileVersionHistory():
            dimension = '{width}×{height} px ({size} bytes)'.format(**info)
            lines.append('| {timestamp} || {user} || {dimension} |'
                         '| <nowiki>{comment}</nowiki>'
                         ''.format(dimension=dimension, **info))
        return ('{| class="wikitable"\n'
                '! {{int:filehist-datetime}} || {{int:filehist-user}} |'
                '| {{int:filehist-dimensions}} || {{int:filehist-comment}}\n'
                '|-\n%s\n|}\n' % '\n|-\n'.join(lines))
    @deprecated_args(step=None)
    def usingPages(self, total=None, content=False):
        """
        Yield Pages on which the file is displayed.

        @param total: iterate no more than this number of pages in total
        @param content: if True, load the current content of each iterated page
            (default False)
        """
        return self.site.imageusage(self, total=total, content=content)
    def upload(self, source, **kwargs):
        """
        Upload this file to the wiki.

        keyword arguments are from site.upload() method.

        @param source: Path or URL to the file to be uploaded.
        @type source: str

        @keyword comment: Edit summary; if this is not provided, then
            filepage.text will be used. An empty summary is not permitted.
            This may also serve as the initial page text (see below).
        @keyword text: Initial page text; if this is not set, then
            filepage.text will be used, or comment.
        @keyword watch: If true, add filepage to the bot user's watchlist
        @keyword ignore_warnings: It may be a static boolean, a callable returning
            a boolean or an iterable. The callable gets a list of UploadWarning
            instances and the iterable should contain the warning codes for
            which an equivalent callable would return True if all UploadWarning
            codes are in that list. If the result is False it'll not continue
            uploading the file and otherwise disable any warning and
            reattempt to upload the file. NOTE: If report_success is True or
            None it'll raise an UploadWarning exception if the static boolean is
            False.
        @type ignore_warnings: bool or callable or iterable of str
        @keyword chunk_size: The chunk size in bytes for chunked uploading (see
            U{https://www.mediawiki.org/wiki/API:Upload#Chunked_uploading}). It
            will only upload in chunks, if the version number is 1.20 or higher
            and the chunk size is positive but lower than the file size.
        @type chunk_size: int
        @keyword _file_key: Reuses an already uploaded file using the filekey. If
            None (default) it will upload the file.
        @type _file_key: str or None
        @keyword _offset: When file_key is not None this can be an integer to
            continue a previously canceled chunked upload. If False it treats
            that as a finished upload. If True it requests the stash info from
            the server to determine the offset. By default starts at 0.
        @type _offset: int or bool
        @keyword _verify_stash: Requests the SHA1 and file size uploaded and
            compares it to the local file. Also verifies that _offset is
            matching the file size if the _offset is an int. If _offset is False
            if verifies that the file size match with the local file. If None
            it'll verifies the stash when a file key and offset is given.
        @type _verify_stash: bool or None
        @keyword report_success: If the upload was successful it'll print a
            success message and if ignore_warnings is set to False it'll
            raise an UploadWarning if a warning occurred. If it's None (default)
            it'll be True if ignore_warnings is a bool and False otherwise. If
            it's True or None ignore_warnings must be a bool.
        @return: It returns True if the upload was successful and False
            otherwise.
        @rtype: bool
        """
        # Decide whether the source is a local path or a remote URL.
        filename = url = None
        if '://' in source:
            url = source
        else:
            filename = source
        return self.site.upload(self, source_filename=filename, source_url=url,
                                **kwargs)
    def download(self, filename=None, chunk_size=100 * 1024, revision=None):
        """
        Download to filename file of FilePage.

        @param filename: filename where to save file:
            None: self.title(as_filename=True, withNamespace=False)
            will be used
            str: provided filename will be used.
        @type filename: None or str
        @param chunk_size: the size of each chunk to be received and
            written to file.
        @type chunk_size: int
        @param revision: file revision to download:
            None: self.latest_file_info will be used
            FileInfo: provided revision will be used.
        @type revision: None or FileInfo
        @return: True if download is successful, False otherwise.
        @raise: IOError if filename cannot be written for any reason.
        """
        if filename is None:
            filename = self.title(as_filename=True, withNamespace=False)
        filename = os.path.expanduser(filename)
        if revision is None:
            revision = self.latest_file_info
        req = http.fetch(revision.url, stream=True)
        if req.status == 200:
            # Stream the payload to disk chunk by chunk. An IOError from
            # open() or write() propagates to the caller as documented;
            # the former "except IOError as e: raise e" was a no-op.
            with open(filename, 'wb') as f:
                for chunk in req.data.iter_content(chunk_size):
                    f.write(chunk)
            # Verify integrity against the revision's recorded SHA1.
            sha1 = compute_file_hash(filename)
            return sha1 == revision.sha1
        else:
            # Fixed typo in user-visible warning ('Unsuccesfull').
            pywikibot.warning('Unsuccessful request (%s): %s'
                              % (req.status, req.uri))
            return False
# Deprecated module attribute: FilePage was formerly named ImagePage.
wrapper = _ModuleDeprecationWrapper(__name__)
wrapper._add_deprecated_attr('ImagePage', FilePage)
class Category(Page):
"""A page in the Category: namespace."""
    @deprecate_arg("insite", None)
    def __init__(self, source, title=u"", sortKey=None):
        """
        Constructor.

        All parameters are the same as for Page() constructor.
        """
        # sortKey is remembered so aslink() can emit [[Category|key]].
        self.sortKey = sortKey
        Page.__init__(self, source, title, ns=14)
        if self.namespace() != 14:
            raise ValueError(u"'%s' is not in the category namespace!"
                             % title)
@deprecated_args(forceInterwiki=None, textlink=None, noInterwiki=None)
def aslink(self, sortKey=None):
"""
Return a link to place a page in this Category.
Use this only to generate a "true" category link, not for interwikis
or text links to category pages.
@param sortKey: The sort key for the article to be placed in this
Category; if omitted, default sort key is used.
@type sortKey: (optional) unicode
"""
key = sortKey or self.sortKey
if key is not None:
titleWithSortKey = '%s|%s' % (self.title(withSection=False),
key)
else:
titleWithSortKey = self.title(withSection=False)
return '[[%s]]' % titleWithSortKey
    @deprecated_args(startFrom=None, cacheResults=None, step=None)
    def subcategories(self, recurse=False, total=None, content=False):
        """
        Iterate all subcategories of the current category.

        Direct subcategories are cached in self._subcats on first use;
        later calls iterate the cache instead of querying the site again.

        @param recurse: if not False or 0, also iterate subcategories of
            subcategories. If an int, limit recursion to this number of
            levels. (Example: recurse=1 will iterate direct subcats and
            first-level sub-sub-cats, but no deeper.)
        @type recurse: int or bool
        @param total: iterate no more than this number of
            subcategories in total (at all levels)
        @param content: if True, retrieve the content of the current version
            of each category description page (default False)
        """
        # An integer recurse counts the remaining depth; booleans pass
        # through unchanged (True recurses without limit).
        if not isinstance(recurse, bool) and recurse:
            recurse = recurse - 1
        if not hasattr(self, "_subcats"):
            self._subcats = []
            for member in self.site.categorymembers(
                    self, member_type='subcat', total=total, content=content):
                subcat = Category(member)
                self._subcats.append(subcat)
                yield subcat
                if total is not None:
                    total -= 1
                    if total == 0:
                        return
                if recurse:
                    for item in subcat.subcategories(
                            recurse, total=total, content=content):
                        yield item
                        if total is not None:
                            total -= 1
                            if total == 0:
                                return
        else:
            # Cached path: same traversal, served from self._subcats.
            for subcat in self._subcats:
                yield subcat
                if total is not None:
                    total -= 1
                    if total == 0:
                        return
                if recurse:
                    for item in subcat.subcategories(
                            recurse, total=total, content=content):
                        yield item
                        if total is not None:
                            total -= 1
                            if total == 0:
                                return
    @deprecated_args(startFrom='startsort', step=None)
    def articles(self, recurse=False, total=None,
                 content=False, namespaces=None, sortby=None,
                 reverse=False, starttime=None, endtime=None,
                 startsort=None, endsort=None):
        """
        Yield all articles in the current category.

        By default, yields all *pages* in the category that are not
        subcategories!

        @param recurse: if not False or 0, also iterate articles in
            subcategories. If an int, limit recursion to this number of
            levels. (Example: recurse=1 will iterate articles in first-level
            subcats, but no deeper.)
        @type recurse: int or bool
        @param total: iterate no more than this number of pages in
            total (at all levels)
        @param namespaces: only yield pages in the specified namespaces
        @type namespaces: int or list of ints
        @param content: if True, retrieve the content of the current version
            of each page (default False)
        @param sortby: determines the order in which results are generated,
            valid values are "sortkey" (default, results ordered by category
            sort key) or "timestamp" (results ordered by time page was
            added to the category). This applies recursively.
        @type sortby: str
        @param reverse: if True, generate results in reverse order
            (default False)
        @param starttime: if provided, only generate pages added after this
            time; not valid unless sortby="timestamp"
        @type starttime: pywikibot.Timestamp
        @param endtime: if provided, only generate pages added before this
            time; not valid unless sortby="timestamp"
        @type endtime: pywikibot.Timestamp
        @param startsort: if provided, only generate pages >= this title
            lexically; not valid if sortby="timestamp"
        @type startsort: str
        @param endsort: if provided, only generate pages <= this title
            lexically; not valid if sortby="timestamp"
        @type endsort: str
        """
        for member in self.site.categorymembers(self,
                                                namespaces=namespaces,
                                                total=total,
                                                content=content, sortby=sortby,
                                                reverse=reverse,
                                                starttime=starttime,
                                                endtime=endtime,
                                                startsort=startsort,
                                                endsort=endsort,
                                                member_type=['page', 'file']
                                                ):
            yield member
            if total is not None:
                total -= 1
                if total == 0:
                    return
        if recurse:
            # Integer recurse counts remaining depth levels.
            if not isinstance(recurse, bool) and recurse:
                recurse = recurse - 1
            for subcat in self.subcategories():
                for article in subcat.articles(recurse, total=total,
                                               content=content,
                                               namespaces=namespaces,
                                               sortby=sortby,
                                               reverse=reverse,
                                               starttime=starttime,
                                               endtime=endtime,
                                               startsort=startsort,
                                               endsort=endsort
                                               ):
                    yield article
                    if total is not None:
                        total -= 1
                        if total == 0:
                            return
    @deprecated_args(step=None)
    def members(self, recurse=False, namespaces=None, total=None,
                content=False):
        """Yield all category contents (subcats, pages, and files).

        @param recurse: if not False or 0, also iterate members of
            subcategories; an int limits the recursion depth.
        @param namespaces: only yield members in the specified namespaces
        @param total: iterate no more than this number of members in total
        @param content: if True, retrieve the content of the current version
            of each member (default False)
        """
        for member in self.site.categorymembers(
                self, namespaces, total=total, content=content):
            yield member
            if total is not None:
                total -= 1
                if total == 0:
                    return
        if recurse:
            # Integer recurse counts remaining depth levels.
            if not isinstance(recurse, bool) and recurse:
                recurse = recurse - 1
            for subcat in self.subcategories():
                for article in subcat.members(
                        recurse, namespaces, total=total, content=content):
                    yield article
                    if total is not None:
                        total -= 1
                        if total == 0:
                            return
@need_version('1.13')
def isEmptyCategory(self):
"""
Return True if category has no members (including subcategories).
@rtype: bool
"""
ci = self.categoryinfo
return sum(ci[k] for k in ['files', 'pages', 'subcats']) == 0
@need_version('1.11')
def isHiddenCategory(self):
"""
Return True if the category is hidden.
@rtype: bool
"""
return u'hiddencat' in self.properties()
def copyTo(self, cat, message):
"""
Copy text of category page to a new page. Does not move contents.
@param cat: New category title (without namespace) or Category object
@type cat: unicode or Category
@param message: message to use for category creation message
If two %s are provided in message, will be replaced
by (self.title, authorsList)
@type message: unicode
@return: True if copying was successful, False if target page
already existed.
@rtype: bool
"""
# This seems far too specialized to be in the top-level framework
# move to category.py? (Although it doesn't seem to be used there,
# either)
if not isinstance(cat, Category):
cat = self.site.namespaces.CATEGORY + ':' + cat
targetCat = Category(self.site, cat)
else:
targetCat = cat
if targetCat.exists():
pywikibot.output(u'Target page %s already exists!'
% targetCat.title(),
level=pywikibot.logging.WARNING)
return False
else:
pywikibot.output('Moving text from %s to %s.'
% (self.title(), targetCat.title()))
authors = ', '.join(self.contributingUsers())
try:
creationSummary = message % (self.title(), authors)
except TypeError:
creationSummary = message
targetCat.put(self.get(), creationSummary)
return True
def copyAndKeep(self, catname, cfdTemplates, message):
"""
Copy partial category page text (not contents) to a new title.
Like copyTo above, except this removes a list of templates (like
deletion templates) that appear in the old category text. It also
removes all text between the two HTML comments BEGIN CFD TEMPLATE
and END CFD TEMPLATE. (This is to deal with CFD templates that are
substituted.)
Returns true if copying was successful, false if target page already
existed.
@param catname: New category title (without namespace)
@param cfdTemplates: A list (or iterator) of templates to be removed
from the page text
@return: True if copying was successful, False if target page
already existed.
@rtype: bool
"""
# I don't see why we need this as part of the framework either
# move to scripts/category.py?
catname = self.site.namespaces.CATEGORY + ':' + catname
targetCat = Category(self.site, catname)
if targetCat.exists():
pywikibot.warning(u'Target page %s already exists!'
% targetCat.title())
return False
else:
pywikibot.output(
'Moving text from %s to %s.'
% (self.title(), targetCat.title()))
authors = ', '.join(self.contributingUsers())
creationSummary = message % (self.title(), authors)
newtext = self.get()
for regexName in cfdTemplates:
matchcfd = re.compile(r"{{%s.*?}}" % regexName, re.IGNORECASE)
newtext = matchcfd.sub('', newtext)
matchcomment = re.compile(
r"<!--BEGIN CFD TEMPLATE-->.*?<!--END CFD TEMPLATE-->",
re.IGNORECASE | re.MULTILINE | re.DOTALL)
newtext = matchcomment.sub('', newtext)
pos = 0
while (newtext[pos:pos + 1] == "\n"):
pos = pos + 1
newtext = newtext[pos:]
targetCat.put(newtext, creationSummary)
return True
    @property
    def categoryinfo(self):
        """
        Return a dict containing information about the category.

        The dict contains values for:
        Numbers of pages, subcategories, files, and total contents.

        @rtype: dict
        """
        # Delegates to the site; the site layer handles caching/API access.
        return self.site.categoryinfo(self)
    def newest_pages(self, total=None):
        """
        Return pages in a category ordered by the creation date.

        If two or more pages are created at the same time, the pages are
        returned in the order they were added to the category. The most recently
        added page is returned first.

        It only allows to return the pages ordered from newest to oldest, as it
        is impossible to determine the oldest page in a category without
        checking all pages. But it is possible to check the category in order
        with the newly added first and it yields all pages which were created
        after the currently checked page was added (and thus there is no page
        created after any of the cached but added before the currently checked).

        @param total: The total number of pages queried.
        @type total: int
        @return: A page generator of all pages in a category ordered by the
            creation date. From newest to oldest. Note: It currently only
            returns Page instances and not a subclass of it if possible. This
            might change so don't expect to only get Page instances.
        @rtype: generator
        """
        def check_cache(latest):
            """Return the cached pages in order and not more than total."""
            # Pages cached under a creation timestamp newer than 'latest' are
            # safe to emit: nothing added to the category later can have been
            # created before them.
            cached = []
            for timestamp in sorted((ts for ts in cache if ts > latest),
                                    reverse=True):
                # The complete list can be removed, it'll either yield all of
                # them, or only a portion but will skip the rest anyway
                cached += cache.pop(timestamp)[:None if total is None else
                                               total - len(cached)]
                if total and len(cached) >= total:
                    break  # already got enough
            assert total is None or len(cached) <= total, \
                'Number of caches is more than total number requested'
            return cached
        # all pages which have been checked but where created before the
        # current page was added, at some point they will be created after
        # the current page was added. It saves all pages via the creation
        # timestamp. Be prepared for multiple pages.
        cache = defaultdict(list)
        # TODO: Make site.categorymembers is usable as it returns pages
        # There is no total defined, as it's not known how many pages need to be
        # checked before the total amount of new pages was found. In worst case
        # all pages of a category need to be checked.
        for member in pywikibot.data.api.QueryGenerator(
                site=self.site, list='categorymembers', cmsort='timestamp',
                cmdir='older', cmprop='timestamp|title',
                cmtitle=self.title()):
            # TODO: Upcast to suitable class
            page = pywikibot.Page(self.site, member['title'])
            assert page.namespace() == member['ns'], \
                'Namespace of the page is not consistent'
            cached = check_cache(pywikibot.Timestamp.fromISOformat(
                member['timestamp']))
            for cached_page in cached:
                yield cached_page
            if total is not None:
                total -= len(cached)
                if total <= 0:
                    break
            cache[page.oldest_revision.timestamp] += [page]
        else:
            # clear cache: the member loop was exhausted without hitting
            # 'total', so everything still cached may now be yielded.
            assert total is None or total > 0, \
                'As many items as given in total already returned'
            for cached_page in check_cache(pywikibot.Timestamp.min):
                yield cached_page
# ### DEPRECATED METHODS ####
@deprecated("list(Category.subcategories(...))")
def subcategoriesList(self, recurse=False):
"""DEPRECATED: Equivalent to list(self.subcategories(...))."""
return sorted(list(set(self.subcategories(recurse))))
@deprecated("list(Category.articles(...))")
def articlesList(self, recurse=False):
"""DEPRECATED: equivalent to list(self.articles(...))."""
return sorted(list(set(self.articles(recurse))))
    @deprecated("Category.categories()")
    def supercategories(self):
        """DEPRECATED: equivalent to self.categories()."""
        # Kept only for backward compatibility; simply delegates.
        return self.categories()
@deprecated("list(Category.categories(...))")
def supercategoriesList(self):
"""DEPRECATED: equivalent to list(self.categories(...))."""
return sorted(list(set(self.categories())))
class User(Page):
"""
A class that represents a Wiki user.
This class also represents the Wiki page User:<username>
"""
@deprecated_args(site="source", name="title")
def __init__(self, source, title=u''):
"""
Initializer for a User object.
All parameters are the same as for Page() constructor.
"""
if len(title) > 1 and title[0] == u'#':
self._isAutoblock = True
title = title[1:]
else:
self._isAutoblock = False
Page.__init__(self, source, title, ns=2)
if self.namespace() != 2:
raise ValueError(u"'%s' is not in the user namespace!"
% title)
if self._isAutoblock:
# This user is probably being queried for purpose of lifting
# an autoblock.
pywikibot.output(
"This is an autoblock ID, you can only use to unblock it.")
    @deprecated('User.username')
    def name(self):
        """
        The username.

        DEPRECATED: use the username property instead.

        @rtype: unicode
        """
        return self.username
@property
def username(self):
"""
The username.
Convenience method that returns the title of the page with
namespace prefix omitted, which is the username.
@rtype: unicode
"""
if self._isAutoblock:
return u'#' + self.title(withNamespace=False)
else:
return self.title(withNamespace=False)
    def isRegistered(self, force=False):
        """
        Determine if the user is registered on the site.

        It is possible to have a page named User:xyz and not have
        a corresponding user with username xyz.

        The page does not need to exist for this method to return
        True.

        @param force: if True, forces reloading the data from API
        @type force: bool
        @rtype: bool
        """
        # T135828: the registration timestamp may be None but the key exists
        return 'registration' in self.getprops(force)
    def isAnonymous(self):
        """
        Determine if the user is editing as an IP address.

        @rtype: bool
        """
        # An IP-shaped username means an unregistered (anonymous) editor.
        return is_IP(self.username)
def getprops(self, force=False):
"""
Return a properties about the user.
@param force: if True, forces reloading the data from API
@type force: bool
@rtype: dict
"""
if force and hasattr(self, '_userprops'):
del self._userprops
if not hasattr(self, '_userprops'):
self._userprops = list(self.site.users([self.username, ]))[0]
if self.isAnonymous():
r = list(self.site.blocks(users=self.username))
if r:
self._userprops['blockedby'] = r[0]['by']
self._userprops['blockreason'] = r[0]['reason']
return self._userprops
@deprecated('User.registration()')
def registrationTime(self, force=False):
"""
DEPRECATED. Fetch registration date for this user.
@param force: if True, forces reloading the data from API
@type force: bool
@return: long (MediaWiki's internal timestamp format) or 0
@rtype: int or long
"""
if self.registration():
return long(self.registration().strftime('%Y%m%d%H%M%S'))
else:
return 0
def registration(self, force=False):
"""
Fetch registration date for this user.
@param force: if True, forces reloading the data from API
@type force: bool
@rtype: pywikibot.Timestamp or None
"""
reg = self.getprops(force).get('registration')
if reg:
return pywikibot.Timestamp.fromISOformat(reg)
    def editCount(self, force=False):
        """
        Return edit count for a registered user.

        Always returns 0 for 'anonymous' users.

        @param force: if True, forces reloading the data from API
        @type force: bool
        @rtype: int or long
        """
        # 'editcount' is absent from the props of anonymous users.
        return self.getprops(force).get('editcount', 0)
    def isBlocked(self, force=False):
        """
        Determine whether the user is currently blocked.

        @param force: if True, forces reloading the data from API
        @type force: bool
        @rtype: bool
        """
        # getprops() adds 'blockedby' only when a block exists.
        return 'blockedby' in self.getprops(force)
    def isEmailable(self, force=False):
        """
        Determine whether emails may be sent to this user through MediaWiki.

        @param force: if True, forces reloading the data from API
        @type force: bool
        @rtype: bool
        """
        return 'emailable' in self.getprops(force)
    def groups(self, force=False):
        """
        Return a list of groups to which this user belongs.

        The list of groups may be empty.

        @param force: if True, forces reloading the data from API
        @type force: bool
        @return: groups property
        @rtype: list
        """
        return self.getprops(force).get('groups', [])
def getUserPage(self, subpage=u''):
"""
Return a Page object relative to this user's main page.
@param subpage: subpage part to be appended to the main
page title (optional)
@type subpage: unicode
@return: Page object of user page or user subpage
@rtype: Page
"""
if self._isAutoblock:
# This user is probably being queried for purpose of lifting
# an autoblock, so has no user pages per se.
raise AutoblockUser(
"This is an autoblock ID, you can only use to unblock it.")
if subpage:
subpage = u'/' + subpage
return Page(Link(self.title() + subpage, self.site))
def getUserTalkPage(self, subpage=u''):
"""
Return a Page object relative to this user's main talk page.
@param subpage: subpage part to be appended to the main
talk page title (optional)
@type subpage: unicode
@return: Page object of user talk page or user talk subpage
@rtype: Page
"""
if self._isAutoblock:
# This user is probably being queried for purpose of lifting
# an autoblock, so has no user talk pages per se.
raise AutoblockUser(
"This is an autoblock ID, you can only use to unblock it.")
if subpage:
subpage = u'/' + subpage
return Page(Link(self.username + subpage,
self.site, defaultNamespace=3))
def send_email(self, subject, text, ccme=False):
"""
Send an email to this user via MediaWiki's email interface.
@param subject: the subject header of the mail
@type subject: unicode
@param text: mail body
@type text: unicode
@param ccme: if True, sends a copy of this email to the bot
@type ccme: bool
@raises NotEmailableError: the user of this User is not emailable
@raises UserRightsError: logged in user does not have 'sendemail' right
@return: operation successful indicator
@rtype: bool
"""
if not self.isEmailable():
raise NotEmailableError(self)
if not self.site.has_right('sendemail'):
raise UserRightsError('You don\'t have permission to send mail')
params = {
'action': 'emailuser',
'target': self.username,
'token': self.site.tokens['email'],
'subject': subject,
'text': text,
}
if ccme:
params['ccme'] = 1
mailrequest = self.site._simple_request(**params)
maildata = mailrequest.submit()
if 'emailuser' in maildata:
if maildata['emailuser']['result'] == u'Success':
return True
return False
@deprecated('send_email')
def sendMail(self, subject, text, ccme=False):
"""
Send an email to this user via MediaWiki's email interface.
Outputs 'Email sent' if the email was sent.
@param subject: the subject header of the mail
@type subject: unicode
@param text: mail body
@type text: unicode
@param ccme: if True, sends a copy of this email to the bot
@type ccme: bool
@raises NotEmailableError: the user of this User is not emailable
@raises UserRightsError: logged in user does not have 'sendemail' right
@return: operation successful indicator
@rtype: bool
"""
if self.send_email(subject, text, ccme=ccme):
pywikibot.output('Email sent.')
return True
return False
def block(self, expiry, reason, anononly=True, nocreate=True,
autoblock=True, noemail=False, reblock=False):
"""
Block user.
@param expiry: When the block should expire
@type expiry: pywikibot.Timestamp|str
@param reason: Block reason
@type reason: basestring
@param anononly: Whether block should only affect anonymous users
@type anononly: bool
@param nocreate: Whether to block account creation
@type nocreate: bool
@param autoblock: Whether to enable autoblock
@type autoblock: bool
@param noemail: Whether to disable email access
@type noemail: bool
@param reblock: Whether to reblock if a block already is set
@type reblock: bool
@return: None
"""
try:
self.site.blockuser(self, expiry, reason, anononly, nocreate,
autoblock, noemail, reblock)
except APIError as err:
if err.code == 'invalidrange':
raise ValueError("%s is not a valid IP range." % self.username)
else:
raise err
    def unblock(self, reason=None):
        """
        Remove the block for the user.

        @param reason: Reason for the unblock.
        @type reason: basestring
        """
        self.site.unblockuser(self, reason)
@deprecated("contributions")
@deprecate_arg("limit", "total") # To be consistent with rest of framework
def editedPages(self, total=500):
"""
DEPRECATED. Use contributions().
Yields pywikibot.Page objects that this user has
edited, with an upper bound of 'total'. Pages returned are not
guaranteed to be unique.
@param total: limit result to this number of pages.
@type total: int.
"""
for item in self.contributions(total=total):
yield item[0]
@deprecated_args(limit='total', namespace='namespaces')
def contributions(self, total=500, namespaces=[]):
"""
Yield tuples describing this user edits.
Each tuple is composed of a pywikibot.Page object,
the revision id (int), the edit timestamp (as a pywikibot.Timestamp
object), and the comment (unicode).
Pages returned are not guaranteed to be unique.
@param total: limit result to this number of pages
@type total: int
@param namespaces: only iterate links in these namespaces
@type namespaces: list
"""
for contrib in self.site.usercontribs(
user=self.username, namespaces=namespaces, total=total):
ts = pywikibot.Timestamp.fromISOformat(contrib['timestamp'])
yield (Page(self.site, contrib['title'], contrib['ns']),
contrib['revid'],
ts,
contrib.get('comment'))
@deprecate_arg("number", "total")
def uploadedImages(self, total=10):
"""
Yield tuples describing files uploaded by this user.
Each tuple is composed of a pywikibot.Page, the timestamp (str in
ISO8601 format), comment (unicode) and a bool for pageid > 0.
Pages returned are not guaranteed to be unique.
@param total: limit result to this number of pages
@type total: int
"""
if not self.isRegistered():
raise StopIteration
for item in self.site.logevents(
logtype='upload', user=self.username, total=total):
yield (item.page(),
unicode(item.timestamp()),
item.comment(),
item.pageid() > 0
)
@property
def is_thankable(self):
"""
Determine if the user has thanks notifications enabled.
NOTE: This doesn't accurately determine if thanks is enabled for user.
Privacy of thanks preferences is under discussion, please see
https://phabricator.wikimedia.org/T57401#2216861, and
https://phabricator.wikimedia.org/T120753#1863894
@rtype: bool
"""
if self.isAnonymous():
return False
if 'bot' in self.groups():
return False
return True
class WikibasePage(BasePage):
"""
The base page for the Wikibase extension.
There should be no need to instantiate this directly.
"""
def __init__(self, site, title=u"", **kwargs):
"""
Constructor.
If title is provided, either ns or entity_type must also be provided,
and will be checked against the title parsed using the Page
initialisation logic.
@param site: Wikibase data site
@type site: DataSite
@param title: normalized title of the page
@type title: unicode
@kwarg ns: namespace
@type ns: Namespace instance, or int
@kwarg entity_type: Wikibase entity type
@type entity_type: str ('item' or 'property')
@raises TypeError: incorrect use of parameters
@raises ValueError: incorrect namespace
@raises pywikibot.Error: title parsing problems
@raises NotImplementedError: the entity type is not supported
"""
if not isinstance(site, pywikibot.site.DataSite):
raise TypeError("site must be a pywikibot.site.DataSite object")
if title and ('ns' not in kwargs and 'entity_type' not in kwargs):
pywikibot.debug("%s.__init__: %s title %r specified without "
"ns or entity_type"
% (self.__class__.__name__, site, title),
layer='wikibase')
self._namespace = None
if 'ns' in kwargs:
if isinstance(kwargs['ns'], Namespace):
self._namespace = kwargs.pop('ns')
kwargs['ns'] = self._namespace.id
else:
# numerical namespace given
ns = int(kwargs['ns'])
if site.item_namespace.id == ns:
self._namespace = site.item_namespace
elif site.property_namespace.id == ns:
self._namespace = site.property_namespace
else:
raise ValueError('%r: Namespace "%d" is not valid'
% self.site)
if 'entity_type' in kwargs:
entity_type = kwargs.pop('entity_type')
if entity_type == 'item':
entity_type_ns = site.item_namespace
elif entity_type == 'property':
entity_type_ns = site.property_namespace
else:
raise ValueError('Wikibase entity type "%s" unknown'
% entity_type)
if self._namespace:
if self._namespace != entity_type_ns:
raise ValueError('Namespace "%d" is not valid for Wikibase'
' entity type "%s"'
% (kwargs['ns'], entity_type))
else:
self._namespace = entity_type_ns
kwargs['ns'] = self._namespace.id
super(WikibasePage, self).__init__(site, title, **kwargs)
# If a title was not provided,
# avoid checks which may cause an exception.
if not title:
self.repo = site
return
if self._namespace:
if self._link.namespace != self._namespace.id:
raise ValueError(u"'%s' is not in the namespace %d"
% (title, self._namespace.id))
else:
# Neither ns or entity_type was provided.
# Use the _link to determine entity type.
ns = self._link.namespace
if self.site.item_namespace.id == ns:
self._namespace = self.site.item_namespace
elif self.site.property_namespace.id == ns:
self._namespace = self.site.property_namespace
else:
raise ValueError('%r: Namespace "%r" is not valid'
% (self.site, ns))
# .site forces a parse of the Link title to determine site
self.repo = self.site
# Link.__init__, called from Page.__init__, has cleaned the title
# stripping whitespace and uppercasing the first letter according
# to the namespace case=first-letter.
self.id = self._link.title
if not self.is_valid_id(self.id):
raise pywikibot.InvalidTitle(
"'%s' is not a valid %s page title"
% (self.id, self.entity_type))
def _defined_by(self, singular=False):
"""
Internal function to provide the API parameters to identify the entity.
The API parameters may be 'id' if the ItemPage has one,
or 'site'&'title' if instantiated via ItemPage.fromPage with
lazy_load enabled.
Once an item's "p/q##" is looked up, that will be used for all future
requests.
An empty dict is returned if the ItemPage is instantiated without
either ID (internally it has id = '-1') or site&title.
@param singular: Whether the parameter names should use the singular
form
@type singular: bool
@return: API parameters
@rtype: dict
"""
params = {}
if singular:
id = 'id'
site = 'site'
title = 'title'
else:
id = 'ids'
site = 'sites'
title = 'titles'
lazy_loading_id = not hasattr(self, 'id') and hasattr(self, '_site')
# id overrides all
if hasattr(self, 'id'):
if self.id != '-1':
params[id] = self.id
elif lazy_loading_id:
params[site] = self._site.dbName()
params[title] = self._title
else:
# if none of the above applies, this item is in an invalid state
# which needs to be raise as an exception, but also logged in case
# an exception handler is catching the generic Error.
pywikibot.error('%s is in invalid state'
% self.__class__.__name__)
raise pywikibot.Error('%s is in invalid state'
% self.__class__.__name__)
return params
    def __getattribute__(self, name):
        """Low-level attribute getter. Deprecates lastrevid."""
        if name == 'lastrevid':
            issue_deprecation_warning(
                'WikibasePage.lastrevid', 'latest_revision_id', 2)
            # Transparently redirect the deprecated name to '_revid'.
            name = '_revid'
        return super(WikibasePage, self).__getattribute__(name)
    def __setattr__(self, attr, value):
        """Attribute setter. Deprecates lastrevid."""
        if attr == 'lastrevid':
            issue_deprecation_warning(
                'WikibasePage.lastrevid', 'latest_revision_id', 2)
            # Transparently redirect the deprecated name to '_revid'.
            attr = '_revid'
        return super(WikibasePage, self).__setattr__(attr, value)
    def __delattr__(self, attr):
        """Attribute deleter. Deprecates lastrevid."""
        if attr == 'lastrevid':
            issue_deprecation_warning(
                'WikibasePage.lastrevid', 'latest_revision_id', 2)
            # Transparently redirect the deprecated name to '_revid'.
            attr = '_revid'
        return super(WikibasePage, self).__delattr__(attr)
    def namespace(self):
        """
        Return the number of the namespace of the entity.

        @return: Namespace id
        @rtype: int
        """
        return self._namespace.id
def exists(self):
"""
Determine if an entity exists in the data repository.
@rtype: bool
"""
if not hasattr(self, '_content'):
try:
self.get(get_redirect=True)
return True
except pywikibot.NoPage:
return False
return 'lastrevid' in self._content
    def botMayEdit(self):
        """
        Return whether bots may edit this page.

        Because there is currently no system to mark a page that it shouldn't
        be edited by bots on Wikibase pages it always returns True. The content
        of the page is not text but a dict, the original way (to search for a
        template) doesn't apply.

        @return: True
        @rtype: bool
        """
        return True
    def get(self, force=False, *args, **kwargs):
        """
        Fetch all page data, and cache it.

        @param force: override caching
        @type force: bool
        @raise NotImplementedError: a value in args or kwargs
        """
        if args or kwargs:
            raise NotImplementedError(
                '{0}.get does not implement var args: {1!r} and {2!r}'.format(
                    self.__class__, args, kwargs))
        # True when created via ItemPage.fromPage(lazy_load=True): the item
        # id is not yet known, only the linked site/title pair.
        lazy_loading_id = not hasattr(self, 'id') and hasattr(self, '_site')
        if force or not hasattr(self, '_content'):
            identification = self._defined_by()
            if not identification:
                raise pywikibot.NoPage(self)
            try:
                data = self.repo.loadcontent(identification)
            except APIError as err:
                if err.code == 'no-such-entity':
                    raise pywikibot.NoPage(self)
                raise
            item_index = list(data.keys())[0]
            if lazy_loading_id or item_index != '-1':
                self.id = item_index
            self._content = data[item_index]
        if 'lastrevid' in self._content:
            self.latest_revision_id = self._content['lastrevid']
        else:
            # No revision id means the entity does not exist; for a lazy
            # load, report the linked wiki page when that is what is missing.
            if lazy_loading_id:
                p = Page(self._site, self._title)
                if not p.exists():
                    raise pywikibot.NoPage(p)
            raise pywikibot.NoPage(self)
        if 'pageid' in self._content:
            self._pageid = self._content['pageid']
        # aliases
        self.aliases = {}
        if 'aliases' in self._content:
            for lang in self._content['aliases']:
                self.aliases[lang] = []
                for value in self._content['aliases'][lang]:
                    self.aliases[lang].append(value['value'])
        # labels
        self.labels = {}
        if 'labels' in self._content:
            for lang in self._content['labels']:
                if 'removed' not in self._content['labels'][lang]:  # Bug T56767
                    self.labels[lang] = self._content['labels'][lang]['value']
        # descriptions
        self.descriptions = {}
        if 'descriptions' in self._content:
            for lang in self._content['descriptions']:
                self.descriptions[lang] = self._content[
                    'descriptions'][lang]['value']
        # claims
        self.claims = {}
        if 'claims' in self._content:
            for pid in self._content['claims']:
                self.claims[pid] = []
                for claim in self._content['claims'][pid]:
                    c = Claim.fromJSON(self.repo, claim)
                    c.on_item = self
                    self.claims[pid].append(c)
        return {'aliases': self.aliases,
                'labels': self.labels,
                'descriptions': self.descriptions,
                'claims': self.claims,
                }
    def _diff_to(self, type_key, key_name, value_name, diffto, data):
        """
        Add to *data* the API form of self.<type_key> diffed against *diffto*.

        Entries whose value equals the one already in *diffto* are dropped;
        entries present in *diffto* but no longer on the entity are emitted
        with an empty value so the API removes them server-side.

        @param type_key: attribute to diff ('labels' or 'descriptions')
        @param key_name: API key field name (e.g. 'language')
        @param value_name: API value field name (e.g. 'value')
        @param diffto: previously saved JSON to diff against, or None
        @param data: dict updated in place with the resulting API structure
        """
        assert type_key not in data, 'Key type must be defined in data'
        source = self._normalizeLanguages(getattr(self, type_key)).copy()
        diffto = {} if not diffto else diffto.get(type_key, {})
        new = set(source.keys())
        for key in diffto:
            if key in new:
                if source[key] == diffto[key][value_name]:
                    # Unchanged: no need to resubmit.
                    del source[key]
            else:
                # Removed from the entity: '' tells the API to delete it.
                source[key] = ''
        for key, value in source.items():
            source[key] = {key_name: key, value_name: value}
        if source:
            data[type_key] = source
    def toJSON(self, diffto=None):
        """
        Create JSON suitable for Wikibase API.

        When diffto is provided, JSON representing differences
        to the provided data is created.

        @param diffto: JSON containing claim data
        @type diffto: dict
        @rtype: dict
        """
        data = {}
        self._diff_to('labels', 'language', 'value', diffto, data)
        self._diff_to('descriptions', 'language', 'value', diffto, data)
        aliases = self._normalizeLanguages(self.aliases).copy()
        if diffto and 'aliases' in diffto:
            # Languages dropped locally must still appear (empty) so the
            # API knows to remove their aliases.
            for lang in set(diffto['aliases'].keys()) - set(aliases.keys()):
                aliases[lang] = []
        for lang, strings in list(aliases.items()):
            if diffto and 'aliases' in diffto and lang in diffto['aliases']:
                # Pad with '' to blank surplus server-side aliases, or drop
                # the language entirely when the multisets already match.
                empty = len(diffto['aliases'][lang]) - len(strings)
                if empty > 0:
                    strings += [''] * empty
                elif Counter(val['value'] for val
                             in diffto['aliases'][lang]) == Counter(strings):
                    del aliases[lang]
            if lang in aliases:
                aliases[lang] = [{'language': lang, 'value': i} for i in strings]
        if aliases:
            data['aliases'] = aliases
        claims = {}
        for prop in self.claims:
            if len(self.claims[prop]) > 0:
                claims[prop] = [claim.toJSON() for claim in self.claims[prop]]
        if diffto and 'claims' in diffto:
            temp = defaultdict(list)
            claim_ids = set()
            diffto_claims = diffto['claims']
            # Keep only claims that are new or changed relative to diffto.
            for prop in claims:
                for claim in claims[prop]:
                    if (prop not in diffto_claims or
                            claim not in diffto_claims[prop]):
                        temp[prop].append(claim)
                        claim_ids.add(claim['id'])
            # Claims present in diffto but absent locally are marked removed.
            for prop, prop_claims in diffto_claims.items():
                for claim in prop_claims:
                    if 'id' in claim and claim['id'] not in claim_ids:
                        temp[prop].append({'id': claim['id'], 'remove': ''})
            claims = temp
        if claims:
            data['claims'] = claims
        return data
def getID(self, numeric=False, force=False):
"""
Get the entity identifier.
@param numeric: Strip the first letter and return an int
@type numeric: bool
@param force: Force an update of new data
@type force: bool
"""
if not hasattr(self, 'id') or force:
self.get(force=force)
if numeric:
return int(self.id[1:]) if self.id != '-1' else -1
return self.id
@classmethod
def is_valid_id(cls, entity_id):
"""
Whether the string can be a valid id of the entity type.
@param entity_id: The ID to test.
@type entity_id: basestring
@rtype: bool
"""
if not hasattr(cls, 'title_pattern'):
return True
return bool(re.match(cls.title_pattern, entity_id))
    @property
    def latest_revision_id(self):
        """
        Get the revision identifier for the most recent revision of the entity.

        @rtype: long
        """
        if not hasattr(self, '_revid'):
            # Not fetched yet; get() populates _revid as a side effect.
            self.get()
        return self._revid
    @latest_revision_id.setter
    def latest_revision_id(self, value):
        """Set the cached latest revision id."""
        self._revid = value
    @latest_revision_id.deleter
    def latest_revision_id(self):
        """Delete the cached revision id by clearing the entity cache."""
        self.clear_cache()
@staticmethod
def _normalizeLanguages(data):
"""
Helper function to replace site objects with their language codes.
@param data: The dict to normalize.
@type data: dict
@return: the altered dict from parameter data.
@rtype: dict
"""
for key in data:
if isinstance(key, pywikibot.site.BaseSite):
data[key.lang] = data[key]
del data[key]
return data
@classmethod
def _normalizeData(cls, data):
"""
Helper function to expand data into the Wikibase API structure.
@param data: The dict to normalize
@type data: dict
@return: the altered dict from parameter data.
@rtype: dict
"""
for prop in ('labels', 'descriptions'):
if prop not in data:
continue
data[prop] = cls._normalizeLanguages(data[prop])
for key, value in data[prop].items():
if isinstance(value, basestring):
data[prop][key] = {'language': key, 'value': value}
if 'aliases' in data:
for key, values in data['aliases'].items():
if (isinstance(values, list) and
isinstance(values[0], basestring)):
data['aliases'][key] = [{'language': key, 'value': value}
for value in values]
return data
def getdbName(self, site):
"""
Helper function to obtain a dbName for a Site.
@param site: The site to look up.
@type site: Site
"""
if isinstance(site, pywikibot.site.BaseSite):
return site.dbName()
return site
    @allow_asynchronous
    def editEntity(self, data=None, **kwargs):
        """
        Edit an entity using Wikibase wbeditentity API.

        This function is wrapped around by:
        - editLabels
        - editDescriptions
        - editAliases
        - ItemPage.setSitelinks

        @param data: Data to be saved
        @type data: dict, or None to save the current content of the entity.
        @keyword asynchronous: if True, launch a separate thread to edit
            asynchronously
        @type asynchronous: bool
        @keyword callback: a callable object that will be called after the entity
            has been updated. It must take two arguments: (1) a WikibasePage
            object, and (2) an exception instance, which will be None if the
            page was saved successfully. This is intended for use by bots that
            need to keep track of which saves were successful.
        @type callback: callable
        """
        if hasattr(self, '_revid'):
            baserevid = self.latest_revision_id
        else:
            # Entity never fetched; let the API decide about conflicts.
            baserevid = None
        if data is None:
            # Save the local state, diffed against the fetched content
            # (if any) so only actual changes are submitted.
            data = self.toJSON(diffto=(self._content if hasattr(self, '_content') else None))
        else:
            data = WikibasePage._normalizeData(data)
        updates = self.repo.editEntity(self._defined_by(singular=True), data,
                                       baserevid=baserevid, **kwargs)
        self.latest_revision_id = updates['entity']['lastrevid']
        lazy_loading_id = not hasattr(self, 'id') and hasattr(self, '_site')
        if lazy_loading_id or self.id == '-1':
            # A new entity was created: re-initialise with the assigned id.
            self.__init__(self.site, title=updates['entity']['id'])
        self._content = updates['entity']
        self.get()
def editLabels(self, labels, **kwargs):
"""
Edit entity labels.
Labels should be a dict, with the key
as a language or a site object. The
value should be the string to set it to.
You can set it to '' to remove the label.
"""
data = {'labels': labels}
self.editEntity(data, **kwargs)
def editDescriptions(self, descriptions, **kwargs):
"""
Edit entity descriptions.
Descriptions should be a dict, with the key
as a language or a site object. The
value should be the string to set it to.
You can set it to '' to remove the description.
"""
data = {'descriptions': descriptions}
self.editEntity(data, **kwargs)
def editAliases(self, aliases, **kwargs):
"""
Edit entity aliases.
Aliases should be a dict, with the key
as a language or a site object. The
value should be a list of strings.
"""
data = {'aliases': aliases}
self.editEntity(data, **kwargs)
    def set_redirect_target(self, target_page, create=False, force=False,
                            keep_section=False, save=True, **kwargs):
        """
        Set target of a redirect for a Wikibase page.

        Has not been implemented in the Wikibase API yet, except for ItemPage.
        """
        raise NotImplementedError
class ItemPage(WikibasePage):

    """
    Wikibase entity of type 'item'.

    A Wikibase item may be defined by either a 'Q' id (qid),
    or by a site & title.

    If an item is defined by site & title, once an item's qid has
    been looked up, the item is then defined by the qid.
    """

    entity_type = 'item'
    title_pattern = r'^(Q[1-9]\d*|-1)$'

    def __init__(self, site, title=None, ns=None):
        """
        Constructor.

        @param site: data repository
        @type site: pywikibot.site.DataSite
        @param title: id number of item, "Q###",
                      -1 or None for an empty item.
        @type title: str
        @param ns: namespace
        @type ns: Namespace instance, or int, or None
            for default item_namespace
        """
        if ns is None:
            ns = site.item_namespace
        # Special case for empty item: '-1' is the placeholder id used
        # before the item has been created or lazily resolved.
        if title is None or title == '-1':
            super(ItemPage, self).__init__(site, u'-1', ns=ns)
            assert self.id == '-1'
            return

        # we don't want empty titles
        if not title:
            raise pywikibot.InvalidTitle("Item's title cannot be empty")

        super(ItemPage, self).__init__(site, title, ns=ns)

        assert self.id == self._link.title

    def title(self, **kwargs):
        """
        Return ID as title of the ItemPage.

        If the ItemPage was lazy-loaded via ItemPage.fromPage, this method
        will fetch the wikibase item ID for the page, potentially raising
        NoPage with the page on the linked wiki if it does not exist, or
        does not have a corresponding wikibase item ID.

        This method also refreshes the title if the id property was set.
        i.e. item.id = 'Q60'

        All optional keyword parameters are passed to the superclass.
        """
        # If instantiated via ItemPage.fromPage using site and title,
        # _site and _title exist, and id does not exist.
        lazy_loading_id = not hasattr(self, 'id') and hasattr(self, '_site')

        if lazy_loading_id or self._link._text != self.id:
            # If the item is lazy loaded or has been modified,
            # _link._text is stale. Removing _link._title
            # forces Link to re-parse ._text into ._title.
            if hasattr(self._link, '_title'):
                del self._link._title
            self._link._text = self.getID()
            self._link.parse()

        # Remove the temporary values that are no longer needed after
        # the .getID() above has called .get(), which populated .id
        if hasattr(self, '_site'):
            del self._title
            del self._site

        return super(ItemPage, self).title(**kwargs)

    @classmethod
    def fromPage(cls, page, lazy_load=False):
        """
        Get the ItemPage for a Page that links to it.

        @param page: Page to look for corresponding data item
        @type page: pywikibot.Page
        @param lazy_load: Do not raise NoPage if either page or corresponding
                          ItemPage does not exist.
        @type lazy_load: bool
        @rtype: ItemPage

        @raise NoPage: There is no corresponding ItemPage for the page
        @raise WikiBaseError: The site of the page has no data repository.
        """
        if not page.site.has_data_repository:
            raise pywikibot.WikiBaseError('{0} has no data repository'
                                          ''.format(page.site))
        if not lazy_load and not page.exists():
            raise pywikibot.NoPage(page)

        repo = page.site.data_repository()
        if hasattr(page,
                   '_pageprops') and page.properties().get('wikibase_item'):
            # If we have already fetched the pageprops for something else,
            # we already have the id, so use it
            return cls(repo, page.properties().get('wikibase_item'))
        i = cls(repo)
        # clear id, and temporarily store data needed to lazy loading the item
        del i.id
        i._site = page.site
        i._title = page.title(withSection=False)
        if not lazy_load and not i.exists():
            raise pywikibot.NoPage(i)
        return i

    @classmethod
    def from_entity_uri(cls, site, uri, lazy_load=False):
        """
        Get the ItemPage from its entity uri.

        @param site: The Wikibase site for the item.
        @type site: pywikibot.site.DataSite
        @param uri: Entity uri for the Wikibase item.
        @type uri: basestring
        @param lazy_load: Do not raise NoPage if ItemPage does not exist.
        @type lazy_load: bool
        @rtype: ItemPage

        @raise TypeError: Site is not a valid DataSite.
        @raise ValueError: Site does not match the base of the provided uri.
        @raise NoPage: Uri points to non-existent item.
        """
        if not isinstance(site, DataSite):
            raise TypeError('{0} is not a data repository.'.format(site))

        # The qid is everything after the final '/' of the concept uri.
        base_uri, _, qid = uri.rpartition('/')
        if base_uri != site.concept_base_uri.rstrip('/'):
            raise ValueError(
                'The supplied data repository ({repo}) does not correspond to '
                'that of the item ({item})'.format(
                    repo=site.concept_base_uri.rstrip('/'),
                    item=base_uri))

        item = cls(site, qid)
        if not lazy_load and not item.exists():
            raise pywikibot.NoPage(item)

        return item

    def get(self, force=False, get_redirect=False, *args, **kwargs):
        """
        Fetch all item data, and cache it.

        @param force: override caching
        @type force: bool
        @param get_redirect: return the item content, do not follow the
                             redirect, do not raise an exception.
        @type get_redirect: bool
        @raise NotImplementedError: a value in args or kwargs
        """
        data = super(ItemPage, self).get(force, *args, **kwargs)

        if self.isRedirectPage() and not get_redirect:
            raise pywikibot.IsRedirectPage(self)

        # sitelinks: flatten the API structure into a dbname -> title map.
        self.sitelinks = {}
        if 'sitelinks' in self._content:
            for dbname in self._content['sitelinks']:
                self.sitelinks[dbname] = self._content[
                    'sitelinks'][dbname]['title']

        data['sitelinks'] = self.sitelinks
        return data

    @need_version('1.28-wmf.23')
    def concept_uri(self):
        """Return the full concept URI."""
        return '{0}{1}'.format(self.site.concept_base_uri, self.id)

    def getRedirectTarget(self):
        """Return the redirect target for this page."""
        target = super(ItemPage, self).getRedirectTarget()
        cmodel = target.content_model
        # A redirect pointing at a non-item page would otherwise yield a
        # broken ItemPage, so fail loudly here.
        if cmodel != 'wikibase-item':
            raise pywikibot.Error(u'%s has redirect target %s with content '
                                  u'model %s instead of wikibase-item' %
                                  (self, target, cmodel))
        return self.__class__(target.site, target.title(), target.namespace())

    def toJSON(self, diffto=None):
        """
        Create JSON suitable for Wikibase API.

        When diffto is provided, JSON representing differences
        to the provided data is created.

        @param diffto: JSON containing claim data
        @type diffto: dict

        @rtype: dict
        """
        data = super(ItemPage, self).toJSON(diffto=diffto)

        self._diff_to('sitelinks', 'site', 'title', diffto, data)

        return data

    def iterlinks(self, family=None):
        """
        Iterate through all the sitelinks.

        @param family: string/Family object which represents what family of
                       links to iterate
        @type family: str|pywikibot.family.Family
        @return: iterator of pywikibot.Page objects
        @rtype: iterator
        """
        if not hasattr(self, 'sitelinks'):
            self.get()
        if family is not None and not isinstance(family, Family):
            family = Family.load(family)
        for dbname in self.sitelinks:
            pg = Page(pywikibot.site.APISite.fromDBName(dbname),
                      self.sitelinks[dbname])
            if family is None or family == pg.site.family:
                yield pg

    def getSitelink(self, site, force=False):
        """
        Return the title for the specific site.

        If the item doesn't have that language, raise NoPage.

        @param site: Site to find the linked page of.
        @type site: pywikibot.Site or database name
        @param force: override caching

        @rtype: unicode
        """
        if force or not hasattr(self, '_content'):
            self.get(force=force)

        dbname = self.getdbName(site)
        if dbname not in self.sitelinks:
            raise pywikibot.NoPage(self)
        else:
            return self.sitelinks[dbname]

    def setSitelink(self, sitelink, **kwargs):
        """
        Set sitelinks. Calls setSitelinks().

        A sitelink can either be a Page object,
        or a {'site':dbname,'title':title} dictionary.
        """
        self.setSitelinks([sitelink], **kwargs)

    def removeSitelink(self, site, **kwargs):
        """
        Remove a sitelink.

        A site can either be a Site object, or it can be a dbName.
        """
        self.removeSitelinks([site], **kwargs)

    def removeSitelinks(self, sites, **kwargs):
        """
        Remove sitelinks.

        Sites should be a list, with values either
        being Site objects, or dbNames.
        """
        data = []
        for site in sites:
            site = self.getdbName(site)
            # An empty title tells the API to remove the sitelink.
            data.append({'site': site, 'title': ''})
        self.setSitelinks(data, **kwargs)

    def setSitelinks(self, sitelinks, **kwargs):
        """
        Set sitelinks.

        Sitelinks should be a list. Each item in the
        list can either be a Page object, or a dict
        with a value for 'site' and 'title'.
        """
        data = {}
        for obj in sitelinks:
            if isinstance(obj, Page):
                dbName = self.getdbName(obj.site)
                data[dbName] = {'site': dbName, 'title': obj.title()}
            else:
                # TODO: Do some verification here
                dbName = obj['site']
                data[dbName] = obj
        data = {'sitelinks': data}
        self.editEntity(data, **kwargs)

    @allow_asynchronous
    def addClaim(self, claim, bot=True, **kwargs):
        """
        Add a claim to the item.

        @param claim: The claim to add
        @type claim: Claim
        @param bot: Whether to flag as bot (if possible)
        @type bot: bool
        @keyword asynchronous: if True, launch a separate thread to add claim
            asynchronously
        @type asynchronous: bool
        @keyword callback: a callable object that will be called after the
            claim has been added. It must take two arguments: (1) an ItemPage
            object, and (2) an exception instance, which will be None if the
            item was saved successfully. This is intended for use by bots that
            need to keep track of which saves were successful.
        @type callback: callable
        """
        self.repo.addClaim(self, claim, bot=bot, **kwargs)
        # Record which item the claim now belongs to.
        claim.on_item = self

    def removeClaims(self, claims, **kwargs):
        """
        Remove the claims from the item.

        @param claims: list of claims to be removed
        @type claims: list or pywikibot.Claim
        """
        # this check allows single claims to be removed by pushing them into a
        # list of length one.
        if isinstance(claims, pywikibot.Claim):
            claims = [claims]
        self.repo.removeClaims(claims, **kwargs)

    def mergeInto(self, item, **kwargs):
        """
        Merge the item into another item.

        @param item: The item to merge into
        @type item: ItemPage
        """
        data = self.repo.mergeItems(fromItem=self, toItem=item, **kwargs)
        if not data.get('success', 0):
            return
        # Keep both items' cached revision ids current after the merge.
        self.latest_revision_id = data['from']['lastrevid']
        item.latest_revision_id = data['to']['lastrevid']
        if data.get('redirected', 0):
            self._isredir = True

    def set_redirect_target(self, target_page, create=False, force=False,
                            keep_section=False, save=True, **kwargs):
        """
        Make the item redirect to another item.

        You need to define an extra argument to make this work, like save=True

        @param target_page: target of the redirect, this argument is required.
        @type target_page: pywikibot.Item or string
        @param force: if true, it sets the redirect target even the page
            is not redirect.
        @type force: bool
        """
        if isinstance(target_page, basestring):
            target_page = pywikibot.ItemPage(self.repo, target_page)
        elif self.repo != target_page.repo:
            raise pywikibot.InterwikiRedirectPage(self, target_page)
        if self.exists() and not self.isRedirectPage() and not force:
            raise pywikibot.IsNotRedirectPage(self)
        # The create/keep_section options and save=False are not supported
        # by the Wikibase redirect API.
        if not save or keep_section or create:
            raise NotImplementedError
        self.repo.set_redirect_target(
            from_item=self, to_item=target_page)

    def isRedirectPage(self):
        """Return True if item is a redirect, False if not or not existing."""
        # If the fetched content reports a different id than our own, the
        # item was redirected server-side.
        if hasattr(self, '_content') and not hasattr(self, '_isredir'):
            self._isredir = self.id != self._content.get('id', self.id)
            return self._isredir
        return super(ItemPage, self).isRedirectPage()

    # alias for backwards compatibility
    concept_url = redirect_func(concept_uri, old_name='concept_url',
                                class_name='ItemPage')
class Property(object):

    """
    A Wikibase property.

    While every Wikibase property has a Page on the data repository,
    this object is for when the property is used as part of another concept
    where the property is not _the_ Page of the property.

    For example, a claim on an ItemPage has many property attributes, and so
    it subclasses this Property class, but a claim does not have Page like
    behaviour and semantics.
    """

    # Mapping of Wikibase datatype name to the Python type used to
    # represent targets of that datatype.
    types = {'wikibase-item': ItemPage,
             # 'wikibase-property': PropertyPage, must be declared first
             'string': basestring,
             'commonsMedia': FilePage,
             'globe-coordinate': pywikibot.Coordinate,
             'url': basestring,
             'time': pywikibot.WbTime,
             'quantity': pywikibot.WbQuantity,
             'monolingualtext': pywikibot.WbMonolingualText,
             'math': basestring,
             'external-id': basestring,
             'geo-shape': pywikibot.WbGeoShape,
             'tabular-data': pywikibot.WbTabularData,
             }

    # the value type where different from the type
    # (used when serializing datavalues for the API; datatypes absent
    # here use their own name as the value type)
    value_types = {'wikibase-item': 'wikibase-entityid',
                   'wikibase-property': 'wikibase-entityid',
                   'commonsMedia': 'string',
                   'url': 'string',
                   'globe-coordinate': 'globecoordinate',
                   'math': 'string',
                   'external-id': 'string',
                   'geo-shape': 'string',
                   'tabular-data': 'string',
                   }

    def __init__(self, site, id, datatype=None):
        """
        Constructor.

        @param site: data repository
        @type site: pywikibot.site.DataSite
        @param id: id of the property
        @type id: basestring
        @param datatype: datatype of the property;
            if not given, it will be queried via the API
        @type datatype: basestring
        """
        self.repo = site
        # Property ids are case-insensitive on input but canonically
        # upper-case ('P123').
        self.id = id.upper()
        if datatype:
            self._type = datatype

    @property
    def type(self):
        """
        Return the type of this property.

        @rtype: str
        """
        # Lazily fetched from the repository and cached on first access.
        if not hasattr(self, '_type'):
            self._type = self.repo.getPropertyType(self)
        return self._type

    @deprecated("Property.type")
    def getType(self):
        """
        Return the type of this property.

        It returns 'globecoordinate' for type 'globe-coordinate'
        in order to be backwards compatible. See
        https://gerrit.wikimedia.org/r/#/c/135405/ for background.
        """
        if self.type == 'globe-coordinate':
            return 'globecoordinate'
        else:
            # self.type (accessed above) guarantees _type is populated.
            return self._type

    def getID(self, numeric=False):
        """
        Get the identifier of this property.

        @param numeric: Strip the first letter and return an int
        @type numeric: bool
        """
        if numeric:
            return int(self.id[1:])
        else:
            return self.id
class PropertyPage(WikibasePage, Property):

    """
    A Wikibase entity in the property namespace.

    Should be created as::

        PropertyPage(DataSite, 'P21')
    """

    entity_type = 'property'
    title_pattern = r'^P[1-9]\d*$'

    def __init__(self, source, title=u""):
        """
        Constructor.

        @param source: data repository property is on
        @type source: pywikibot.site.DataSite
        @param title: page name of property, like "P##"
        @type title: str
        """
        if not title:
            raise pywikibot.InvalidTitle("Property's title cannot be empty")

        # Both bases need explicit initialization; WikibasePage sets
        # self.id, which Property.__init__ then normalizes/stores.
        WikibasePage.__init__(self, source, title,
                              ns=source.property_namespace)
        Property.__init__(self, source, self.id)

    def get(self, force=False, *args, **kwargs):
        """
        Fetch the property entity, and cache it.

        @param force: override caching
        @type force: bool
        @raise NotImplementedError: a value in args or kwargs
        """
        if args or kwargs:
            raise NotImplementedError(
                'PropertyPage.get only implements "force".')

        data = WikibasePage.get(self, force)
        # Cache the datatype from the fetched content so self.type does
        # not need a second API call.
        if 'datatype' in self._content:
            self._type = self._content['datatype']
        data['datatype'] = self._type
        return data

    def newClaim(self, *args, **kwargs):
        """
        Helper function to create a new claim object for this property.

        @rtype: Claim
        """
        return Claim(self.site, self.getID(), datatype=self.type,
                     *args, **kwargs)
# Add PropertyPage to the class attribute "types" after its declaration.
# It cannot be registered inside Property.types itself because
# PropertyPage subclasses Property and is therefore declared afterwards.
Property.types['wikibase-property'] = PropertyPage
class Claim(Property):

    """
    A Claim on a Wikibase entity.

    Claims are standard claims as well as references and qualifiers.
    """

    # Converters from API JSON datavalues to Python target objects,
    # keyed by Wikibase datatype name. Datatypes not listed here
    # (string, url, math, external-id) pass the raw value through.
    TARGET_CONVERTER = {
        'wikibase-item': lambda value, site:
            ItemPage(site, 'Q' + str(value['numeric-id'])),
        'wikibase-property': lambda value, site:
            PropertyPage(site, 'P' + str(value['numeric-id'])),
        'commonsMedia': lambda value, site:
            FilePage(pywikibot.Site('commons', 'commons'), value),  # T90492
        'globe-coordinate': pywikibot.Coordinate.fromWikibase,
        'geo-shape': pywikibot.WbGeoShape.fromWikibase,
        'tabular-data': pywikibot.WbTabularData.fromWikibase,
        'time': pywikibot.WbTime.fromWikibase,
        'quantity': pywikibot.WbQuantity.fromWikibase,
        'monolingualtext': lambda value, site:
            pywikibot.WbMonolingualText.fromWikibase(value)
    }

    # Valid snak types as defined by the Wikibase data model.
    SNAK_TYPES = ('value', 'somevalue', 'novalue')

    def __init__(self, site, pid, snak=None, hash=None, isReference=False,
                 isQualifier=False, **kwargs):
        """
        Constructor.

        Defined by the "snak" value, supplemented by site + pid

        @param site: repository the claim is on
        @type site: pywikibot.site.DataSite
        @param pid: property id, with "P" prefix
        @param snak: snak identifier for claim
        @param hash: hash identifier for references
        @param isReference: whether specified claim is a reference
        @param isQualifier: whether specified claim is a qualifier
        """
        Property.__init__(self, site, pid, **kwargs)
        self.snak = snak
        self.hash = hash
        self.isReference = isReference
        self.isQualifier = isQualifier
        if self.isQualifier and self.isReference:
            raise ValueError(u'Claim cannot be both a qualifier and reference.')
        self.sources = []
        # OrderedDict preserves qualifier order for round-tripping
        # 'qualifiers-order' through the API.
        self.qualifiers = OrderedDict()
        self.target = None
        self.snaktype = 'value'
        self.rank = 'normal'
        self.on_item = None  # The item it's on

    @classmethod
    def fromJSON(cls, site, data):
        """
        Create a claim object from JSON returned in the API call.

        @param data: JSON containing claim data
        @type data: dict

        @rtype: Claim
        """
        claim = cls(site, data['mainsnak']['property'],
                    datatype=data['mainsnak'].get('datatype', None))
        if 'id' in data:
            claim.snak = data['id']
        elif 'hash' in data:
            claim.hash = data['hash']
        claim.snaktype = data['mainsnak']['snaktype']
        # Only 'value' snaks carry a datavalue; 'somevalue'/'novalue' do not.
        if claim.getSnakType() == 'value':
            value = data['mainsnak']['datavalue']['value']
            # The default covers string, url types
            if claim.type in cls.types or claim.type == 'wikibase-property':
                claim.target = cls.TARGET_CONVERTER.get(
                    claim.type, lambda value, site: value)(value, site)
            else:
                pywikibot.warning(
                    '{0} datatype is not supported yet.'.format(claim.type))
                claim.target = pywikibot.WbUnknown.fromWikibase(value)
        if 'rank' in data:  # References/Qualifiers don't have ranks
            claim.rank = data['rank']
        if 'references' in data:
            for source in data['references']:
                claim.sources.append(cls.referenceFromJSON(site, source))
        if 'qualifiers' in data:
            # 'qualifiers-order' preserves the display order of properties.
            for prop in data['qualifiers-order']:
                claim.qualifiers[prop] = [cls.qualifierFromJSON(site, qualifier)
                                          for qualifier in data['qualifiers'][prop]]
        return claim

    @classmethod
    def referenceFromJSON(cls, site, data):
        """
        Create a dict of claims from reference JSON returned in the API call.

        Reference objects are represented a
        bit differently, and require some
        more handling.

        @rtype: dict
        """
        source = OrderedDict()

        # Before #84516 Wikibase did not implement snaks-order.
        # https://gerrit.wikimedia.org/r/#/c/84516/
        if 'snaks-order' in data:
            prop_list = data['snaks-order']
        else:
            prop_list = data['snaks'].keys()

        for prop in prop_list:
            for claimsnak in data['snaks'][prop]:
                claim = cls.fromJSON(site, {'mainsnak': claimsnak,
                                            'hash': data['hash']})
                claim.isReference = True
                if claim.getID() not in source:
                    source[claim.getID()] = []
                source[claim.getID()].append(claim)
        return source

    @classmethod
    def qualifierFromJSON(cls, site, data):
        """
        Create a Claim for a qualifier from JSON.

        Qualifier objects are represented a bit
        differently like references, but I'm not
        sure if this even requires it's own function.

        @rtype: Claim
        """
        claim = cls.fromJSON(site, {'mainsnak': data,
                                    'hash': data['hash']})
        claim.isQualifier = True
        return claim

    def toJSON(self):
        """
        Create dict suitable for the MediaWiki API.

        @rtype: dict
        """
        data = {
            'mainsnak': {
                'snaktype': self.snaktype,
                'property': self.getID()
            },
            'type': 'statement'
        }
        if hasattr(self, 'snak') and self.snak is not None:
            data['id'] = self.snak
        if hasattr(self, 'rank') and self.rank is not None:
            data['rank'] = self.rank
        if self.getSnakType() == 'value':
            data['mainsnak']['datatype'] = self.type
            data['mainsnak']['datavalue'] = self._formatDataValue()
        if self.isQualifier or self.isReference:
            # Qualifiers and references are serialized as bare snaks,
            # not full statements.
            data = data['mainsnak']
            if hasattr(self, 'hash') and self.hash is not None:
                data['hash'] = self.hash
        else:
            if len(self.qualifiers) > 0:
                data['qualifiers'] = {}
                data['qualifiers-order'] = list(self.qualifiers.keys())
                for prop, qualifiers in self.qualifiers.items():
                    for qualifier in qualifiers:
                        assert qualifier.isQualifier is True
                    data['qualifiers'][prop] = [qualifier.toJSON() for qualifier in qualifiers]
            if len(self.sources) > 0:
                data['references'] = []
                for collection in self.sources:
                    reference = {'snaks': {}, 'snaks-order': list(collection.keys())}
                    for prop, val in collection.items():
                        reference['snaks'][prop] = []
                        for source in val:
                            assert source.isReference is True
                            src_data = source.toJSON()
                            if 'hash' in src_data:
                                # The hash belongs to the whole reference,
                                # not to the individual snak.
                                if 'hash' not in reference:
                                    reference['hash'] = src_data['hash']
                                del src_data['hash']
                            reference['snaks'][prop].append(src_data)
                    data['references'].append(reference)
        return data

    def setTarget(self, value):
        """
        Set the target value in the local object.

        @param value: The new target value.
        @type value: object

        @exception ValueError: if value is not of the type
            required for the Claim type.
        """
        value_class = self.types[self.type]
        if not isinstance(value, value_class):
            raise ValueError("%s is not type %s."
                             % (value, value_class))
        self.target = value

    def changeTarget(self, value=None, snaktype='value', **kwargs):
        """
        Set the target value in the data repository.

        @param value: The new target value.
        @type value: object
        @param snaktype: The new snak type.
        @type snaktype: str ('value', 'somevalue', or 'novalue')
        """
        if value:
            self.setTarget(value)

        data = self.repo.changeClaimTarget(self, snaktype=snaktype,
                                           **kwargs)
        # TODO: Re-create the entire item from JSON, not just id
        self.snak = data['claim']['id']
        self.on_item.latest_revision_id = data['pageinfo']['lastrevid']

    def getTarget(self):
        """
        Return the target value of this Claim.

        None is returned if no target is set

        @return: object
        """
        return self.target

    def getSnakType(self):
        """
        Return the type of snak.

        @return: str ('value', 'somevalue' or 'novalue')
        @rtype: unicode
        """
        return self.snaktype

    def setSnakType(self, value):
        """
        Set the type of snak.

        @param value: Type of snak
        @type value: str ('value', 'somevalue', or 'novalue')
        """
        if value in self.SNAK_TYPES:
            self.snaktype = value
        else:
            raise ValueError(
                "snaktype must be 'value', 'somevalue', or 'novalue'.")

    def getRank(self):
        """Return the rank of the Claim."""
        return self.rank

    def setRank(self, rank):
        """Set the rank of the Claim."""
        self.rank = rank

    def changeRank(self, rank):
        """Change the rank of the Claim and save."""
        self.rank = rank
        return self.repo.save_claim(self)

    def changeSnakType(self, value=None, **kwargs):
        """
        Save the new snak value.

        TODO: Is this function really needed?
        """
        if value:
            self.setSnakType(value)
        self.changeTarget(snaktype=self.getSnakType(), **kwargs)

    def getSources(self):
        """
        Return a list of sources, each being a list of Claims.

        @rtype: list
        """
        return self.sources

    def addSource(self, claim, **kwargs):
        """
        Add the claim as a source.

        @param claim: the claim to add
        @type claim: pywikibot.Claim
        """
        self.addSources([claim], **kwargs)

    def addSources(self, claims, **kwargs):
        """
        Add the claims as one source.

        @param claims: the claims to add
        @type claims: list of pywikibot.Claim
        """
        data = self.repo.editSource(self, claims, new=True, **kwargs)
        self.on_item.latest_revision_id = data['pageinfo']['lastrevid']
        # All claims in one source share the reference hash returned
        # by the API.
        source = defaultdict(list)
        for claim in claims:
            claim.hash = data['reference']['hash']
            source[claim.getID()].append(claim)
        self.sources.append(source)

    def removeSource(self, source, **kwargs):
        """
        Remove the source. Call removeSources().

        @param source: the source to remove
        @type source: pywikibot.Claim
        """
        self.removeSources([source], **kwargs)

    def removeSources(self, sources, **kwargs):
        """
        Remove the sources.

        @param sources: the sources to remove
        @type sources: list of pywikibot.Claim
        """
        data = self.repo.removeSources(self, sources, **kwargs)
        self.on_item.latest_revision_id = data['pageinfo']['lastrevid']
        for source in sources:
            # Rebuild the one-claim source dict so it can be matched and
            # removed from the locally cached list.
            source_dict = defaultdict(list)
            source_dict[source.getID()].append(source)
            self.sources.remove(source_dict)

    def addQualifier(self, qualifier, **kwargs):
        """Add the given qualifier.

        @param qualifier: the qualifier to add
        @type qualifier: Claim
        """
        data = self.repo.editQualifier(self, qualifier, **kwargs)
        qualifier.isQualifier = True
        self.on_item.latest_revision_id = data['pageinfo']['lastrevid']
        if qualifier.getID() in self.qualifiers:
            self.qualifiers[qualifier.getID()].append(qualifier)
        else:
            self.qualifiers[qualifier.getID()] = [qualifier]

    def removeQualifier(self, qualifier, **kwargs):
        """
        Remove the qualifier. Call removeQualifiers().

        @param qualifier: the qualifier to remove
        @type qualifier: Claim
        """
        self.removeQualifiers([qualifier], **kwargs)

    def removeQualifiers(self, qualifiers, **kwargs):
        """
        Remove the qualifiers.

        @param qualifiers: the qualifiers to remove
        @type qualifiers: list Claim
        """
        data = self.repo.remove_qualifiers(self, qualifiers, **kwargs)
        self.on_item.latest_revision_id = data['pageinfo']['lastrevid']
        for qualifier in qualifiers:
            self.qualifiers[qualifier.getID()].remove(qualifier)

    def target_equals(self, value):
        """
        Check whether the Claim's target is equal to specified value.

        The function checks for:

        - WikibasePage ID equality
        - WbTime year equality
        - Coordinate equality, regarding precision
        - WbMonolingualText text equality
        - direct equality

        @param value: the value to compare with
        @return: true if the Claim's target is equal to the value provided,
            false otherwise
        @rtype: bool
        """
        if (isinstance(self.target, WikibasePage) and
                isinstance(value, basestring)):
            return self.target.id == value

        if (isinstance(self.target, pywikibot.WbTime) and
                not isinstance(value, pywikibot.WbTime)):
            return self.target.year == int(value)

        if (isinstance(self.target, pywikibot.Coordinate) and
                isinstance(value, basestring)):
            # Coordinates are compared against a "lat,lon[,precision]"
            # string, within the larger of the two precisions.
            coord_args = [float(x) for x in value.split(',')]
            if len(coord_args) >= 3:
                precision = coord_args[2]
            else:
                precision = 0.0001  # Default value (~10 m at equator)
            try:
                if self.target.precision is not None:
                    precision = max(precision, self.target.precision)
            except TypeError:
                pass

            return (abs(self.target.lat - coord_args[0]) <= precision and
                    abs(self.target.lon - coord_args[1]) <= precision)

        if (isinstance(self.target, pywikibot.WbMonolingualText) and
                isinstance(value, basestring)):
            return self.target.text == value

        return self.target == value

    def has_qualifier(self, qualifier_id, target):
        """
        Check whether Claim contains specified qualifier.

        @param qualifier_id: id of the qualifier
        @type qualifier_id: str
        @param target: qualifier target to check presence of
        @return: true if the qualifier was found, false otherwise
        @rtype: bool
        """
        if self.isQualifier or self.isReference:
            raise ValueError(u'Qualifiers and references cannot have '
                             u'qualifiers.')

        for qualifier in self.qualifiers.get(qualifier_id, []):
            if qualifier.target_equals(target):
                return True
        return False

    def _formatValue(self):
        """
        Format the target into the proper JSON value that Wikibase wants.

        @return: JSON value
        @rtype: dict
        """
        if self.type in ('wikibase-item', 'wikibase-property'):
            value = {'entity-type': self.getTarget().entity_type,
                     'numeric-id': self.getTarget().getID(numeric=True)}
        elif self.type in ('string', 'url', 'math', 'external-id'):
            value = self.getTarget()
        elif self.type == 'commonsMedia':
            value = self.getTarget().title(withNamespace=False)
        elif self.type in ('globe-coordinate', 'time',
                           'quantity', 'monolingualtext',
                           'geo-shape', 'tabular-data'):
            value = self.getTarget().toWikibase()
        else:  # WbUnknown
            pywikibot.warning(
                '{0} datatype is not supported yet.'.format(self.type))
            value = self.getTarget().toWikibase()
        return value

    def _formatDataValue(self):
        """
        Format the target into the proper JSON datavalue that Wikibase wants.

        @return: Wikibase API representation with type and value.
        @rtype: dict
        """
        return {'value': self._formatValue(),
                'type': self.value_types.get(self.type, self.type)
                }
class Revision(DotReadableDict):

    """A structure holding information about a single revision of a Page."""

    # Lightweight record types returned by hist_entry()/full_hist_entry().
    HistEntry = namedtuple('HistEntry',
                           ['revid', 'timestamp', 'user', 'comment'])

    FullHistEntry = namedtuple('FullHistEntry',
                               ['revid', 'timestamp', 'user', 'text',
                                'rollbacktoken'])

    def __init__(self, revid, timestamp, user, anon=False, comment=u"",
                 text=None, minor=False, rollbacktoken=None, parentid=None,
                 contentmodel=None, sha1=None):
        """
        Constructor.

        All parameters correspond to object attributes (e.g., revid
        parameter is stored as self.revid)

        @param revid: Revision id number
        @type revid: int
        @param text: Revision wikitext.
        @type text: unicode, or None if text not yet retrieved
        @param timestamp: Revision time stamp
        @type timestamp: pywikibot.Timestamp
        @param user: user who edited this revision
        @type user: unicode
        @param anon: user is unregistered
        @type anon: bool
        @param comment: edit comment text
        @type comment: unicode
        @param minor: edit flagged as minor
        @type minor: bool
        @param rollbacktoken: rollback token
        @type rollbacktoken: unicode
        @param parentid: id of parent Revision (v1.16+)
        @type parentid: long
        @param contentmodel: content model label (v1.21+)
        @type contentmodel: unicode
        @param sha1: sha1 of revision text (v1.19+)
        @type sha1: unicode
        """
        self.revid = revid
        self.timestamp = timestamp
        self.user = user
        self.anon = anon
        self.comment = comment
        self.text = text
        self.minor = minor
        self.rollbacktoken = rollbacktoken
        # These three are exposed through validating/lazy properties below.
        self._parent_id = parentid
        self._content_model = contentmodel
        self._sha1 = sha1

    @property
    def parent_id(self):
        """
        Return id of parent/previous revision.

        Returns 0 if there is no previous revision

        @return: id of parent/previous revision
        @rtype: int or long
        @raises AssertionError: parent id not supplied to the constructor
        """
        if self._parent_id is None:
            raise AssertionError(
                'Revision %d was instantiated without a parent id'
                % self.revid)
        return self._parent_id

    @property
    def content_model(self):
        """
        Return content model of the revision.

        @return: content model
        @rtype: str
        @raises AssertionError: content model not supplied to the constructor
            which always occurs for MediaWiki versions lower than 1.21.
        """
        # TODO: T102735: Add a sane default of 'wikitext' and others for <1.21
        if self._content_model is None:
            raise AssertionError(
                'Revision %d was instantiated without a content model'
                % self.revid)
        return self._content_model

    @property
    def sha1(self):
        """
        Return and cache SHA1 checksum of the text.

        @return: the cached SHA1 checksum when it was requested from the
            API; otherwise the checksum computed (and then cached) from
            the revision's UTF-8 encoded text. None (uncached) when the
            text has not been queried.
        @rtype: str or None
        """
        # Compute lazily from the text; an absent text yields None and
        # caches nothing, so a later text assignment can still be hashed.
        if self._sha1 is None and self.text is not None:
            self._sha1 = hashlib.sha1(self.text.encode('utf8')).hexdigest()
        return self._sha1

    def hist_entry(self):
        """Return a namedtuple with a Page history record."""
        return Revision.HistEntry(self.revid, self.timestamp,
                                  self.user, self.comment)

    def full_hist_entry(self):
        """Return a namedtuple with a Page full history record."""
        return Revision.FullHistEntry(self.revid, self.timestamp,
                                      self.user, self.text,
                                      self.rollbacktoken)

    @staticmethod
    def _thank(revid, site, source='pywikibot'):
        """Thank a user for this revision.

        @param site: The Site object for this revision.
        @type site: Site
        @param source: An optional source to pass to the API.
        @type source: str
        """
        site.thank_revision(revid, source)
class FileInfo(DotReadableDict):

    """
    A structure holding imageinfo of latest rev. of FilePage.

    All keys of API imageinfo dictionary are mapped to FileInfo attributes.
    Attributes can be retrieved both as self['key'] or self.key.

    Following attributes will be returned:
        - timestamp, user, comment, url, size, sha1, mime, metadata
        - archivename (not for latest revision)

    See Site.loadimageinfo() for details.

    Note: timestamp will be casted to pywikibot.Timestamp.
    """

    def __init__(self, file_revision):
        """
        Create class with the dictionary returned by site.loadimageinfo().

        @param file_revision: imageinfo dictionary of one file revision,
            as returned by site.loadimageinfo().
        @type file_revision: dict
        """
        self.__dict__.update(file_revision)
        # Normalize the API's ISO 8601 timestamp string to a Timestamp.
        self.timestamp = pywikibot.Timestamp.fromISOformat(self.timestamp)

    def __eq__(self, other):
        """Test if two FileInfo objects are equal."""
        # Comparing with an unrelated type used to raise AttributeError
        # (when `other` has no __dict__); return NotImplemented instead so
        # Python can fall back to the reflected comparison or identity.
        if not isinstance(other, FileInfo):
            return NotImplemented
        return self.__dict__ == other.__dict__
class Link(ComparableMixin):
    """
    A MediaWiki link (local or interwiki).
    Has the following attributes:
      - site: The Site object for the wiki linked to
      - namespace: The namespace of the page linked to (int)
      - title: The title of the page linked to (unicode); does not include
        namespace or section
      - section: The section of the page linked to (unicode or None); this
        contains any text following a '#' character in the title
      - anchor: The anchor text (unicode or None); this contains any text
        following a '|' character inside the link
    """
    illegal_titles_pattern = re.compile(
        # Matching titles will be held as illegal.
        r'''[\x00-\x1f\x23\x3c\x3e\x5b\x5d\x7b\x7c\x7d\x7f]'''
        # URL percent encoding sequences interfere with the ability
        # to round-trip titles -- you can't link to them consistently.
        u'|%[0-9A-Fa-f]{2}'
        # XML/HTML character references produce similar issues.
        u'|&[A-Za-z0-9\x80-\xff]+;'
        u'|&#[0-9]+;'
        u'|&#x[0-9A-Fa-f]+;'
    )
    def __init__(self, text, source=None, defaultNamespace=0):
        """
        Constructor.
        @param text: the link text (everything appearing between [[ and ]]
            on a wiki page)
        @type text: unicode
        @param source: the Site on which the link was found (not necessarily
            the site to which the link refers)
        @type source: Site or BasePage
        @param defaultNamespace: a namespace to use if the link does not
            contain one (defaults to 0)
        @type defaultNamespace: int
        @raises UnicodeError: text could not be converted to unicode.
            On Python 2.6.6 without unicodedata2, this could also be raised
            if the text contains combining characters.
            See https://phabricator.wikimedia.org/T102461
        """
        source_is_page = isinstance(source, BasePage)
        assert source is None or source_is_page or isinstance(source, pywikibot.site.BaseSite), \
            "source parameter should be either a Site or Page object"
        if source_is_page:
            self._source = source.site
        else:
            self._source = source or pywikibot.Site()
        self._text = text
        # See bug T104864, defaultNamespace might have been deleted.
        try:
            self._defaultns = self._source.namespaces[defaultNamespace]
        except KeyError:
            self._defaultns = defaultNamespace
        # preprocess text (these changes aren't site-dependent)
        # First remove anchor, which is stored unchanged, if there is one
        if u"|" in self._text:
            self._text, self._anchor = self._text.split(u"|", 1)
        else:
            self._anchor = None
        # Convert URL-encoded characters to unicode
        encodings = [self._source.encoding()] + list(self._source.encodings())
        self._text = url2unicode(self._text, encodings=encodings)
        # Clean up the name, it can come from anywhere.
        # Convert HTML entities to unicode
        t = html2unicode(self._text)
        # Normalize unicode string to a NFC (composed) format to allow
        # proper string comparisons to strings output from MediaWiki API.
        # Due to Python issue 10254, this is not possible on Python 2.6.6
        # if the string contains combining characters. See T102461.
        if (PYTHON_VERSION == (2, 6, 6) and
                unicodedata.__name__ != 'unicodedata2' and
                any(unicodedata.combining(c) for c in t)):
            raise UnicodeError(
                'Link(%r, %s): combining characters detected, which are '
                'not supported by Pywikibot on Python 2.6.6. See '
                'https://phabricator.wikimedia.org/T102461'
                % (t, self._source))
        t = unicodedata.normalize('NFC', t)
        # This code was adapted from Title.php : secureAndSplit()
        #
        if u'\ufffd' in t:
            raise pywikibot.Error(
                "Title contains illegal char (\\uFFFD 'REPLACEMENT CHARACTER')")
        # Cleanup whitespace
        # (underscores and a range of exotic unicode spaces all collapse to
        # a single ASCII space, mirroring MediaWiki's own normalization)
        t = re.sub(
            '[_ \xa0\u1680\u180E\u2000-\u200A\u2028\u2029\u202F\u205F\u3000]+',
            ' ', t)
        # Strip spaces at both ends
        t = t.strip()
        # Remove left-to-right and right-to-left markers.
        t = t.replace(u"\u200e", u"").replace(u"\u200f", u"")
        self._text = t
        if source_is_page:
            # A link found on a page may be relative ("#section", "/subpage");
            # prefix it with the containing page's title.
            self._text = source.title(withSection=False) + self._text
    def __repr__(self):
        """Return a more complete string representation."""
        return "pywikibot.page.Link(%r, %r)" % (self.title, self.site)
    def parse_site(self):
        """
        Parse only enough text to determine which site the link points to.
        This method does not parse anything after the first ":"; links
        with multiple interwiki prefixes (such as "wikt:fr:Parlais") need
        to be re-parsed on the first linked wiki to get the actual site.
        @return: The family name and site code for the linked site. If the site
            is not supported by the configured families it returns None instead
            of a str.
        @rtype: tuple
        """
        # NOTE: keep this prefix-resolution loop in sync with parse() below;
        # it is the side-effect-free subset of the same algorithm.
        t = self._text
        fam = self._source.family
        code = self._source.code
        while u":" in t:
            # Initial colon
            if t.startswith(u":"):
                # remove the colon but continue processing
                # remove any subsequent whitespace
                t = t.lstrip(u":").lstrip(u" ")
                continue
            prefix = t[:t.index(u":")].lower()  # part of text before :
            ns = self._source.namespaces.lookup_name(prefix)
            if ns:
                # The prefix is a namespace in the source wiki
                return (fam.name, code)
            if prefix in fam.langs:
                # prefix is a language code within the source wiki family
                return (fam.name, prefix)
            try:
                newsite = self._source.interwiki(prefix)
            except KeyError:
                break  # text before : doesn't match any known prefix
            except SiteDefinitionError:
                return (None, None)
            else:
                return (newsite.family.name, newsite.code)
        return (fam.name, code)  # text before : doesn't match any known prefix
    def parse(self):
        """
        Parse wikitext of the link.
        Called internally when accessing attributes.
        """
        self._site = self._source
        self._namespace = self._defaultns
        self._is_interwiki = False
        t = self._text
        # ns_prefix is set once an explicit namespace prefix has been consumed.
        ns_prefix = False
        # This code was adapted from Title.php : secureAndSplit()
        #
        # first_other_site remembers the first non-source site encountered so
        # chained interwiki prefixes through non-local sites can be rejected.
        first_other_site = None
        while u":" in t:
            # Initial colon indicates main namespace rather than default
            if t.startswith(u":"):
                self._namespace = self._site.namespaces[0]
                # remove the colon but continue processing
                # remove any subsequent whitespace
                t = t.lstrip(u":").lstrip(u" ")
                continue
            prefix = t[:t.index(u":")].lower()
            ns = self._site.namespaces.lookup_name(prefix)
            if ns:
                # Ordinary namespace
                t = t[t.index(u":"):].lstrip(u":").lstrip(u" ")
                self._namespace = ns
                ns_prefix = True
                break
            try:
                newsite = self._site.interwiki(prefix)
            except KeyError:
                break  # text before : doesn't match any known prefix
            except SiteDefinitionError as e:
                raise SiteDefinitionError(
                    u'{0} is not a local page on {1}, and the interwiki prefix '
                    '{2} is not supported by Pywikibot!:\n{3}'.format(
                        self._text, self._site, prefix, e))
            else:
                t = t[t.index(u":"):].lstrip(u":").lstrip(u" ")
                if first_other_site:
                    if not self._site.local_interwiki(prefix):
                        raise pywikibot.InvalidTitle(
                            u'{0} links to a non local site {1} via an '
                            'interwiki link to {2}.'.format(
                                self._text, newsite, first_other_site))
                elif newsite != self._source:
                    first_other_site = newsite
                self._site = newsite
                self._is_interwiki = True
        if u"#" in t:
            t, sec = t.split(u'#', 1)
            t, self._section = t.rstrip(), sec.lstrip()
        else:
            self._section = None
        if ns_prefix:
            # 'namespace:' is not a valid title
            if not t:
                raise pywikibot.InvalidTitle(
                    u"'{0}' has no title.".format(self._text))
            elif ':' in t and self._namespace >= 0:  # < 0 don't have talk
                # Check whether the rest of the title would be valid in the
                # partner (talk/non-talk) namespace, which is ambiguous.
                other_ns = self._site.namespaces[self._namespace - 1
                                                 if self._namespace % 2 else
                                                 self._namespace + 1]
                if '' in other_ns:  # other namespace uses empty str as ns
                    next_ns = t[:t.index(':')]
                    if self._site.namespaces.lookup_name(next_ns):
                        raise pywikibot.InvalidTitle(
                            u"The (non-)talk page of '{0}' is a valid title "
                            "in another namespace.".format(self._text))
        # Reject illegal characters.
        m = Link.illegal_titles_pattern.search(t)
        if m:
            raise pywikibot.InvalidTitle(
                u"%s contains illegal char(s) %s" % (repr(t), repr(m.group(0))))
        # Pages with "/./" or "/../" appearing in the URLs will
        # often be unreachable due to the way web browsers deal
        # * with 'relative' URLs. Forbid them explicitly.
        if u'.' in t and (
                t == u'.' or t == u'..' or
                t.startswith(u'./') or
                t.startswith(u'../') or
                u'/./' in t or
                u'/../' in t or
                t.endswith(u'/.') or
                t.endswith(u'/..')
        ):
            raise pywikibot.InvalidTitle(
                u"(contains . / combinations): '%s'"
                % self._text)
        # Magic tilde sequences? Nu-uh!
        if u"~~~" in t:
            raise pywikibot.InvalidTitle(u"(contains ~~~): '%s'" % self._text)
        if self._namespace != -1 and len(t) > 255:
            raise pywikibot.InvalidTitle(u"(over 255 bytes): '%s'" % t)
        # "empty" local links can only be self-links
        # with a fragment identifier.
        if not t.strip() and not self._is_interwiki:
            raise pywikibot.InvalidTitle("The link does not contain a page "
                                         "title")
        if self._site.namespaces[self._namespace].case == 'first-letter':
            t = first_upper(t)
        self._title = t
    # define attributes, to be evaluated lazily
    @property
    def site(self):
        """
        Return the site of the link.
        @rtype: pywikibot.site.BaseSite
        """
        if not hasattr(self, "_site"):
            self.parse()
        return self._site
    @property
    def namespace(self):
        """
        Return the namespace of the link.
        @rtype: Namespace
        """
        if not hasattr(self, "_namespace"):
            self.parse()
        return self._namespace
    @property
    def title(self):
        """
        Return the title of the link.
        @rtype: unicode
        """
        if not hasattr(self, "_title"):
            self.parse()
        return self._title
    @property
    def section(self):
        """
        Return the section of the link.
        @rtype: unicode
        """
        if not hasattr(self, "_section"):
            self.parse()
        return self._section
    @property
    def anchor(self):
        """
        Return the anchor of the link.
        @rtype: unicode
        """
        if not hasattr(self, "_anchor"):
            self.parse()
        return self._anchor
    def canonical_title(self):
        """Return full page title, including localized namespace."""
        # Avoid that ':' will be added to the title for Main ns.
        if self.namespace != Namespace.MAIN:
            return "%s:%s" % (self.site.namespace(self.namespace),
                              self.title)
        else:
            return self.title
    def ns_title(self, onsite=None):
        """
        Return full page title, including namespace.
        @param onsite: site object
            if specified, present title using onsite local namespace,
            otherwise use self canonical namespace.
        @raise pywikibot.Error: no corresponding namespace is found in onsite
        """
        if onsite is None:
            name = self.namespace.canonical_name
        else:
            # look for corresponding ns in onsite by name comparison
            for alias in self.namespace:
                namespace = onsite.namespaces.lookup_name(alias)
                if namespace is not None:
                    name = namespace.custom_name
                    break
            else:
                # not found
                raise pywikibot.Error(
                    u'No corresponding namespace found for namespace %s on %s.'
                    % (self.namespace, onsite))
        if self.namespace != Namespace.MAIN:
            return u'%s:%s' % (name, self.title)
        else:
            return self.title
    def astext(self, onsite=None):
        """
        Return a text representation of the link.
        @param onsite: if specified, present as a (possibly interwiki) link
            from the given site; otherwise, present as an internal link on
            the source site.
        """
        if onsite is None:
            onsite = self._source
        title = self.title
        if self.namespace != Namespace.MAIN:
            title = onsite.namespace(self.namespace) + ":" + title
        if self.section:
            title = title + "#" + self.section
        if onsite == self.site:
            return u'[[%s]]' % title
        if onsite.family == self.site.family:
            return u'[[%s:%s]]' % (self.site.code, title)
        if self.site.family.name == self.site.code:
            # use this form for sites like commons, where the
            # code is the same as the family name
            return u'[[%s:%s]]' % (self.site.code,
                                   title)
        return u'[[%s:%s:%s]]' % (self.site.family.name,
                                  self.site.code,
                                  title)
    if sys.version_info[0] > 2:
        def __str__(self):
            """Return a string representation."""
            return self.__unicode__()
    else:
        def __str__(self):
            """Return a string representation."""
            return self.astext().encode("ascii", "backslashreplace")
    def _cmpkey(self):
        """
        Key for comparison of Link objects.
        Link objects are "equal" if and only if they are on the same site
        and have the same normalized title, including section if any.
        Link objects are sortable by site, then namespace, then title.
        """
        return (self.site, self.namespace, self.title)
    def __unicode__(self):
        """
        Return a unicode string representation.
        @rtype: unicode
        """
        return self.astext()
    def __hash__(self):
        """A stable identifier to be used as a key in hash-tables."""
        # Links equal under _cmpkey (same site/namespace/title) always share
        # family name, code and title, so this hash is consistent with __eq__.
        return hash(u'%s:%s:%s' % (self.site.family.name,
                                   self.site.code,
                                   self.title))
    @classmethod
    def fromPage(cls, page, source=None):
        """
        Create a Link to a Page.
        @param page: target Page
        @type page: Page
        @param source: Link from site source
        @param source: Site
        @rtype: Link
        """
        # Bypass __init__: all attributes are filled from the already-parsed
        # Page, so no text parsing is necessary.
        link = cls.__new__(cls)
        link._site = page.site
        link._section = page.section()
        link._namespace = page.namespace()
        link._title = page.title(withNamespace=False,
                                 allowInterwiki=False,
                                 withSection=False)
        link._anchor = None
        link._source = source or pywikibot.Site()
        return link
    @classmethod
    def langlinkUnsafe(cls, lang, title, source):
        """
        Create a "lang:title" Link linked from source.
        Assumes that the lang & title come clean, no checks are made.
        @param lang: target site code (language)
        @type lang: str
        @param title: target Page
        @type title: unicode
        @param source: Link from site source
        @param source: Site
        @rtype: Link
        """
        link = cls.__new__(cls)
        # If the family forwards its language links elsewhere, build the
        # target site in the forwarded family instead of the source family.
        if source.family.interwiki_forward:
            link._site = pywikibot.Site(lang, source.family.interwiki_forward)
        else:
            link._site = pywikibot.Site(lang, source.family.name)
        link._section = None
        link._source = source
        link._namespace = 0
        if ':' in title:
            ns, t = title.split(':', 1)
            ns = link._site.namespaces.lookup_name(ns)
            if ns:
                link._namespace = ns
                title = t
        if u"#" in title:
            t, sec = title.split(u'#', 1)
            title, link._section = t.rstrip(), sec.lstrip()
        else:
            link._section = None
        link._title = title
        return link
    @classmethod
    def create_separated(cls, link, source, default_namespace=0, section=None,
                         label=None):
        """
        Create a new instance but overwrite section or label.
        The returned Link instance is already parsed.
        @param link: The original link text.
        @type link: str
        @param source: The source of the link.
        @type source: Site
        @param default_namespace: The namespace this link uses when no namespace
            is defined in the link text.
        @type default_namespace: int
        @param section: The new section replacing the one in link. If None
            (default) it doesn't replace it.
        @type section: None or str
        @param label: The new label replacing the one in link. If None (default)
            it doesn't replace it.
        """
        link = cls(link, source, default_namespace)
        link.parse()
        # An empty (but not None) section/label explicitly clears the value
        # that parsing extracted from the link text.
        if section:
            link._section = section
        elif section is not None:
            link._section = None
        if label:
            link._anchor = label
        elif label is not None:
            link._anchor = ''
        return link
# Utility functions for parsing page titles
def html2unicode(text, ignore=None):
    """
    Replace HTML entities with equivalent unicode.
    @param ignore: HTML entities to ignore
    @param ignore: list of int
    @rtype: unicode
    """
    if ignore is None:
        ignore = []
    # Matches decimal (&#65;), hexadecimal (&#x41;) and candidate named
    # (&amp;) entities; names not known to htmlentitydefs are left untouched.
    entity_pattern = re.compile(
        r'&(#(?P<decimal>\d+)|#x(?P<hex>[0-9a-fA-F]+)|(?P<name>[A-Za-z]+));')
    # These codepoints are HTML-illegal (Windows-1252 range), but sadly occur
    # in the wild; converting them to chr(decimal) is unsuitable, so remap
    # them to the unicode characters they were meant to denote.
    cp1252_remap = {
        128: 8364,  # €
        130: 8218,  # ‚
        131: 402,  # ƒ
        132: 8222,  # „
        133: 8230,  # …
        134: 8224,  # †
        135: 8225,  # ‡
        136: 710,  # ˆ
        137: 8240,  # ‰
        138: 352,  # Š
        139: 8249,  # ‹
        140: 338,  # Œ
        142: 381,  # Ž
        145: 8216,  # ‘
        146: 8217,  # ’
        147: 8220,  # “
        148: 8221,  # ”
        149: 8226,  # •
        150: 8211,  # –
        151: 8212,  # —
        152: 732,  # ˜
        153: 8482,  # ™
        154: 353,  # š
        155: 8250,  # ›
        156: 339,  # œ
        158: 382,  # ž
        159: 376  # Ÿ
    }
    # Remap any ignored codepoints the same way, and always skip the illegal
    # codepoints 129, 141 and 157, which have no known value and must not be
    # converted to chr(129), chr(141) or chr(157).
    skip = {cp1252_remap.get(cp, cp) for cp in ignore} | {129, 141, 157}
    def _substitute(match):
        """Return the replacement text for one matched entity."""
        if match.group('decimal'):
            codepoint = int(match.group('decimal'))
        elif match.group('hex'):
            codepoint = int(match.group('hex'), 16)
        elif match.group('name'):
            # False marks an unrecognised entity name.
            codepoint = htmlentitydefs.name2codepoint.get(
                match.group('name'), False)
        codepoint = cp1252_remap.get(codepoint, codepoint)
        if not codepoint or codepoint in skip:
            # Unknown name, NUL, or explicitly ignored: keep the raw entity.
            return match.group(0)
        if codepoint > sys.maxunicode:
            # solve narrow Python 2 build exception (UTF-16): chr() cannot
            # produce characters above 0xFFFF there, so build the escape.
            return eval("'\\U{0:08x}'".format(codepoint))
        return chr(codepoint)
    return entity_pattern.sub(_substitute, text)
def UnicodeToAsciiHtml(s):
    """Convert unicode to a str using HTML entities."""
    # Printable ASCII (codepoints 32..127) passes through unchanged;
    # everything else - including control characters - becomes a decimal
    # character reference.
    return ''.join(
        char if 31 < ord(char) < 128 else '&#%d;' % ord(char)
        for char in s)
def unicode2html(x, encoding):
    """
    Convert unicode string to requested HTML encoding.
    Attempt to encode the
    string into the desired format; if that doesn't work, encode the unicode
    into HTML &#; entities. If it does work, return it unchanged.
    @param x: String to update
    @type x: unicode
    @param encoding: Encoding to use
    @type encoding: str
    @rtype: str
    """
    try:
        # Only a probe: we never keep the encoded bytes, just check that
        # every character is representable in the target encoding.
        x.encode(encoding)
    except UnicodeError:
        return UnicodeToAsciiHtml(x)
    return x
@deprecated_args(site2=None, site='encodings')
def url2unicode(title, encodings='utf-8'):
    """
    Convert URL-encoded text to unicode using several encoding.
    Uses the first encoding that doesn't cause an error.
    @param title: URL-encoded character data to convert
    @type title: str
    @param encodings: Encodings to attempt to use during conversion.
    @type encodings: str, list or Site
    @rtype: unicode
    @raise UnicodeError: Could not convert using any encoding.
    """
    if isinstance(encodings, basestring):
        encodings = [encodings]
    elif isinstance(encodings, pywikibot.site.BaseSite):
        # create a list of all possible encodings for both hint sites
        site = encodings
        encodings = [site.encoding()] + list(site.encodings())
    firstException = None
    for enc in encodings:
        try:
            # encode to bytes, resolve %XX escapes, then decode back
            t = title.encode(enc)
            t = unquote_to_bytes(t)
            return t.decode(enc)
        except UnicodeError as ex:
            # remember only the first failure; a later encoding may succeed
            if not firstException:
                firstException = ex
            pass
    # Couldn't convert, raise the original exception
    # NOTE(review): if `encodings` is empty, firstException is still None and
    # `raise None` is a TypeError - callers presumably always pass at least
    # one encoding; verify before relying on this path.
    raise firstException
| 37.159091 | 97 | 0.577366 |
328cc88e3864c4653e1c1f80b0b9ee978432198a | 4,484 | bzl | Python | scala/scala_import.bzl | systemlogic/rules_scala | dec85a95d13f403f360cb51829f1804d0518165a | [
"Apache-2.0"
] | 1 | 2021-04-28T21:40:28.000Z | 2021-04-28T21:40:28.000Z | scala/scala_import.bzl | samschlegel/rules_scala | d51277e59b635c05e86c2760eb4ee1b7d2f51770 | [
"Apache-2.0"
] | null | null | null | scala/scala_import.bzl | samschlegel/rules_scala | d51277e59b635c05e86c2760eb4ee1b7d2f51770 | [
"Apache-2.0"
] | null | null | null | #intellij part is tested manually, tread lightly when changing there
#if you change make sure to manually re-import an intellij project and see imports
#are resolved (not red) and clickable
def _scala_import_impl(ctx):
    """Implementation of scala_import.

    Wraps the prebuilt jars of this target (minus source jars), merges them
    with what is reachable through deps/exports/runtime_deps, records a
    jar-path -> label mapping, and emits IntelliJ metadata per class jar.
    """
    target_data = _code_jars_and_intellij_metadata_from(ctx.attr.jars)
    (current_target_compile_jars,
    intellij_metadata) = (target_data.code_jars, target_data.intellij_metadata)
    current_jars = depset(current_target_compile_jars)
    # Transitive information from each of the three dependency attributes.
    exports = _collect(ctx.attr.exports)
    transitive_runtime_jars = _collect_runtime(ctx.attr.runtime_deps)
    jars = _collect(ctx.attr.deps)
    jars2labels = {}
    _collect_labels(ctx.attr.deps, jars2labels)
    _collect_labels(ctx.attr.exports, jars2labels) #untested
    _add_labels_of_current_code_jars(
    depset(transitive = [current_jars, exports.compile_jars]),
    ctx.label,
    jars2labels
    ) #last to override the label of the export compile jars to the current target
    return struct(
    scala = struct(outputs = struct(jars = intellij_metadata),
    ),
    jars_to_labels = jars2labels,
    providers = [
        _create_provider(current_jars, transitive_runtime_jars, jars, exports)
    ],
    )
def _create_provider(current_target_compile_jars, transitive_runtime_jars, jars,
    exports):
    """Build the java provider merging this target's jars with deps/exports.

    use_ijar is False because the jars are prebuilt (no ijar is generated).
    """
    return java_common.create_provider(
        use_ijar = False,
        compile_time_jars = depset(
            transitive = [current_target_compile_jars, exports.compile_jars]),
        transitive_compile_time_jars = depset(transitive = [
            jars.transitive_compile_jars, current_target_compile_jars,
            exports.transitive_compile_jars
        ]),
        transitive_runtime_jars = depset(transitive = [
            transitive_runtime_jars, jars.transitive_runtime_jars,
            current_target_compile_jars, exports.transitive_runtime_jars
        ]),
    )
def _add_labels_of_current_code_jars(code_jars, label, jars2labels):
    """Map every jar path in `code_jars` to `label` (mutates jars2labels)."""
    for jar in code_jars.to_list():
        jars2labels[jar.path] = label
def _code_jars_and_intellij_metadata_from(jars):
    """Split `jars` into compiled code jars plus per-jar IntelliJ metadata.

    Returns struct(code_jars = list of File, intellij_metadata = list of
    struct) where each metadata entry only carries class_jar (no ijar or
    source jars are available for prebuilt jars).
    """
    code_jars = []
    intellij_metadata = []
    for jar in jars:
        current_jar_code_jars = _filter_out_non_code_jars(jar.files)
        code_jars += current_jar_code_jars
        for current_class_jar in current_jar_code_jars: #intellij, untested
            intellij_metadata.append(
                struct(
                    ijar = None,
                    class_jar = current_class_jar,
                    source_jar = None,
                    source_jars = [],
                ))
    return struct(code_jars = code_jars, intellij_metadata = intellij_metadata)
def _filter_out_non_code_jars(files):
    """Return only the files that are compiled jars (drop *-sources.jar)."""
    return [file for file in files.to_list() if not _is_source_jar(file)]
def _is_source_jar(file):
    # A jar holds sources (not compiled code) iff it follows the Maven
    # "-sources.jar" naming convention.
    return file.basename.endswith("-sources.jar")
# TODO: it seems this could be reworked to use java_common.merge
def _collect(deps):
    """Gather compile/runtime jar depsets from the JavaInfo of each dep.

    Returns struct(transitive_runtime_jars, transitive_compile_jars,
    compile_jars), each a merged depset over all `deps`.
    """
    transitive_compile_jars = []
    runtime_jars = []
    compile_jars = []
    for dep_target in deps:
        java_provider = dep_target[JavaInfo]
        compile_jars.append(java_provider.compile_jars)
        transitive_compile_jars.append(java_provider.transitive_compile_time_jars)
        runtime_jars.append(java_provider.transitive_runtime_jars)
    return struct(
        transitive_runtime_jars = depset(transitive = runtime_jars),
        transitive_compile_jars = depset(transitive = transitive_compile_jars),
        compile_jars = depset(transitive = compile_jars))
def _collect_labels(deps, jars2labels):
    """Accumulate jar-path -> label mappings from every dep (mutates dict)."""
    for dep_target in deps:
        java_provider = dep_target[JavaInfo]
        _transitively_accumulate_labels(dep_target, java_provider, jars2labels)
def _transitively_accumulate_labels(dep_target, java_provider, jars2labels):
    """Merge the dep's own jar->label map, then label its direct compile jars."""
    if hasattr(dep_target, "jars_to_labels"):
        jars2labels.update(dep_target.jars_to_labels)
    #scala_library doesn't add labels to the direct dependency itself
    for jar in java_provider.compile_jars.to_list():
        jars2labels[jar.path] = dep_target.label
def _collect_runtime(runtime_deps):
    """Merge the transitive runtime jars of every runtime_dep into one depset."""
    jar_deps = []
    for dep_target in runtime_deps:
        java_provider = dep_target[JavaInfo]
        jar_deps.append(java_provider.transitive_runtime_jars)
    return depset(transitive = jar_deps)
# Rule exposing prebuilt jars as a Scala/Java dependency; see
# _scala_import_impl above for what each attribute contributes.
scala_import = rule(
    implementation = _scala_import_impl,
    attrs = {
        "jars": attr.label_list(
            allow_files = True
        ), #current hidden assumption is that these point to full, not ijar'd jars
        "deps": attr.label_list(),
        "runtime_deps": attr.label_list(),
        "exports": attr.label_list()
    },
)
| 37.680672 | 83 | 0.735058 |
24d9df8e8632b5b76e3365c767b17c08c7211c75 | 1,657 | py | Python | docs/guide/class_templates/agents.py | jgori-ouistiti/CoopIHC | 0fe24c618a430517c1394625275faff3ce344f7f | [
"MIT"
] | null | null | null | docs/guide/class_templates/agents.py | jgori-ouistiti/CoopIHC | 0fe24c618a430517c1394625275faff3ce344f7f | [
"MIT"
] | 52 | 2021-11-23T13:49:50.000Z | 2022-03-15T12:28:18.000Z | docs/guide/class_templates/agents.py | jgori-ouistiti/interaction-agents | 922d9bddb2b14784e32c4639b66cec302e80e13a | [
"MIT"
] | 1 | 2022-03-08T11:10:24.000Z | 2022-03-08T11:10:24.000Z | from coopihc.agents import BaseAgent
class MyNewAgent(BaseAgent):
    """ Use this class as template to build your new agent.

    :param arg1: (type) explain the parameter here
    :return: what is returned

    :meta public:
    """

    def __init__(self, arg1, *args, **kwargs):
        self.arg1 = arg1
        # Each component may be supplied via kwargs; otherwise build a
        # default one in the matching placeholder below.
        agent_policy = kwargs.get('agent_policy')
        if agent_policy is None:
            # Build the default policy here, e.g.:
            # agent_policy = ...
            pass
        observation_engine = kwargs.get('observation_engine')
        if observation_engine is None:
            # Build the default observation engine here, e.g.:
            # observation_engine = ...
            pass
        inference_engine = kwargs.get('inference_engine')
        if inference_engine is None:
            # Build the default inference engine here, e.g.:
            # inference_engine = ...
            pass
        state = kwargs.get('state')
        if state is None:
            # Build the default internal state here, e.g.:
            # state = ...
            pass
        super().__init__('user',
            state = state,
            policy = agent_policy,
            observation_engine = observation_engine,
            inference_engine = inference_engine
            )
        # Register render modes and documented parameters in the handbook.
        self.handbook['render_mode'].extend(['plot', 'text', 'log'])
        _arg1 = {'value': arg1, 'meaning': 'meaning of arg1'}
        self.handbook['parameters'].extend([_arg1])

    def finit(self):
        # write finit code here
        # Fix: the original template had a comment-only body, which is a
        # syntax error; 'pass' keeps the template importable as-is.
        pass

    def reset(self, dic = None):
        if dic is None:
            super().reset()
        ## write reset code here.
        if dic is not None:
            super().reset(dic = dic)

    def render(self, *args, **kwargs):
        mode = kwargs.get('mode')
        if mode is None:
            mode = 'text'
        ## write render code here
        pass
| 27.163934 | 68 | 0.54315 |
9f59bb5fe48ccb9496658ace1cc4cd2c830c1c94 | 7,818 | py | Python | magenta/music/pianoroll_lib_test.py | vaipatel/magenta | 8a828116c2a73c26724204987d2b5bddaab31a7e | [
"Apache-2.0"
] | 16 | 2016-09-02T04:59:30.000Z | 2022-01-11T10:38:29.000Z | magenta/music/pianoroll_lib_test.py | Kiku-git/magenta | b36c65466745ff1e056dca40179ae71306a0ca5b | [
"Apache-2.0"
] | 2 | 2016-09-25T16:39:59.000Z | 2016-11-18T17:43:41.000Z | magenta/music/pianoroll_lib_test.py | Kiku-git/magenta | b36c65466745ff1e056dca40179ae71306a0ca5b | [
"Apache-2.0"
] | 10 | 2016-09-02T04:59:32.000Z | 2021-09-29T06:57:24.000Z | # Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pianoroll_lib."""
import copy
from magenta.common import testing_lib as common_testing_lib
from magenta.music import pianoroll_lib
from magenta.music import sequences_lib
from magenta.music import testing_lib
from magenta.protobuf import music_pb2
import tensorflow as tf
class PianorollLibTest(tf.test.TestCase):
  """Tests for pianoroll_lib.PianorollSequence and pianoroll extraction."""
  def setUp(self):
    # Fresh one-track NoteSequence at 60 qpm (1 quarter == 1 second).
    self.maxDiff = None  # pylint:disable=invalid-name
    self.note_sequence = common_testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        tempos: {
          qpm: 60
        }
        ticks_per_quarter: 220
        """)
  def testFromQuantizedNoteSequence(self):
    # Events are tuples of active pitch indices per step; 20 and 110 are
    # absent from the expected output - presumably outside the default
    # pitch range, so 24/26 map to indices 3/5 (verify in pianoroll_lib).
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(20, 100, 0.0, 4.0), (24, 100, 0.0, 1.0), (26, 100, 0.0, 3.0),
         (110, 100, 1.0, 2.0), (24, 100, 2.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    pianoroll_seq = list(pianoroll_lib.PianorollSequence(quantized_sequence))
    expected_pianoroll_seq = [
        (3, 5),
        (5,),
        (3, 5),
        (3,),
    ]
    self.assertEqual(expected_pianoroll_seq, pianoroll_seq)
  def testFromQuantizedNoteSequence_SplitRepeats(self):
    # With split_repeats=True, a re-attacked pitch (0 at steps 0-2 and 2-4)
    # is silenced for one step at the boundary: step 1 holds only pitch 1.
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(0, 100, 0.0, 2.0), (0, 100, 2.0, 4.0), (1, 100, 0.0, 2.0),
         (2, 100, 2.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    pianoroll_seq = list(pianoroll_lib.PianorollSequence(
        quantized_sequence, min_pitch=0, split_repeats=True))
    expected_pianoroll_seq = [
        (0, 1),
        (1,),
        (0, 2),
        (0, 2),
    ]
    self.assertEqual(expected_pianoroll_seq, pianoroll_seq)
  def testFromEventsList_ShiftRange(self):
    # shift_range=True offsets event pitches by min_pitch and drops values
    # outside [min_pitch, max_pitch] (0 and 5, 6 here).
    pianoroll_seq = list(pianoroll_lib.PianorollSequence(
        events_list=[(0, 1), (2, 3), (4, 5), (6,)], steps_per_quarter=1,
        min_pitch=1, max_pitch=4, shift_range=True))
    expected_pianoroll_seq = [
        (0,),
        (1, 2),
        (3,),
        (),
    ]
    self.assertEqual(expected_pianoroll_seq, pianoroll_seq)
  def testToSequence(self):
    # Round trip: NoteSequence -> PianorollSequence -> NoteSequence should
    # reproduce the original notes.
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 0.0, 1.0),
         (67, 100, 3.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    pianoroll_seq = pianoroll_lib.PianorollSequence(quantized_sequence)
    pianoroll_seq_ns = pianoroll_seq.to_sequence(qpm=60.0)
    # Make comparison easier
    pianoroll_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.assertEqual(self.note_sequence, pianoroll_seq_ns)
  def testToSequenceWithBaseNoteSequence(self):
    # Events appended at start_step=1 should land after the base sequence's
    # existing note when merged via base_note_sequence.
    pianoroll_seq = pianoroll_lib.PianorollSequence(
        steps_per_quarter=1, start_step=1)
    pianoroll_events = [(39, 43), (39, 43)]
    for event in pianoroll_events:
      pianoroll_seq.append(event)
    base_seq = copy.deepcopy(self.note_sequence)
    testing_lib.add_track_to_sequence(
        base_seq, 0, [(60, 100, 0.0, 1.0)])
    pianoroll_seq_ns = pianoroll_seq.to_sequence(
        qpm=60.0, base_note_sequence=base_seq)
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 1.0), (60, 100, 1.0, 3.0), (64, 100, 1.0, 3.0)])
    # Make comparison easier
    pianoroll_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.assertEqual(self.note_sequence, pianoroll_seq_ns)
  def testSetLengthAddSteps(self):
    # NOTE(review): (0) is the int 0, not the 1-tuple (0,) - the test is
    # self-consistent either way, but verify which form was intended.
    pianoroll_seq = pianoroll_lib.PianorollSequence(steps_per_quarter=1)
    pianoroll_seq.append((0))
    self.assertEqual(1, pianoroll_seq.num_steps)
    self.assertListEqual([0], pianoroll_seq.steps)
    pianoroll_seq.set_length(5)
    self.assertEqual(5, pianoroll_seq.num_steps)
    self.assertListEqual([0, 1, 2, 3, 4], pianoroll_seq.steps)
    self.assertEqual([(0), (), (), (), ()], list(pianoroll_seq))
    # Add 5 more steps.
    pianoroll_seq.set_length(10)
    self.assertEqual(10, pianoroll_seq.num_steps)
    self.assertListEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], pianoroll_seq.steps)
    self.assertEqual([(0)] + [()] * 9, list(pianoroll_seq))
  def testSetLengthRemoveSteps(self):
    # Shrinking the sequence truncates events from the end.
    pianoroll_seq = pianoroll_lib.PianorollSequence(steps_per_quarter=1)
    pianoroll_events = [(), (2, 4), (2, 4), (2,), (5,)]
    for event in pianoroll_events:
      pianoroll_seq.append(event)
    pianoroll_seq.set_length(2)
    self.assertEqual([(), (2, 4)], list(pianoroll_seq))
    pianoroll_seq.set_length(1)
    self.assertEqual([()], list(pianoroll_seq))
    pianoroll_seq.set_length(0)
    self.assertEqual([], list(pianoroll_seq))
  def testExtractPianorollSequences(self):
    # Sequences shorter than min_steps_discard or longer than
    # max_steps_discard are dropped by the extractor.
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(quantized_sequence)
    self.assertEqual(1, len(seqs))
    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized_sequence, min_steps_discard=2, max_steps_discard=5)
    self.assertEqual(1, len(seqs))
    # Too short (1 step < min_steps_discard=3): discarded.
    self.note_sequence.notes[0].end_time = 1.0
    self.note_sequence.total_time = 1.0
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized_sequence, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(seqs))
    # Too long (10 steps > max_steps_discard=5): discarded.
    self.note_sequence.notes[0].end_time = 10.0
    self.note_sequence.total_time = 10.0
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized_sequence, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(seqs))
  def testExtractPianorollMultiProgram(self):
    # Sequences mixing multiple programs are rejected by the extractor.
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    self.note_sequence.notes[0].program = 2
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(quantized_sequence)
    self.assertEqual(0, len(seqs))
  def testExtractNonZeroStart(self):
    # Starting extraction at step 4 leaves nothing (note spans steps 0-4).
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized_sequence, start_step=4, min_steps_discard=1)
    self.assertEqual(0, len(seqs))
    seqs, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized_sequence, start_step=0, min_steps_discard=1)
    self.assertEqual(1, len(seqs))
# Run the tests with the TensorFlow test runner when invoked as a script.
if __name__ == '__main__':
  tf.test.main()
| 34.746667 | 77 | 0.695574 |
c92d101b746046fe96e07afd2d7534467bfc9e6c | 1,611 | py | Python | texts/extraction/text_parser/terms_utils.py | nicolay-r/frame-based-attitude-extraction-workflow | f20e6d17a9eb6613028545b889c74626a8260ccd | [
"MIT"
] | null | null | null | texts/extraction/text_parser/terms_utils.py | nicolay-r/frame-based-attitude-extraction-workflow | f20e6d17a9eb6613028545b889c74626a8260ccd | [
"MIT"
] | 6 | 2020-10-03T13:45:38.000Z | 2021-07-13T18:31:57.000Z | texts/extraction/text_parser/terms_utils.py | nicolay-r/frame-based-attitude-extraction-workflow | f20e6d17a9eb6613028545b889c74626a8260ccd | [
"MIT"
] | null | null | null | from core.processing.lemmatization.base import Stemmer
from core.processing.ner.base import NamedEntityRecognition
from core.runtime.parser import ParsedText, TextParser
def to_input_terms(text, ner, stemmer, return_parsed_text=True, lemmatized_terms=False):
    """Parse raw `text` and return the term strings to feed into `ner`.

    Returns the terms, optionally paired with the intermediate ParsedText
    when `return_parsed_text` is True.
    """
    assert isinstance(ner, NamedEntityRecognition) or ner is None
    assert isinstance(stemmer, Stemmer) or stemmer is None
    parsed = TextParser.parse(text=text,
                              stemmer=stemmer,
                              lemmatize_on_init=lemmatized_terms)
    terms = __get_terms_from_parsed_text(parsed_text=parsed,
                                         ner=ner,
                                         lemmatized_terms=lemmatized_terms)
    return (terms, parsed) if return_parsed_text else terms
def __get_terms_from_parsed_text(parsed_text, ner, lemmatized_terms=False):
    """Extract a list of string terms from *parsed_text* for NER input.

    When *ner* is provided, its NeedLemmatization / NeedLowercase flags
    decide how the terms are normalized; otherwise the terms are taken
    directly from the parsed text (lemmas or original terms).
    """
    assert(isinstance(parsed_text, ParsedText))
    assert(isinstance(ner, NamedEntityRecognition) or ner is None)
    # Transforming terms into list of strings
    if ner is not None:
        # Optional lemmatization.
        if ner.NeedLemmatization or lemmatized_terms:
            ner_input_terms = list(parsed_text.iter_lemmas(need_cache=True))
        else:
            # BUG FIX: previously ner_input_terms stayed None on this path,
            # which crashed the lowercase step below (iterating None) and
            # otherwise made the function return None. Fall back to the
            # original (non-lemmatized) terms instead.
            ner_input_terms = list(parsed_text.iter_original_terms())
        # Optional lowercase.
        if ner.NeedLowercase:
            ner_input_terms = [t.lower() for t in ner_input_terms]
    else:
        if lemmatized_terms:
            ner_input_terms = list(parsed_text.iter_lemmas(return_raw=True))
        else:
            ner_input_terms = list(parsed_text.iter_original_terms())
    return ner_input_terms
| 37.465116 | 98 | 0.701428 |
488d5972cfb9b540566612c37adf3b327923a012 | 445 | py | Python | PythonExercicios/ex063.py | Luis-Emanuel/Python | 92936dfb005b9755a53425d16c3ff54119eebe78 | [
"MIT"
] | null | null | null | PythonExercicios/ex063.py | Luis-Emanuel/Python | 92936dfb005b9755a53425d16c3ff54119eebe78 | [
"MIT"
] | null | null | null | PythonExercicios/ex063.py | Luis-Emanuel/Python | 92936dfb005b9755a53425d16c3ff54119eebe78 | [
"MIT"
] | null | null | null | #Escreva que leia um número n inteiro qualquer e mostre na tela os n primeiros elementos da Sequência de Fibonacci
#Ex: 0 -> 1 -> 1 -> 2 -> 3 -> 5 -> 8 -> 13 ....


def fib_terms(n):
    """Return the first *n* Fibonacci terms (0, 1, 1, 2, 3, ...) as a list.

    For ``n <= 0`` an empty list is returned.
    """
    terms = []
    a, b = 0, 1
    for _ in range(n):
        terms.append(a)
        a, b = b, a + b
    return terms


# BUG FIX: the original always printed the first two terms even when the
# user asked for fewer (n < 2), left a dangling trailing ' -> ' and a double
# space between the second and third terms, and ran at import time. Building
# the sequence first prints exactly n terms, and the __main__ guard keeps
# the prompt from running on import.
if __name__ == '__main__':
    print(20*'-')
    print('Seguencia de Fibonacci')
    print(20*'-')
    n = int(input('Quantos termos você que mostra ? '))
    print(' -> '.join(str(t) for t in fib_terms(n)))
2adc870691f1d22fc93698e2b98930f8c95e9760 | 1,738 | py | Python | gpflow/__init__.py | codelover-without-talent/GPflow | 1af7b1ca7da6687974150a1440d821a106b2159d | [
"Apache-2.0"
] | 1 | 2020-01-27T19:05:28.000Z | 2020-01-27T19:05:28.000Z | gpflow/__init__.py | codelover-without-talent/GPflow | 1af7b1ca7da6687974150a1440d821a106b2159d | [
"Apache-2.0"
] | null | null | null | gpflow/__init__.py | codelover-without-talent/GPflow | 1af7b1ca7da6687974150a1440d821a106b2159d | [
"Apache-2.0"
] | 2 | 2019-03-09T11:46:11.000Z | 2021-12-20T10:22:34.000Z | # Copyright 2016 alexggmatthews, James Hensman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from ._version import __version__
from ._settings import SETTINGS as settings
from .session_manager import get_session
from .session_manager import get_default_session
from .session_manager import reset_default_session
from .session_manager import reset_default_graph_and_session
from . import misc
from . import transforms
from . import conditionals
from . import logdensities
from . import likelihoods
from . import kernels
from . import priors
from . import core
from . import models
from . import test_util
from . import training as train
from . import features
from . import expectations
from . import probability_distributions
from .decors import autoflow
from .decors import defer_build
from .decors import name_scope
from .decors import params_as_tensors
from .decors import params_as_tensors_for
from .core.errors import GPflowError
from .core.compilable import Build
from .params import Parameter as Param
from .params import ParamList
from .params import DataHolder
from .params import Minibatch
from .params import Parameterized
from .saver import Saver
from .saver import SaverContext
from . import multioutput
| 28.966667 | 74 | 0.808976 |
9bd751d62c496fbfaf01c0e1acee65a3a25e1e54 | 389 | py | Python | services/dsrp-api/app/api/company_payment_info/response_models.py | bcgov/dormant-site-reclamation-program | 4710434174a204a292a3128d92c8daf1de2a65a6 | [
"Apache-2.0"
] | null | null | null | services/dsrp-api/app/api/company_payment_info/response_models.py | bcgov/dormant-site-reclamation-program | 4710434174a204a292a3128d92c8daf1de2a65a6 | [
"Apache-2.0"
] | 9 | 2020-05-06T23:29:43.000Z | 2022-03-14T22:58:17.000Z | services/dsrp-api/app/api/company_payment_info/response_models.py | bcgov/dormant-site-reclamation-program | 4710434174a204a292a3128d92c8daf1de2a65a6 | [
"Apache-2.0"
] | 3 | 2020-05-08T16:54:22.000Z | 2021-01-27T17:28:49.000Z | from app.extensions import api
from flask_restplus import fields
# flask-restplus response model describing the payment/invoicing details
# returned for a company; every field is serialized as a plain string.
COMPANY_PAYMENT_INFO = api.model(
    'CompanyPaymentInfo', {
        'company_name': fields.String,
        'company_address': fields.String,
        'po_number': fields.String,
        'po_number_2': fields.String,
        'qualified_receiver_name': fields.String,
        'expense_authority_name': fields.String,
    })
| 29.923077 | 49 | 0.683805 |
5b1c6dc65a5a5272c75a52ec2680f3b3cc879aad | 4,994 | py | Python | testscripts/RDKB/component/TELCOVOICEMgrHal/TS_TelcoVoiceMgrHal_Init.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/TELCOVOICEMgrHal/TS_TelcoVoiceMgrHal_Init.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/TELCOVOICEMgrHal/TS_TelcoVoiceMgrHal_Init.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2021 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_TelcoVoiceMgrHal_Init</name>
<primitive_test_id/>
<primitive_test_name>TELCOVOICEMgrHal_Init</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>To invoke the json hal client to connect to json hal server with Telco Voice Manager schema file</synopsis>
<groups_id/>
<execution_time>2</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_TELCOVOICEMGRHAL_01</test_case_id>
<test_objective>To invoke the json hal client to connect to json hal server with Telco Voice Manager schema file</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state of DUT
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>json_hal_client_init
json_hal_client_run
json_hal_is_client_connected</api_or_interface_used>
<input_parameters>None</input_parameters>
<automation_approch>1. Load the Telco Voice Manager HAL Module
2. Invoke the json_hal_client_init() JSON HAL API to initiate the connection to JSON HAL server with TELCO VOICE HAL Schema file
3. Invoke the json_hal_client_run() JSON HAL API to start the JSON HAL client service.
4. Invoke the json_hal_is_client_connected() to check whether JSON HAL client is connected to JSON HAL server or not
5. Return True if json hal client is connected to json hal server else failure
6. Unload the Telco Voice Manager HAL Module</automation_approch>
<expected_output>JSON HAL Client should be connected to JSON HAL server with TELCO VOICE HAL Schema file</expected_output>
<priority>High</priority>
<test_stub_interface>telcovoicemgrhal</test_stub_interface>
<test_script>TS_TelcoVoiceMgrHal_Init</test_script>
<skipped>No</skipped>
<release_version>M92</release_version>
<remarks/>
</test_cases>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
# NOTE: this is a Python 2 TDK test-script TEMPLATE. The bare <ipaddress>
# and <port> tokens below are not valid Python; the TDK framework replaces
# them with the DUT's IP and port before executing the script.
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("telcovoicemgrhal","1");
#IP and Port of box, No need to change,
#This will be replaced with corresponding DUT Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_TelcoVoiceMgrHal_Init');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS]  :  %s" %loadmodulestatus ;
if "SUCCESS" in loadmodulestatus.upper() :
    obj.setLoadModuleStatus("SUCCESS");
    # Single test step: invoke the JSON HAL client init for TelcoVoiceMgr
    # and compare the stub's result against the expected "SUCCESS".
    tdkTestObj = obj.createTestStep('TELCOVOICEMgrHal_Init');
    expectedresult = "SUCCESS";
    tdkTestObj.executeTestCase(expectedresult);
    actualresult = tdkTestObj.getResult();
    details = tdkTestObj.getResultDetails();
    if expectedresult in actualresult:
        #Set the result status of execution
        tdkTestObj.setResultStatus("SUCCESS");
        print "TEST STEP 1: Initiate the TELCOVOICEMgrHal_Init operation";
        print "EXPECTED RESULT 1: TELCOVOICEMgrHal_Init Should be success";
        print "ACTUAL RESULT 1: TELCOVOICEMgrHal_Init was success";
        #Get the result of execution
        print "[TEST EXECUTION RESULT] : %s" %actualresult ;
    else:
        #Set the result status of execution
        tdkTestObj.setResultStatus("FAILURE");
        print "TEST STEP 1: Initiate the TELCOVOICEMgrHal_Init operation";
        print "EXPECTED RESULT 1: TELCOVOICEMgrHal_Init Should be Success";
        print "ACTUAL RESULT 1: TELCOVOICEMgrHal_Init was Failed";
        #Get the result of execution
        print "[TEST EXECUTION RESULT] : %s" %actualresult ;
    # Always unload the HAL module after the step runs.
    obj.unloadModule("telcovoicemgrhal");
else:
    print "Failed to load the module";
    obj.setLoadModuleStatus("FAILURE");
    print "Module loading failed";
| 42.683761 | 133 | 0.731678 |
6926df8a878de09ada2700f33e74e5195b170ed0 | 1,317 | py | Python | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/GetQualificationUploadPolicyRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/GetQualificationUploadPolicyRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/GetQualificationUploadPolicyRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class GetQualificationUploadPolicyRequest(RpcRequest):
    """RPC request for the Domain API action ``GetQualificationUploadPolicy``."""

    def __init__(self):
        # Product 'Domain', API version '2018-01-29', action name.
        RpcRequest.__init__(self, 'Domain', '2018-01-29', 'GetQualificationUploadPolicy')

    def get_UserClientIp(self):
        """Return the ``UserClientIp`` query parameter (or None if unset)."""
        query = self.get_query_params()
        return query.get('UserClientIp')

    def set_UserClientIp(self, UserClientIp):
        """Set the ``UserClientIp`` query parameter."""
        self.add_query_param('UserClientIp', UserClientIp)

    def get_Lang(self):
        """Return the ``Lang`` query parameter (or None if unset)."""
        query = self.get_query_params()
        return query.get('Lang')

    def set_Lang(self, Lang):
        """Set the ``Lang`` query parameter."""
        self.add_query_param('Lang', Lang)
d7ed75f59eb755234f6604e8624825c06225ea45 | 654 | py | Python | BootStrapWEB/Bs.py | sir1st/homework | 9c75f9e6dca814a171d1b61b331fa87407c9b4a6 | [
"MIT"
] | null | null | null | BootStrapWEB/Bs.py | sir1st/homework | 9c75f9e6dca814a171d1b61b331fa87407c9b4a6 | [
"MIT"
] | 1 | 2018-01-04T05:50:54.000Z | 2018-01-04T05:50:54.000Z | BootStrapWEB/Bs.py | sir1st/homework | 9c75f9e6dca814a171d1b61b331fa87407c9b4a6 | [
"MIT"
] | null | null | null | #coding:utf-8
from flask import Flask,render_template,send_from_directory,request
#from flask.ext.bootstrap import Bootstrap #专为Flask开发发拓展都暴露在flask.ext命名空间下,Flask-Bootstrap输出一个Bootstrap类
from flask_bootstrap import Bootstrap
app = Flask(__name__)
# Flask extensions are usually initialised when the application instance is
# created; this line initialises Flask-Bootstrap for the app. (Extensions
# used to be exposed under the flask.ext namespace; Bootstrap is now
# imported directly from flask_bootstrap.)
bootstrap = Bootstrap(app)


@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')


@app.route('/test')
def test():
    """Render the Bootstrap test page."""
    return render_template('test.html')


@app.route('/Download', methods=['GET', 'POST'])
def Download():
    """Serve D:/upload/temp.txt as a file download on POST.

    NOTE(review): a GET request falls through and returns None, which Flask
    reports as an error — presumably this endpoint is only ever POSTed to
    by a form; confirm before relying on GET behaviour.
    """
    if request.method == 'POST':
        return send_from_directory('D:/upload', 'temp.txt', as_attachment=True)


# BUG FIX: the original used ``if True: app.run()``, which started the
# development server even when this module was merely imported. Guard on
# __name__ so the server only starts when run as a script.
if __name__ == '__main__':
    app.run()
479905398dc59f3525ae6efd8f3ab243714b318e | 4,351 | py | Python | tempest/api/object_storage/test_object_version.py | dommgifer/repo | 2e3b9c4f92c4d1b63cd3e3d8ec4824f6bfc37bc2 | [
"Apache-2.0"
] | null | null | null | tempest/api/object_storage/test_object_version.py | dommgifer/repo | 2e3b9c4f92c4d1b63cd3e3d8ec4824f6bfc37bc2 | [
"Apache-2.0"
] | null | null | null | tempest/api/object_storage/test_object_version.py | dommgifer/repo | 2e3b9c4f92c4d1b63cd3e3d8ec4824f6bfc37bc2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.object_storage import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
CONF = config.CONF
class ContainerTest(base.BaseObjectTest):
    """End-to-end test of Swift object versioning via X-Versions-Location."""

    @classmethod
    def resource_setup(cls):
        super(ContainerTest, cls).resource_setup()
        # Track every container created by the test so delete_containers()
        # can remove them during cleanup.
        cls.containers = []

    @classmethod
    def resource_cleanup(cls):
        cls.delete_containers()
        super(ContainerTest, cls).resource_cleanup()

    def assertContainer(self, container, count, byte, versioned):
        """Assert a container's object count, bytes used and versions location.

        Header values are compared as strings; an absent header is reported
        as the literal string 'Missing Header'.
        """
        resp, _ = self.container_client.list_container_metadata(container)
        self.assertHeaders(resp, 'Container', 'HEAD')
        header_value = resp.get('x-container-object-count', 'Missing Header')
        self.assertEqual(header_value, count)
        header_value = resp.get('x-container-bytes-used', 'Missing Header')
        self.assertEqual(header_value, byte)
        header_value = resp.get('x-versions-location', 'Missing Header')
        self.assertEqual(header_value, versioned)

    @decorators.idempotent_id('a151e158-dcbf-4a1f-a1e7-46cd65895a6f')
    @testtools.skipIf(
        not CONF.object_storage_feature_enabled.object_versioning,
        'Object-versioning is disabled')
    def test_versioned_container(self):
        """Create, overwrite and delete a versioned object, checking counts."""
        # create container (receives archived versions of overwritten objects)
        vers_container_name = data_utils.rand_name(name='TestVersionContainer')
        resp, _ = self.container_client.create_container(
            vers_container_name)
        self.containers.append(vers_container_name)
        self.assertHeaders(resp, 'Container', 'PUT')
        self.assertContainer(vers_container_name, '0', '0', 'Missing Header')
        # base container points at the versions container via the
        # X-Versions-Location header (metadata_prefix='' sends it verbatim).
        base_container_name = data_utils.rand_name(name='TestBaseContainer')
        headers = {'X-versions-Location': vers_container_name}
        resp, _ = self.container_client.create_container(
            base_container_name,
            metadata=headers,
            metadata_prefix='')
        self.containers.append(base_container_name)
        self.assertHeaders(resp, 'Container', 'PUT')
        self.assertContainer(base_container_name, '0', '0',
                             vers_container_name)
        object_name = data_utils.rand_name(name='TestObject')
        # create object
        data_1 = data_utils.random_bytes()
        resp, _ = self.object_client.create_object(base_container_name,
                                                   object_name, data_1)
        # create 2nd version of object
        data_2 = data_utils.random_bytes()
        resp, _ = self.object_client.create_object(base_container_name,
                                                   object_name, data_2)
        _, body = self.object_client.get_object(base_container_name,
                                                object_name)
        self.assertEqual(body, data_2)
        # delete object version 2; version 1 should be restored from the
        # versions container.
        resp, _ = self.object_client.delete_object(base_container_name,
                                                   object_name)
        # '1024' assumes data_utils.random_bytes() writes 1024 bytes by
        # default -- TODO confirm against the tempest data_utils source.
        self.assertContainer(base_container_name, '1', '1024',
                             vers_container_name)
        _, body = self.object_client.get_object(base_container_name,
                                                object_name)
        self.assertEqual(body, data_1)
        # delete object version 1
        self.object_client.delete_object(base_container_name,
                                         object_name)
        # containers should be empty
        self.assertContainer(base_container_name, '0', '0',
                             vers_container_name)
        self.assertContainer(vers_container_name, '0', '0',
                             'Missing Header')
| 44.397959 | 79 | 0.64422 |
f0395e26d540c26fe0217438e04d8c6e73a25a90 | 2,526 | py | Python | feedback.py | josephkreiser/chicago-roboto | c9bdbee1cdc686c6866fd9231353ee91bbe6623a | [
"Apache-2.0"
] | 131 | 2017-02-23T02:35:44.000Z | 2018-05-28T07:38:47.000Z | feedback.py | josephkreiser/chicago-roboto | c9bdbee1cdc686c6866fd9231353ee91bbe6623a | [
"Apache-2.0"
] | 23 | 2017-04-06T20:40:41.000Z | 2018-04-16T13:38:30.000Z | feedback.py | josephkreiser/chicago-roboto | c9bdbee1cdc686c6866fd9231353ee91bbe6623a | [
"Apache-2.0"
] | 33 | 2017-02-23T06:35:37.000Z | 2018-04-19T07:35:05.000Z | #!/usr/bin/env python
import sys, getopt, json, codecs
import mistune
import pystache
from os import path
template = """Hey there {{speaker}},
Thank you so much for helping make Chicago Roboto a big success. As promised, here are your session feedback results.
Session: **{{session}}**
Average Score:
{{average_table}}
Dump of scores:
{{table}}
We hope to see you in 2018!
Best
Jerrell, John and Ryan"""
def print_usage():
    """Write the command-line usage string to stdout."""
    # Equivalent to the original Python 2 print statement (writes the
    # message plus a trailing newline to stdout).
    sys.stdout.write("feedback.py -i <inputfile> -o <outputfile>\n")
def main(argv):
    # Parse -i/--ifile (JSON feedback export) and -o/--ofile (output
    # directory); -h prints usage and exits. Python 2 script.
    inputfile=''
    outputfile=''
    try:
        opts, args = getopt.getopt(argv, "hi:o:",["ifile=","ofile="])
    except getopt.GetoptError:
        print_usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print_usage()
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    # Load the exported feedback data. Assumed structure:
    # {"events": {"chicagoroboto-2017": {"speakers", "sessions", "feedback"}}}
    # -- TODO confirm against the actual export format.
    with open(inputfile) as f:
        data = json.load(f)
    data = data['events']['chicagoroboto-2017']
    speakers = data['speakers']
    sessions = data['sessions']
    feedback = data['feedback']
    # Build one markdown email per (session, speaker) and render it to HTML.
    for session_id in feedback.iterkeys():
        content = {}
        session = sessions[session_id]
        content['session'] = session['name']
        # Feedback Table
        feedback_table_template = "| Overall | Speaker | Technical |\n"
        feedback_table_template += "| -------:| -------:| ---------:|\n"
        session_feedback = feedback[session_id]
        # Per-category averages; float() forces true division under Python 2.
        overall_avg = float(sum(f['overall'] for k, f in session_feedback.iteritems())) / len(session_feedback)
        speaker_avg = float(sum(f['speaker'] for k, f in session_feedback.iteritems())) / len(session_feedback)
        tech_avg = float(sum(f['technical'] for k, f in session_feedback.iteritems())) / len(session_feedback)
        content['average_table'] = feedback_table_template
        content['average_table'] += "| {:.2f} | {:.2f} | {:.2f} |".format(overall_avg, speaker_avg, tech_avg)
        content['table'] = feedback_table_template
        for key, fb in session_feedback.iteritems():
            content['table'] += "| {} | {} | {} |\n".format(fb['overall'], fb['speaker'], fb['technical'])
        session_speakers = []
        if 'speakers' in session:
            session_speakers = [speakers[speaker_id] for speaker_id in session['speakers']]
        # Render the mustache template to markdown, then markdown to HTML,
        # writing "<speaker_id>-<session_id>.html" into the output directory.
        for speaker in session_speakers:
            content['speaker'] = speaker['name']
            email = mistune.markdown(pystache.render(template, content))
            filename = "{}-{}.html".format(speaker['id'], session_id)
            with codecs.open(path.join(outputfile, filename), "w", "utf-8") as f:
                f.write(email)
if __name__ == "__main__":
    # Entry point: forward command-line arguments (minus the program name).
    main(sys.argv[1:])
| 27.16129 | 118 | 0.660333 |
6ec000da1564f2eb303d70f396e1b348cd894235 | 1,505 | py | Python | L1Trigger/L1THGCal/python/customTriggerCellSelect.py | Nik-Menendez/L1Trigger | 5336631cc0a517495869279ed7d3a4cac8d4e5e5 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | L1Trigger/L1THGCal/python/customTriggerCellSelect.py | Nik-Menendez/L1Trigger | 5336631cc0a517495869279ed7d3a4cac8d4e5e5 | [
"Apache-2.0"
] | 3 | 2018-08-23T13:40:24.000Z | 2019-12-05T21:16:03.000Z | L1Trigger/L1THGCal/python/customTriggerCellSelect.py | Nik-Menendez/L1Trigger | 5336631cc0a517495869279ed7d3a4cac8d4e5e5 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | import FWCore.ParameterSet.Config as cms
import SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi as digiparam
from L1Trigger.L1THGCal.hgcalConcentratorProducer_cfi import threshold_conc_proc, best_conc_proc, supertc_conc_proc
def custom_triggercellselect_supertriggercell(process,
                                              stcSize=supertc_conc_proc.stcSize
                                              ):
    """Configure the HGCal concentrator to use super-trigger-cell selection.

    :param process: cms.Process customised in place.
    :param stcSize: super trigger cell size; defaults to the value declared
        in ``supertc_conc_proc``.
    :return: the same ``process``, for call chaining.
    """
    process.hgcalConcentratorProducer.ProcessorParameters = \
        supertc_conc_proc.clone(stcSize=stcSize)
    return process
def custom_triggercellselect_threshold(process,
                                       threshold_silicon=threshold_conc_proc.triggercell_threshold_silicon, # in mipT
                                       threshold_scintillator=threshold_conc_proc.triggercell_threshold_scintillator # in mipT
                                       ):
    """Configure the HGCal concentrator to use threshold-based selection.

    Both thresholds are expressed in mipT; defaults come from
    ``threshold_conc_proc``.

    :return: the same ``process``, for call chaining.
    """
    process.hgcalConcentratorProducer.ProcessorParameters = threshold_conc_proc.clone(
        triggercell_threshold_silicon=threshold_silicon,
        triggercell_threshold_scintillator=threshold_scintillator,
    )
    return process
def custom_triggercellselect_bestchoice(process,
                                        triggercells=best_conc_proc.NData
                                        ):
    """Configure the HGCal concentrator to use best-choice selection.

    :param triggercells: number of trigger cells to keep (NData); defaults
        to the value declared in ``best_conc_proc``.
    :return: the same ``process``, for call chaining.
    """
    process.hgcalConcentratorProducer.ProcessorParameters = \
        best_conc_proc.clone(NData=triggercells)
    return process
| 47.03125 | 127 | 0.671761 |
5fb38f864681f3aea1690245ae9ba1ecf3e45617 | 265 | py | Python | tests/artificial/transf_None/trend_MovingAverage/cycle_7/ar_12/test_artificial_1024_None_MovingAverage_7_12_20.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_None/trend_MovingAverage/cycle_7/ar_12/test_artificial_1024_None_MovingAverage_7_12_20.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_None/trend_MovingAverage/cycle_7/ar_12/test_artificial_1024_None_MovingAverage_7_12_20.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and process one artificial benchmark series: 1024 daily points,
# MovingAverage trend, cycle length 7, AR order 12, no transform, zero
# noise, 20 exogenous variables.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 7, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 12);
ca52286dbfb729d490e25b94b7bf4561915ff879 | 1,646 | py | Python | los_tools/__init__.py | JanCaha/qgis_los_tools | f465acb70a49a86f86e68235300d6637d85adc29 | [
"MIT"
] | 1 | 2021-08-23T23:44:03.000Z | 2021-08-23T23:44:03.000Z | los_tools/__init__.py | JanCaha/qgis_los_tools | f465acb70a49a86f86e68235300d6637d85adc29 | [
"MIT"
] | 14 | 2020-04-17T14:19:56.000Z | 2021-02-17T21:21:52.000Z | los_tools/__init__.py | JanCaha/qgis_los_tools | f465acb70a49a86f86e68235300d6637d85adc29 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
/***************************************************************************
los_tools
A QGIS plugin
This plugin creates and analyzes lines of sight and also provides supporting tools.
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2020-03-05
copyright : (C) 2020 by Jan Caha
email : jan.caha@outlook.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
__author__ = 'Jan Caha'
__date__ = '2020-03-05'
__copyright__ = '(C) 2020 by Jan Caha'
from los_tools.los_tools_plugin import los_toolsPlugin
# noinspection PyPep8Naming
def classFactory(iface):
    """Instantiate and return the los_tools plugin (QGIS plugin entry point).

    :param iface: A QGIS interface instance (unused here, but required by
        the QGIS plugin loader's classFactory contract).
    :type iface: QgsInterface
    """
    return los_toolsPlugin()
| 40.146341 | 84 | 0.439247 |
fe6328d7c3df67c50ef7c55cd36d6a37133504ed | 7,762 | py | Python | sdk/python/pulumi_aws/opsworks/php_app_layer.py | Charliekenney23/pulumi-aws | 55bd0390160d27350b297834026fee52114a2d41 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/opsworks/php_app_layer.py | Charliekenney23/pulumi-aws | 55bd0390160d27350b297834026fee52114a2d41 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/opsworks/php_app_layer.py | Charliekenney23/pulumi-aws | 55bd0390160d27350b297834026fee52114a2d41 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class PhpAppLayer(pulumi.CustomResource):
auto_assign_elastic_ips: pulumi.Output[bool]
"""
Whether to automatically assign an elastic IP address to the layer's instances.
"""
auto_assign_public_ips: pulumi.Output[bool]
"""
For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
"""
auto_healing: pulumi.Output[bool]
"""
Whether to enable auto-healing for the layer.
"""
custom_configure_recipes: pulumi.Output[list]
custom_deploy_recipes: pulumi.Output[list]
custom_instance_profile_arn: pulumi.Output[str]
"""
The ARN of an IAM profile that will be used for the layer's instances.
"""
custom_json: pulumi.Output[str]
"""
Custom JSON attributes to apply to the layer.
"""
custom_security_group_ids: pulumi.Output[list]
"""
Ids for a set of security groups to apply to the layer's instances.
"""
custom_setup_recipes: pulumi.Output[list]
custom_shutdown_recipes: pulumi.Output[list]
custom_undeploy_recipes: pulumi.Output[list]
drain_elb_on_shutdown: pulumi.Output[bool]
"""
Whether to enable Elastic Load Balancing connection draining.
"""
ebs_volumes: pulumi.Output[list]
"""
`ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
"""
elastic_load_balancer: pulumi.Output[str]
"""
Name of an Elastic Load Balancer to attach to this layer
"""
install_updates_on_boot: pulumi.Output[bool]
"""
Whether to install OS and package updates on each instance when it boots.
"""
instance_shutdown_timeout: pulumi.Output[float]
"""
The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
"""
name: pulumi.Output[str]
"""
A human-readable name for the layer.
"""
stack_id: pulumi.Output[str]
"""
The id of the stack the layer will belong to.
"""
system_packages: pulumi.Output[list]
"""
Names of a set of system packages to install on the layer's instances.
"""
use_ebs_optimized_instances: pulumi.Output[bool]
"""
Whether to use EBS-optimized instances.
"""
def __init__(__self__, resource_name, opts=None, auto_assign_elastic_ips=None, auto_assign_public_ips=None, auto_healing=None, custom_configure_recipes=None, custom_deploy_recipes=None, custom_instance_profile_arn=None, custom_json=None, custom_security_group_ids=None, custom_setup_recipes=None, custom_shutdown_recipes=None, custom_undeploy_recipes=None, drain_elb_on_shutdown=None, ebs_volumes=None, elastic_load_balancer=None, install_updates_on_boot=None, instance_shutdown_timeout=None, name=None, stack_id=None, system_packages=None, use_ebs_optimized_instances=None, __name__=None, __opts__=None):
"""
Provides an OpsWorks PHP application layer resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_assign_elastic_ips: Whether to automatically assign an elastic IP address to the layer's instances.
:param pulumi.Input[bool] auto_assign_public_ips: For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
:param pulumi.Input[bool] auto_healing: Whether to enable auto-healing for the layer.
:param pulumi.Input[str] custom_instance_profile_arn: The ARN of an IAM profile that will be used for the layer's instances.
:param pulumi.Input[str] custom_json: Custom JSON attributes to apply to the layer.
:param pulumi.Input[list] custom_security_group_ids: Ids for a set of security groups to apply to the layer's instances.
:param pulumi.Input[bool] drain_elb_on_shutdown: Whether to enable Elastic Load Balancing connection draining.
:param pulumi.Input[list] ebs_volumes: `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
:param pulumi.Input[str] elastic_load_balancer: Name of an Elastic Load Balancer to attach to this layer
:param pulumi.Input[bool] install_updates_on_boot: Whether to install OS and package updates on each instance when it boots.
:param pulumi.Input[float] instance_shutdown_timeout: The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
:param pulumi.Input[str] name: A human-readable name for the layer.
:param pulumi.Input[str] stack_id: The id of the stack the layer will belong to.
:param pulumi.Input[list] system_packages: Names of a set of system packages to install on the layer's instances.
:param pulumi.Input[bool] use_ebs_optimized_instances: Whether to use EBS-optimized instances.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['auto_assign_elastic_ips'] = auto_assign_elastic_ips
__props__['auto_assign_public_ips'] = auto_assign_public_ips
__props__['auto_healing'] = auto_healing
__props__['custom_configure_recipes'] = custom_configure_recipes
__props__['custom_deploy_recipes'] = custom_deploy_recipes
__props__['custom_instance_profile_arn'] = custom_instance_profile_arn
__props__['custom_json'] = custom_json
__props__['custom_security_group_ids'] = custom_security_group_ids
__props__['custom_setup_recipes'] = custom_setup_recipes
__props__['custom_shutdown_recipes'] = custom_shutdown_recipes
__props__['custom_undeploy_recipes'] = custom_undeploy_recipes
__props__['drain_elb_on_shutdown'] = drain_elb_on_shutdown
__props__['ebs_volumes'] = ebs_volumes
__props__['elastic_load_balancer'] = elastic_load_balancer
__props__['install_updates_on_boot'] = install_updates_on_boot
__props__['instance_shutdown_timeout'] = instance_shutdown_timeout
__props__['name'] = name
if stack_id is None:
raise TypeError("Missing required property 'stack_id'")
__props__['stack_id'] = stack_id
__props__['system_packages'] = system_packages
__props__['use_ebs_optimized_instances'] = use_ebs_optimized_instances
super(PhpAppLayer, __self__).__init__(
'aws:opsworks/phpAppLayer:PhpAppLayer',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.928994 | 609 | 0.721335 |
ff341780146347455e4baf83bcac4f80113d15e7 | 1,089 | py | Python | Pilot/src/run_wrapper.py | yumiai/Risk-Adjusted-Bonding-Curves | 56b4f714dc145cca1503b7c80d6a7b38c1e2e3fc | [
"MIT"
] | 31 | 2020-11-09T21:04:39.000Z | 2022-03-22T14:08:34.000Z | Pilot/src/run_wrapper.py | dnzengou/Risk-Adjusted-Bonding-Curves | 56b4f714dc145cca1503b7c80d6a7b38c1e2e3fc | [
"MIT"
] | 1 | 2020-03-24T16:20:49.000Z | 2020-03-24T16:20:49.000Z | Pilot/src/run_wrapper.py | dnzengou/Risk-Adjusted-Bonding-Curves | 56b4f714dc145cca1503b7c80d6a7b38c1e2e3fc | [
"MIT"
] | 7 | 2021-01-18T02:49:48.000Z | 2022-02-24T11:50:46.000Z | import pandas as pd
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD.configuration import Experiment
from cadCAD import configs
def run(drop_midsteps: bool = True) -> tuple:
    """Execute the cadCAD simulations registered in ``configs``.

    :param drop_midsteps: if True, keep only the first (substep 0) and
        the final substep of every timestep in the result frame.
    :return: a 3-tuple ``(results_df, tensor_field, sessions)``.
        (The original annotation claimed ``pd.DataFrame``, but a tuple
        has always been returned.)
    """
    exec_mode = ExecutionMode()
    exec_context = ExecutionContext(exec_mode.local_mode)
    # Renamed from ``run`` to avoid shadowing this function's own name.
    executor = Executor(exec_context=exec_context, configs=configs)
    (system_events, tensor_field, sessions) = executor.execute()
    df = pd.DataFrame(system_events)
    if drop_midsteps:
        # Drop every row that is neither the initial nor the final
        # substep of its timestep.
        max_substep = max(df.substep)
        is_droppable = (df.substep != max_substep)
        is_droppable &= (df.substep != 0)
        df = df.loc[~is_droppable]
    return (df.reset_index(), tensor_field, sessions)
# if __name__ == '__main__':
# import sys
# # check
# sys.path.append('./src')
# from config_wrapper import ConfigWrapper
# # import options as options
# # change
# import model as model
# config = ConfigWrapper(market_model)
# config.append()
# results = run(drop_midsteps=True)
# print(results) | 27.225 | 67 | 0.682277 |
f0d2ead39cdd7f415b7cddb6ef88e662d85307e2 | 78 | py | Python | strategy/__init__.py | Neyzoter/Nelearn | b0aa9e0b40592a633e872eb0d1f73beb41cae9ee | [
"MIT"
] | null | null | null | strategy/__init__.py | Neyzoter/Nelearn | b0aa9e0b40592a633e872eb0d1f73beb41cae9ee | [
"MIT"
] | null | null | null | strategy/__init__.py | Neyzoter/Nelearn | b0aa9e0b40592a633e872eb0d1f73beb41cae9ee | [
"MIT"
] | null | null | null | """The Nelearn.strategy package."""
import sys
__author__ = 'Neyzoter Song'
| 13 | 35 | 0.717949 |
4b437795651de313d2eb0af071a3ba5c133ec9fa | 1,560 | py | Python | pyleecan/GUI/Dialog/DMachineSetup/SMagnet/PMagnet14/Gen_PMagnet14.py | Kelos-Zhu/pyleecan | 368f8379688e31a6c26d2c1cd426f21dfbceff2a | [
"Apache-2.0"
] | 2 | 2019-06-08T15:04:39.000Z | 2020-09-07T13:32:22.000Z | pyleecan/GUI/Dialog/DMachineSetup/SMagnet/PMagnet14/Gen_PMagnet14.py | lyhehehe/pyleecan | 421e9a843bf30d796415c77dc934546adffd1cd7 | [
"Apache-2.0"
] | null | null | null | pyleecan/GUI/Dialog/DMachineSetup/SMagnet/PMagnet14/Gen_PMagnet14.py | lyhehehe/pyleecan | 421e9a843bf30d796415c77dc934546adffd1cd7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""File generated according to PMagnet14/gen_list.json
WARNING! All changes made in this file will be lost!
"""
from pyleecan.GUI.Dialog.DMachineSetup.SMagnet.PMagnet14.Ui_PMagnet14 import (
Ui_PMagnet14,
)
class Gen_PMagnet14(Ui_PMagnet14):
    def setupUi(self, PMagnet14):
        """Attach the csv-documented tooltips / what's-this texts to the
        PMagnet14 widgets and set the lower bound of each float field.
        """
        Ui_PMagnet14.setupUi(self, PMagnet14)
        # (label suffix, documentation string from the csv doc)
        widget_docs = (
            ("Hmag", u"""magnet radial height [m]"""),
            ("Wmag", u"""magnet bottom width [rad]"""),
            ("Rtopm", u"""radius of the circular top shape [m]"""),
        )
        for suffix, doc in widget_docs:
            # Setup of the in_* label
            label = getattr(self, "in_" + suffix)
            txt = self.tr(doc)
            label.setWhatsThis(txt)
            label.setToolTip(txt)
            # Setup of the lf_* float edit (values must be >= 0)
            field = getattr(self, "lf_" + suffix)
            field.validator().setBottom(0)
            txt = self.tr(doc)
            field.setWhatsThis(txt)
            field.setToolTip(txt)
1ec797461d9abcb98dc7cbf8184fb571d2d28b4b | 422 | py | Python | invoice/migrations/0004_auto_20160921_1732.py | elegant-solutions/django-webstore | 2c53189ea075a1d60a4d1e20a69ec8e831894068 | [
"MIT"
] | 1 | 2020-10-24T08:45:32.000Z | 2020-10-24T08:45:32.000Z | invoice/migrations/0004_auto_20160921_1732.py | elegant-solutions/django-webstore | 2c53189ea075a1d60a4d1e20a69ec8e831894068 | [
"MIT"
] | 14 | 2016-09-22T17:06:38.000Z | 2016-10-12T18:25:39.000Z | invoice/migrations/0004_auto_20160921_1732.py | elegant-solutions/django-webstore | 2c53189ea075a1d60a4d1e20a69ec8e831894068 | [
"MIT"
] | 3 | 2016-10-07T12:03:35.000Z | 2021-04-17T09:24:21.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: make UserCheckout.email unique."""

    # Must be applied after migration 0003 of the invoice app.
    dependencies = [
        ('invoice', '0003_auto_20160921_1730'),
    ]

    operations = [
        # Enforce uniqueness of the email column at the database level.
        migrations.AlterField(
            model_name='usercheckout',
            name='email',
            field=models.EmailField(unique=True, max_length=254),
        ),
    ]
| 21.1 | 65 | 0.616114 |
a8b2323f69404070b60ca33f9c4c954daa9416a8 | 5,684 | py | Python | data_generation/fractal_graph_expansions/random_matrix_ops.py | mwnewlin/afit_mlperf_training | abdf362abe6a14a85e13b8b18afe7d40cc9e5430 | [
"Apache-2.0"
] | null | null | null | data_generation/fractal_graph_expansions/random_matrix_ops.py | mwnewlin/afit_mlperf_training | abdf362abe6a14a85e13b8b18afe7d40cc9e5430 | [
"Apache-2.0"
] | 12 | 2019-03-25T17:38:16.000Z | 2022-03-11T23:43:25.000Z | data_generation/fractal_graph_expansions/random_matrix_ops.py | mwnewlin/afit_mlperf_training | abdf362abe6a14a85e13b8b18afe7d40cc9e5430 | [
"Apache-2.0"
] | 1 | 2019-03-11T15:59:25.000Z | 2019-03-11T15:59:25.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fast shuffle and dropout of large sparse matrices.
For speed matrices are encoded in a pandas dataframe. To make the behavior
of the following operators deterministic, it is sufficient to setup numpy's
random seed before these operators are called (numpy.random.seed(seed_value)).
Note also that callers running the functions below in parallel are responsible
for guaranteeing that the corresponding underlying sequences of random numbers
(which will be genereted in parallel) are non overlapping.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl import logging
import numpy as np
from scipy import sparse
from sklearn.utils import shuffle
flags.DEFINE_float("min_dropout_rate",
0.05,
"Mininum dropout rate in shuffle_sparse_matrix if none is "
"specified. A lower dropout rate will be clipped to "
"the minimum value.")
flags.DEFINE_float("max_dropout_rate",
0.99,
"Maximum dropout rate in shuffle_sparse_matrix if none is "
"specified. A greater dropout rate will be clipped to "
"the maximum value.")
FLAGS = flags.FLAGS
def _dropout_sparse_coo_matrix(sparse_matrix, rate,
min_dropout_rate, max_dropout_rate):
"""Drop values from a sparse matrix encoded as a SciPy coo matrix.
Args:
sparse_matrix: a SciPy coo sparse matrix.
rate: if rate > 0 then non-zero elements of the input matrix
will be droped uniformly at random.
min_dropout_rate: minimum value for the dropout rate. If None
FLAGS.min_dropout_rate is used. If dropout_rate is lower than
min_dropout_rate it will clipped to min_dropout_rate.
max_dropout_rate: minimum value for the dropout rate. If None
FLAGS.max_dropout_rate is used. If dropout_rate is greater than
max_dropout_rate it will clipped to max_dropout_rate.
Returns:
A SciPy coo matrix containing those non zero elements that have not been
dropped out.
"""
if min_dropout_rate is None:
min_dropout_rate = FLAGS.min_dropout_rate
if max_dropout_rate is None:
max_dropout_rate = FLAGS.max_dropout_rate
if min_dropout_rate > max_dropout_rate:
raise ValueError("min_dropout_rate (%f) should be less or equal to "
"max_dropout_rate (%f)"
% (min_dropout_rate, max_dropout_rate))
max_frac = 1.0 - min_dropout_rate
min_frac = 1.0 - max_dropout_rate
sampling_rate = 1.0 - rate
sampled_fraction = min(max(sampling_rate, min_frac), max_frac)
if sampled_fraction != sampling_rate:
logging.warning("Minimum sampling rate is %2f.", min_frac)
logging.warning("Maximum sampling rate is %2f.", max_frac)
logging.warning("Desired sampling rate is %2f.", sampling_rate)
logging.warning("Desired sampling rate %2f clipped to %2f.", sampling_rate,
sampled_fraction)
num_sampled = min(
max(int(sparse_matrix.nnz * sampled_fraction), 1), sparse_matrix.nnz)
sampled_indices = np.random.choice(sparse_matrix.nnz, size=num_sampled,
replace=False)
return sparse.coo_matrix((sparse_matrix.data[sampled_indices],
(sparse_matrix.row[sampled_indices],
sparse_matrix.col[sampled_indices])),
shape=sparse_matrix.shape)
def shuffle_sparse_coo_matrix(sparse_matrix, dropout_rate=0.0,
                              min_dropout_rate=None, max_dropout_rate=None):
  """Shuffle a sparse matrix encoded as a SciPy coo matrix.

  Rows and columns are relabeled through independent uniformly random
  permutations; optionally, non-zero entries are dropped out first.

  Args:
    sparse_matrix: a SciPy coo sparse matrix.
    dropout_rate: if dropout_rate > 0 then non-zero elements of the input
      matrix will be dropped uniformly at random.
    min_dropout_rate: minimum value for the dropout rate. If None
      FLAGS.min_dropout_rate is used.
    max_dropout_rate: maximum value for the dropout rate. If None
      FLAGS.max_dropout_rate is used.

  Returns:
    A SciPy csr_matrix entailing the randomized interactions.
  """
  if (dropout_rate < 0.0) or (dropout_rate >= 1.0):
    raise ValueError("Dropout rate should be in [0, 1) but is %f"
                     % dropout_rate)
  num_rows, num_cols = sparse_matrix.shape
  row_permutation = shuffle(np.arange(num_rows))
  col_permutation = shuffle(np.arange(num_cols))
  if dropout_rate > 0.0:
    sparse_matrix = _dropout_sparse_coo_matrix(
        sparse_matrix, dropout_rate, min_dropout_rate, max_dropout_rate)
  # Relabel the surviving entries through the sampled permutations.
  permuted_row = row_permutation[sparse_matrix.row]
  permuted_col = col_permutation[sparse_matrix.col]
  return sparse.csr_matrix(
      (sparse_matrix.data, (permuted_row, permuted_col)),
      shape=(num_rows, num_cols))
| 40.892086 | 83 | 0.677516 |
29de9d30c974bd300ec62d731527a9a6ef243d67 | 4,716 | py | Python | pyecharts/custom/timeline.py | xieyinsong/pyecharts | 6ea6654c7a9cc19f89cf507a3c51fe77dd538e06 | [
"MIT"
] | null | null | null | pyecharts/custom/timeline.py | xieyinsong/pyecharts | 6ea6654c7a9cc19f89cf507a3c51fe77dd538e06 | [
"MIT"
] | null | null | null | pyecharts/custom/timeline.py | xieyinsong/pyecharts | 6ea6654c7a9cc19f89cf507a3c51fe77dd538e06 | [
"MIT"
] | null | null | null | # coding=utf-8
import copy
from pyecharts.base import Base
from pyecharts.constants import PAGE_TITLE
from pyecharts.utils import merge_js_dependencies
class Timeline(Base):
    """
    <<< Timeline: carousel playback of multiple charts along a time axis >>>
    """

    def __init__(
        self,
        page_title=PAGE_TITLE,
        width=800,
        height=400,
        is_auto_play=False,
        is_loop_play=True,
        is_rewind_play=False,
        is_timeline_show=True,
        timeline_play_interval=2000,
        timeline_symbol="emptyCircle",
        timeline_symbol_size=10,
        timeline_left="auto",
        timeline_right="auto",
        timeline_top="auto",
        # NOTE(review): the default "atuo" looks like a typo for "auto";
        # kept as-is because changing it would alter runtime behavior.
        timeline_bottom="atuo",
    ):
        """
        :param is_auto_play:
            whether to start playback automatically. Default: False
        :param is_loop_play:
            whether to loop the playback. Default: True
        :param is_rewind_play:
            whether to play in reverse. Default: False
        :param is_timeline_show:
            whether to display the timeline component. Default: True;
            if set to False it is hidden but remains functional.
        :param timeline_play_interval:
            playback speed (interval between steps), in milliseconds (ms).
        :param timeline_symbol:
            marker symbol; one of 'circle', 'rect', 'roundRect',
            'triangle', 'diamond', 'pin', 'arrow'.
        :param timeline_symbol_size:
            marker size; either a single number such as 10, or an array
            giving width and height separately, e.g. [20, 10] for a
            marker 20 wide and 10 high.
        :param timeline_left:
            distance between the timeline component and the left side of
            the container. The value can be a concrete pixel value such
            as 20, a percentage of the container size such as '20%', or
            one of 'left', 'center', 'right', in which case the
            component is aligned to the corresponding position.
        :param timeline_right:
            distance from the right side of the container. Same as left.
        :param timeline_top:
            distance from the top of the container. Same as left.
        :param timeline_bottom:
            distance from the bottom of the container. Same as left.
        """
        super(Timeline, self).__init__(width=width, height=height)
        self._page_title = page_title
        # Labels of the time points, one per added chart.
        self._time_points = []
        # ECharts "timeline" option: shared settings live in baseOption,
        # per-time-point settings are appended to "options".
        self._option = {
            "baseOption": {
                "timeline": {
                    "axisType": "category",
                    "autoPlay": is_auto_play,
                    "loop": is_loop_play,
                    "rewind": is_rewind_play,
                    "show": is_timeline_show,
                    "symbol": timeline_symbol,
                    "symbolSize": timeline_symbol_size,
                    "playInterval": timeline_play_interval,
                    "left": timeline_left,
                    "right": timeline_right,
                    "top": timeline_top,
                    "bottom": timeline_bottom,
                },
                "series": [],
            },
            "options": [],
        }

    def add(self, chart, time_point):
        """
        Register one chart under the given time point.

        :param chart:
            chart instance
        :param time_point:
            label of the time point this chart belongs to
        """
        chart_options = chart.get_options(remove_none=False)
        # Merge the chart's JS dependencies into this page's set
        # (self._js_dependencies is presumably set up by Base — confirm).
        self._js_dependencies = merge_js_dependencies(
            self._js_dependencies, chart.js_dependencies
        )
        self.__check_components(chart)
        self._time_points.append(time_point)
        self._option.get("baseOption").update(
            backgroundColor=chart_options.get("backgroundColor")
        )
        self._option.get("baseOption").get("timeline").update(
            data=self._time_points
        )
        # Per-time-point option snapshot for this chart.
        _option = {
            "color": chart_options.get("color"),
            "legend": chart.options.get('legend'),
            "series": chart.options.get('series'),
            "title": chart.options.get('title'),
            "tooltip": chart.options.get('tooltip'),
        }
        if chart.options.get('xAxis'):
            _option['xAxis'] = chart.options.get('xAxis')
        if chart.options.get('yAxis'):
            _option['yAxis'] = chart.options.get('yAxis')
        self._option.get('options').append(_option)

        # Series templates go into baseOption; map series carry their
        # data in the per-time-point options instead, so drop it here.
        _tmp_series = copy.deepcopy(chart_options.get("series"))
        for _s in _tmp_series:
            if _s.get("type") == "map":
                _s.pop("data", None)
            self._option.get("baseOption").get("series").append(_s)
        return self

    def __check_components(self, chart):
        """
        Copy chart-level components (axes, grid, visualMap, ...) into the
        shared baseOption so that every time point can render them.

        :param chart:
            chart instance
        """
        chart_options = chart.get_options(remove_none=False)
        _compoents = [
            "grid",
            "xAxis",
            "yAxis",
            "polar",
            "radiusAxis",
            "geo",
            "angleAxis",
            "radar",
            "visualMap",
            "dataZoom",
            "parallelAxis",
        ]
        for component in _compoents:
            _c = chart_options.get(component, None)
            if _c is not None:
                self._option.get("baseOption").update({component: _c})
| 31.231788 | 72 | 0.529262 |
b40a224a859271210987777174d9ae07fbeb8a98 | 9,044 | py | Python | classif_and_ktst.py | emanuele/jstsp2015 | 6246d28013652ddf2f889abe9d256ce0afd4d326 | [
"MIT"
] | 8 | 2017-03-31T23:59:53.000Z | 2022-01-16T18:51:54.000Z | classif_and_ktst.py | emanuele/jstsp2015 | 6246d28013652ddf2f889abe9d256ce0afd4d326 | [
"MIT"
] | null | null | null | classif_and_ktst.py | emanuele/jstsp2015 | 6246d28013652ddf2f889abe9d256ce0afd4d326 | [
"MIT"
] | 14 | 2016-01-03T09:01:02.000Z | 2020-03-31T06:05:57.000Z | """Classification-based test and kernel two-sample test.
Author: Sandro Vega-Pons, Emanuele Olivetti.
"""
import os
import numpy as np
from sklearn.metrics import pairwise_distances, confusion_matrix
from sklearn.metrics import pairwise_kernels
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score
from sklearn.grid_search import GridSearchCV
from kernel_two_sample_test import MMD2u, compute_null_distribution
from kernel_two_sample_test import compute_null_distribution_given_permutations
import matplotlib.pylab as plt
from joblib import Parallel, delayed
def compute_rbf_kernel_matrix(X):
    """Return the RBF kernel matrix of X, with the bandwidth sigma2 set
    to the squared median of the pairwise Euclidean distances.
    """
    median_distance = np.median(pairwise_distances(X, metric='euclidean'))
    sigma2 = median_distance ** 2
    return pairwise_kernels(X, X, metric='rbf', gamma=1.0 / sigma2, n_jobs=-1)
def balanced_accuracy_scoring(clf, X, y):
    """Scoring function computing the balanced accuracy (mean of the
    per-class recalls), for internal use in cross-validation.
    """
    conf_mat = confusion_matrix(y, clf.predict(X))
    per_class_recall = [float(conf_mat[i, i]) / np.sum(conf_mat[i])
                        for i in range(len(conf_mat))]
    bal_acc = 0.
    for recall in per_class_recall:
        bal_acc += recall
    return bal_acc / len(conf_mat)
def compute_svm_cv(K, y, C=100.0, n_folds=5,
                   scoring=balanced_accuracy_scoring):
    """Return the mean cross-validated score of an SVM that uses the
    precomputed kernel matrix K.
    """
    folds = StratifiedKFold(y, n_folds=n_folds)
    classifier = SVC(C=C, kernel='precomputed', class_weight='auto')
    fold_scores = cross_val_score(classifier, K, y,
                                  scoring=scoring, cv=folds)
    return fold_scores.mean()
def compute_svm_subjects(K, y, n_folds=5):
    """Mean cross-validated accuracy of an SVM with precomputed kernel K,
    folding over subjects: row i and row half+i are assumed to belong to
    the same subject and are kept in the same fold.
    """
    # BUGFIX/PY3: '/' on ints yields a float under Python 3, which KFold
    # rejects; '//' is identical under Python 2 and fixes Python 3.
    half = len(K) // 2
    cv = KFold(half, n_folds)
    scores = np.zeros(n_folds)
    for i, (train, test) in enumerate(cv):
        # Pair each subject's two rows (j and half+j) into the same split.
        train_ids = np.concatenate((train, half + train))
        test_ids = np.concatenate((test, half + test))
        clf = SVC(kernel='precomputed')
        clf.fit(K[train_ids, :][:, train_ids], y[train_ids])
        scores[i] = clf.score(K[test_ids, :][:, train_ids], y[test_ids])
    return scores.mean()
def permutation_subjects(y):
    """Permute class labels of Contextual Disorder dataset.

    The first and second half of y refer to the same subjects: each
    subject is assigned one random label and its complement.
    """
    # BUGFIX/PY3: integer division ('//') — '/' yields a float size for
    # np.random.randint under Python 3; identical under Python 2.
    half = len(y) // 2
    y_perm = np.random.randint(0, 2, half)
    y_perm = np.concatenate((y_perm, np.logical_not(y_perm).astype(int)))
    return y_perm
def permutation_subjects_ktst(y):
    """Permute class labels of Contextual Disorder dataset for KTST.

    Returns an index permutation that, for each randomly selected
    subject i, swaps positions i and half+i (an involution).
    """
    # BUGFIX/PY3: integer division ('//') — '/' yields a float under
    # Python 3 (randint size / range argument); identical under Python 2.
    half = len(y) // 2
    yp = np.random.randint(0, 2, half)
    yp = np.concatenate((yp, np.logical_not(yp).astype(int)))
    y_perm = np.arange(len(y))
    for i in range(half):
        if yp[i] == 1:
            y_perm[i] = half + i
            y_perm[half + i] = i
    return y_perm
def compute_svm_score_nestedCV(K, y, n_folds,
                               scoring=balanced_accuracy_scoring,
                               random_state=None,
                               param_grid=None):
    """Compute cross-validated score of SVM using precomputed kernel,
    with an inner grid search over C on each training fold.

    BUGFIX: the original default ``param_grid=[{'C': np.logspace(...)}]``
    was a mutable default argument (evaluated once at import time and
    shared across calls); it is now built per call. Passing ``None``
    yields the exact same grid as before, so callers are unaffected.
    """
    if param_grid is None:
        param_grid = [{'C': np.logspace(-5, 5, 25)}]
    cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
                         random_state=random_state)
    scores = np.zeros(n_folds)
    for i, (train, test) in enumerate(cv):
        cvclf = SVC(kernel='precomputed')
        y_train = y[train]
        # Inner CV on the training fold only, for hyper-parameter search.
        cvcv = StratifiedKFold(y_train, n_folds=n_folds,
                               shuffle=True,
                               random_state=random_state)
        clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
                           cv=cvcv, n_jobs=1)
        clf.fit(K[train, :][:, train], y_train)
        # print clf.best_params_
        scores[i] = clf.score(K[test, :][:, train], y[test])
    return scores.mean()
def apply_svm(K, y, n_folds=5, iterations=10000, subjects=False, verbose=True,
              random_state=None):
    """
    Compute the balanced accuracy, its null distribution and the p-value.

    Parameters:
    ----------
    K: array-like
        Kernel matrix
    y: array_like
        class labels
    n_folds: int
        Number of folds in the stratified cross-validation
    iterations: int
        Number of label permutations used to build the null distribution
    subjects: bool
        If True, fold/permute over paired subjects (first and second half
        of the rows belong to the same subjects) instead of single rows
    verbose: bool
        Verbosity
    random_state: int or None
        Seed forwarded to the cross-validation splitters

    Returns:
    -------
    acc: float
        Average balanced accuracy.
    acc_null: array
        Null distribution of the balanced accuracy.
    p_value: float
        p-value
    """
    # Computing the accuracy
    param_grid = [{'C': np.logspace(-5, 5, 20)}]
    if subjects:
        acc = compute_svm_subjects(K, y, n_folds)
    else:
        acc = compute_svm_score_nestedCV(K, y, n_folds, param_grid=param_grid,
                                         random_state=random_state)
    if verbose:
        print("Mean balanced accuracy = %s" % (acc))
        print("Computing the null-distribution.")

    # Previous sequential implementation of the null distribution,
    # kept for reference:
    # acc_null = np.zeros(iterations)
    # for i in range(iterations):
    #     if verbose and (i % 1000) == 0:
    #         print(i),
    #         stdout.flush()
    #     y_perm = np.random.permutation(y)
    #     acc_null[i] = compute_svm_score_nestedCV(K, y_perm, n_folds,
    #                                              param_grid=param_grid)
    # if verbose:
    #     print ''

    # Computing the null-distribution (in parallel over permutations).
    if subjects:
        yis = [permutation_subjects(y) for i in range(iterations)]
        acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_subjects)(K, yis[i], n_folds) for i in range(iterations))
    else:
        yis = [np.random.permutation(y) for i in range(iterations)]
        acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring=balanced_accuracy_scoring, param_grid=param_grid) for i in range(iterations))
        # acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_cv)(K, yis[i], C=100., n_folds=n_folds) for i in range(iterations))
    # One-sided p-value, floored at the test's resolution 1/iterations.
    p_value = max(1.0 / iterations, (acc_null > acc).sum()
                  / float(iterations))
    if verbose:
        print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
    return acc, acc_null, p_value
def apply_ktst(K, y, iterations=10000, subjects=False, verbose=True):
    """
    Compute MMD^2_u, its null distribution and the p-value of the
    kernel two-sample test.

    Parameters:
    ----------
    K: array-like
        Kernel matrix
    y: array_like
        class labels (binary)
    iterations: int
        number of permutations for the null distribution
    subjects: bool
        if True, use the subject-paired permutation scheme
    verbose: bool
        Verbosity

    Returns:
    -------
    mmd2u: float
        MMD^2_u value.
    mmd2u_null: array
        Null distribution of the MMD^2_u
    p_value: float
        p-value
    """
    assert len(np.unique(y)) == 2, 'KTST only works on binary problems'

    # The kernel matrix is assumed ordered: the first m rows belong to
    # one class and the remaining n rows to the other class.
    m = int((y == 0).sum())
    n = int((y == 1).sum())
    mmd2u = MMD2u(K, m, n)
    if verbose:
        print("MMD^2_u = %s" % mmd2u)
        print("Computing the null distribution.")
    if subjects:
        perms = [permutation_subjects_ktst(y) for _ in range(iterations)]
        mmd2u_null = compute_null_distribution_given_permutations(
            K, m, n, perms, iterations)
    else:
        mmd2u_null = compute_null_distribution(
            K, m, n, iterations, verbose=verbose)
    p_value = max(1.0/iterations,
                  (mmd2u_null > mmd2u).sum() / float(iterations))
    if verbose:
        print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
    return mmd2u, mmd2u_null, p_value
def plot_null_distribution(stats, stats_null, p_value, data_name='',
                           stats_name='$MMD^2_u$', save_figure=True):
    """Plot the observed value for the test statistic, its null
    distribution and p-value, optionally saving the figure as a PDF
    under ./figures/.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Histogram of the null distribution, normalized to a density.
    prob, bins, patches = plt.hist(stats_null, bins=50, normed=True)
    # Observed statistic drawn as a white star just above the x axis.
    ax.plot(stats, prob.max()/30, 'w*', markersize=15,
            markeredgecolor='k', markeredgewidth=2,
            label="%s = %s" % (stats_name, stats))
    # Annotate the p-value with an arrow pointing at the observed value.
    ax.annotate('p-value: %s' % (p_value),
                xy=(float(stats), prob.max()/9.), xycoords='data',
                xytext=(-105, 30), textcoords='offset points',
                bbox=dict(boxstyle="round", fc="1."),
                arrowprops={"arrowstyle": "->",
                            "connectionstyle": "angle,angleA=0,angleB=90,rad=10"},
                )
    plt.xlabel(stats_name)
    plt.ylabel('p(%s)' % stats_name)
    plt.legend(numpoints=1)
    plt.title('Data: %s' % data_name)

    if save_figure:
        save_dir = 'figures'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        # The file name encodes the dataset and which test produced it
        # ('ktst' for the kernel test, 'clf' for the classifier test).
        stn = 'ktst' if stats_name == '$MMD^2_u$' else 'clf'
        fig_name = os.path.join(save_dir, '%s_%s.pdf' % (data_name, stn))
        fig.savefig(fig_name)
08c40c0ecc32085e382fc625e515f587aedb1a19 | 265 | py | Python | reverse_order.py | madhurilalitha/Test-repo | a70922a66156c1780e012b5d74017b16c962ecda | [
"MIT"
] | null | null | null | reverse_order.py | madhurilalitha/Test-repo | a70922a66156c1780e012b5d74017b16c962ecda | [
"MIT"
] | null | null | null | reverse_order.py | madhurilalitha/Test-repo | a70922a66156c1780e012b5d74017b16c962ecda | [
"MIT"
] | null | null | null | '''given a list print all the elements of the list in reverse way'''
def reverse_order(alist):
    """Return a new list with the elements of alist in reverse order."""
    # BUGFIX: range(len(alist)-1, -1) had no -1 step, so the range was
    # always empty and the function returned []. Step backwards instead.
    reverse_list = []
    for i in range(len(alist) - 1, -1, -1):
        reverse_list.append(alist[i])
    return reverse_list


reverse = reverse_order([2, 6, 0, 1, 5])
print(reverse)
| 26.5 | 69 | 0.690566 |
7309c09c5a675725a01cbbf25ad5b599242788bb | 4,328 | py | Python | extra/sqlharvest/sqlharvest.py | danielvvDev/Sqlmap-Reforced2 | 6780f801471ebfee5424781c548c47e11d272dc9 | [
"MIT"
] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z | extra/sqlharvest/sqlharvest.py | danielvvDev/Sqlmap-Reforced2 | 6780f801471ebfee5424781c548c47e11d272dc9 | [
"MIT"
] | null | null | null | extra/sqlharvest/sqlharvest.py | danielvvDev/Sqlmap-Reforced2 | 6780f801471ebfee5424781c548c47e11d272dc9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import cookielib
import re
import socket
import sys
import urllib
import urllib2
import ConfigParser
from operator import itemgetter
TIMEOUT = 10
CONFIG_FILE = 'sqlharvest.cfg'
TABLES_FILE = 'tables.txt'
USER_AGENT = 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; AskTB5.3)'
SEARCH_URL = 'http://www.google.com/m?source=mobileproducts&dc=gorganic'
MAX_FILE_SIZE = 2 * 1024 * 1024 # if a result (.sql) file for downloading is more than 2MB in size just skip it
QUERY = 'CREATE TABLE ext:sql'
REGEX_URLS = r';u=([^"]+?)&q='
REGEX_RESULT = r'(?i)CREATE TABLE\s*(/\*.*\*/)?\s*(IF NOT EXISTS)?\s*(?P<result>[^\(;]+)'
def main():
    """Query Google for downloadable .sql files, harvest CREATE TABLE
    names from them, and persist the table-name frequencies.

    State is kept across runs: the result-page index in CONFIG_FILE and
    the accumulated table counts in TABLES_FILE. (Python 2 code.)
    """
    tables = dict()  # table name -> occurrence count
    cookies = cookielib.CookieJar()
    cookie_processor = urllib2.HTTPCookieProcessor(cookies)
    opener = urllib2.build_opener(cookie_processor)
    opener.addheaders = [("User-Agent", USER_AGENT)]

    conn = opener.open(SEARCH_URL)
    page = conn.read()  # set initial cookie values

    # Resume from the page index stored in the config file (default 0).
    config = ConfigParser.ConfigParser()
    config.read(CONFIG_FILE)
    if not config.has_section("options"):
        config.add_section("options")
    if not config.has_option("options", "index"):
        config.set("options", "index", "0")
    i = int(config.get("options", "index"))

    # Reload previously harvested counts; any parse/IO error just means
    # we start from an empty table (best-effort resume).
    try:
        with open(TABLES_FILE, 'r') as f:
            for line in f.xreadlines():
                if len(line) > 0 and ',' in line:
                    temp = line.split(',')
                    tables[temp[0]] = int(temp[1])
    except:
        pass

    socket.setdefaulttimeout(TIMEOUT)

    files, old_files = None, None
    try:
        while True:
            abort = False
            old_files = files
            files = []
            # Fetch one search-result page and extract up to 10 target URLs.
            try:
                conn = opener.open("%s&q=%s&start=%d&sa=N" % (SEARCH_URL, QUERY.replace(' ', '+'), i * 10))
                page = conn.read()
                for match in re.finditer(REGEX_URLS, page):
                    files.append(urllib.unquote(match.group(1)))
                    if len(files) >= 10:
                        break
                # Identical result set as the previous page means we've
                # reached the end of the results.
                abort = (files == old_files)
            except KeyboardInterrupt:
                raise
            except Exception, msg:
                print msg

            if abort:
                break

            sys.stdout.write("\n---------------\n")
            sys.stdout.write("Result page #%d\n" % (i + 1))
            sys.stdout.write("---------------\n")

            for sqlfile in files:
                print sqlfile
                try:
                    req = urllib2.Request(sqlfile)
                    response = urllib2.urlopen(req)
                    # Skip overly large downloads (> MAX_FILE_SIZE).
                    if "Content-Length" in response.headers:
                        if int(response.headers.get("Content-Length")) > MAX_FILE_SIZE:
                            continue
                    page = response.read()
                    found = False
                    counter = 0
                    # Harvest table names from CREATE TABLE statements,
                    # stripping quoting/bracket styles of various DBMSes.
                    for match in re.finditer(REGEX_RESULT, page):
                        counter += 1
                        table = match.group("result").strip().strip("`\"'").replace('"."', ".").replace("].[", ".").strip('[]')
                        if table and not any(_ in table for _ in ('>', '<', '--', ' ')):
                            found = True
                            sys.stdout.write('*')
                            if table in tables:
                                tables[table] += 1
                            else:
                                tables[table] = 1
                    if found:
                        sys.stdout.write("\n")
                except KeyboardInterrupt:
                    raise
                except Exception, msg:
                    print msg
            else:
                # for-else: advance to the next result page after a full pass.
                i += 1
    except KeyboardInterrupt:
        pass
    finally:
        # Persist counts (most frequent first) and the next page index.
        with open(TABLES_FILE, 'w+') as f:
            tables = sorted(tables.items(), key=itemgetter(1), reverse=True)
            for table, count in tables:
                f.write("%s,%d\n" % (table, count))
        config.set("options", "index", str(i + 1))
        with open(CONFIG_FILE, 'w+') as f:
            config.write(f)
if __name__ == "__main__":
main()
| 30.478873 | 127 | 0.496996 |
ebd022a798aba7a6f654374c8a99b2545ed8ebb7 | 90 | py | Python | boa3_test/example/arithmetic_test/MixedOperations.py | jplippi/neo3-boa | 052be4adebb665113715bb80067d954f7ad85ad5 | [
"Apache-2.0"
] | null | null | null | boa3_test/example/arithmetic_test/MixedOperations.py | jplippi/neo3-boa | 052be4adebb665113715bb80067d954f7ad85ad5 | [
"Apache-2.0"
] | null | null | null | boa3_test/example/arithmetic_test/MixedOperations.py | jplippi/neo3-boa | 052be4adebb665113715bb80067d954f7ad85ad5 | [
"Apache-2.0"
] | null | null | null | def Main(a: int, b: int, c: int, d: int, e: int) -> int:
return a + c * e - (-d) // b
| 30 | 56 | 0.455556 |
6a4e73f06f0a6bfbd57c61f5efd97ec13023b45e | 850 | py | Python | users/migrations/0001_initial.py | omarion3698/awwards | 5e4b9330c11d1bdda511bb27141021e17f88c96f | [
"MIT"
] | null | null | null | users/migrations/0001_initial.py | omarion3698/awwards | 5e4b9330c11d1bdda511bb27141021e17f88c96f | [
"MIT"
] | null | null | null | users/migrations/0001_initial.py | omarion3698/awwards | 5e4b9330c11d1bdda511bb27141021e17f88c96f | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-26 18:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.CharField(default='Available', max_length=30)),
('image', models.ImageField(default='default.png', upload_to='images')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 31.481481 | 121 | 0.637647 |
2e2fdbe54eb8b76f0b8d1600568aad6debc4154a | 11,495 | py | Python | py_src/lingtree/tigerxml.py | yv/lingtree | 7356baa6792db0f88aa7b4f0ab4c2e32907741d6 | [
"MIT"
] | 1 | 2021-08-06T14:16:42.000Z | 2021-08-06T14:16:42.000Z | py_src/lingtree/tigerxml.py | yv/lingtree | 7356baa6792db0f88aa7b4f0ab4c2e32907741d6 | [
"MIT"
] | null | null | null | py_src/lingtree/tigerxml.py | yv/lingtree | 7356baa6792db0f88aa7b4f0ab4c2e32907741d6 | [
"MIT"
] | 1 | 2021-08-06T14:16:44.000Z | 2021-08-06T14:16:44.000Z | # Copyright 2008-2020 Yannick Versley
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
deals with the TigerXML file format for treebanks.
Reading support is limited to "standard" attributes.
Writing support requires lxml, also limited to "standard"
attributes
"""
from __future__ import print_function
from builtins import str, bytes
import sys
from .tree import Tree, TerminalNode, NontermNode
# Prefer lxml when available: it can pretty-print during serialization.
try:
    from lxml import etree

    def write_node(f_out, s_node, encoding):
        """Serialize s_node (pretty-printed) to the file object f_out."""
        f_out.write(etree.tostring(s_node, pretty_print=True,
                                   encoding=encoding))
except ImportError:
    # Fallback: stdlib ElementTree has no pretty_print, so emulate a
    # basic indentation by inserting whitespace text/tail nodes.
    import xml.etree.cElementTree as etree

    def add_some_space(node, indent=''):
        """Recursively insert newline/indent text so the XML looks nested."""
        chlds = list(node)
        if chlds:
            node.text = '\n '+indent
            for n in chlds:
                add_some_space(n, indent+' ')
            chlds[-1].tail = '\n'+indent
        node.tail = '\n'+indent

    def write_node(f_out, s_node, encoding):
        # add some basic formatting
        add_some_space(s_node)
        # NOTE(review): assumes etree.tostring returns a str here
        # (Python 2); under Python 3 it returns bytes and the '<?'
        # substring test below would fail — confirm intended runtime.
        s_result = etree.tostring(s_node, encoding=encoding)
        # strip xml header.
        if '<?' in s_result:
            idx = s_result.index('?>')
            s_result = s_result[idx+2:]
        f_out.write(s_result)
from xml.sax.saxutils import quoteattr, escape
def encoded_attrib(n, att, default_val):
    """Return XML attribute *att* of element *n*, or *default_val* if absent."""
    return n.attrib.get(att, default_val)
def get_terminals(graph, term_ref):
    """Build a TerminalNode for every TigerXML <t> element of *graph*,
    registering each node in *term_ref* under its XML id."""
    terminals = []
    for t_node in graph.find('terminals').findall('t'):
        # Some corpora use 'word', older exports use 'orth'.
        if 'word' in t_node.attrib:
            word = t_node.attrib['word']
        else:
            word = t_node.attrib['orth']
        terminal = TerminalNode(encoded_attrib(t_node, 'pos', '--'), word)
        for extra in ['morph', 'lemma']:
            if extra in t_node.attrib:
                setattr(terminal, extra, t_node.attrib[extra])
        terminal.xml_id = t_node.attrib['id']
        # Terminal ids must be unique within the sentence.
        assert terminal.xml_id not in term_ref, \
            (term_ref[terminal.xml_id], terminal)
        term_ref[terminal.xml_id] = terminal
        terminals.append(terminal)
    return terminals
#pylint:disable=C0103
def tiger_sent(node):
    'decodes the TigerXML sentence from the given XML node'
    t = Tree()
    # maps xml_id -> node object for both terminals and nonterminals
    term_ref = {}
    graph = node.find('graph')
    # sentence ids are conventionally "s<number>"; fall back to the raw id
    # string when the numeric part cannot be parsed
    try:
        node_id = node.attrib['id']
        if node_id[0] == 's':
            node_id = node_id[1:]
        t.sent_no = int(node_id)
    except ValueError:
        t.sent_no = node.attrib.get('id', None)
    t.terminals = get_terminals(graph, term_ref)
    # pass 1: create all nonterminal nodes so edges can refer to them
    for n in graph.find('nonterminals').findall('nt'):
        nt = NontermNode(encoded_attrib(n, 'cat', '--'))
        nt.xml_id = n.attrib['id']
        term_ref[nt.xml_id] = nt
    # pass 2: attach children via the edge elements
    for n in graph.find('nonterminals').findall('nt'):
        nt = term_ref[n.attrib['id']]
        chlds = []
        for e in n.findall('edge'):
            x = term_ref[e.attrib['idref']]
            # each node may have at most one parent (tree, not DAG)
            assert x.parent is None, (nt, x.parent, x)
            x.edge_label = encoded_attrib(e, 'label', None)
            x.parent = nt
            chlds.append(x)
        nt.children = chlds
    # pass 3: collect root nonterminals, dropping any explicit VROOT wrapper
    for n in graph.find('nonterminals').findall('nt'):
        nt = term_ref[n.attrib['id']]
        if (not hasattr(nt, 'parent') or nt.parent is None or
            nt.parent.cat == 'VROOT'):
            nt.parent = None
            if nt.cat != 'VROOT':
                t.roots.append(nt)
    # assign token positions; unattached terminals also become roots
    for i, n in enumerate(graph.find('terminals').findall('t')):
        trm = term_ref[n.attrib['id']]
        trm.start = i
        trm.end = i + 1
        if (not hasattr(trm, 'parent') or trm.parent is None or
            trm.parent.cat == 'VROOT'):
            trm.parent = None
            trm.edge_label = '--'
            if trm.cat != 'VROOT':
                t.roots.append(trm)
    t.renumber_ids()
    t.determine_tokenspan_all()
    return t
def assign_node_ids(t, suffix=''):
    """
    makes sure that a tree, and all its terminal
    and nonterminal nodes, have a suitable xml_id
    attribute.
    """
    # Derive the sentence id from whichever attribute the tree carries.
    # NOTE(review): a tree with none of xml_id/sent_no/sent_id raises
    # UnboundLocalError below (pre-existing behavior, kept as-is).
    if hasattr(t, 'xml_id'):
        sent_id = t.xml_id
    elif hasattr(t, 'sent_no'):
        sent_id = 's%s' % (t.sent_no,)
        # avoid a doubled prefix when sent_no already starts with 's'
        if sent_id.startswith('ss'):
            sent_id = sent_id[1:]
        t.xml_id = sent_id
    elif hasattr(t, 'sent_id'):
        sent_id = str(t.sent_id)
        t.xml_id = sent_id
    # terminals are numbered 1..n in surface order
    for position, term in enumerate(t.terminals, start=1):
        if not hasattr(term, 'xml_id'):
            term.xml_id = '%s_%s' % (sent_id, position)
    # nonterminals get ids counting upward from 500
    next_id = 500
    for node in t.bottomup_enumeration():
        if node.isTerminal() or hasattr(node, 'xml_id'):
            continue
        node.xml_id = '%s_n%s%s' % (sent_id, next_id, suffix)
        next_id += 1
def make_string(n, attname):
    """Return str(n.<attname>), or '--' when the attribute is absent or None.

    The original fell off the end and returned None for a missing
    attribute, contradicting its own docstring and producing invalid
    (non-string) XML attribute values in encode_tree().
    """
    val = getattr(n, attname, None)
    if val is None:
        return '--'
    return str(val)
def read_trees(fname):
    '''yields the sequence of trees in an XML file'''
    #pylint:disable=W0612
    for _event, elem in etree.iterparse(fname):
        if elem.tag != 's':
            continue
        yield tiger_sent(elem)
        # free the element's memory once the sentence has been decoded
        elem.clear()
def read_kbest_lists(fname):
    '''
    reads kbest lists of trees
    '''
    for _event, elem in etree.iterparse(fname):
        if elem.tag != 'sentence':
            continue
        # the gold tree is wrapped in <gold-tree><s>...</s></gold-tree>
        gold_tree = tiger_sent(elem.find('gold-tree').find('s'))
        candidates = []
        for kbest_elem in elem.findall('kbest-tree'):
            kbest_s = kbest_elem.find('s')
            assert kbest_s is not None, kbest_elem
            tree = tiger_sent(kbest_s)
            attribs = kbest_elem.attrib
            # optional per-candidate scores from the parser / evaluator
            if 'model-score' in attribs:
                tree.score = float(attribs['model-score'])
            if 'score' in attribs:
                tree.eval_score = float(attribs['score'])
            candidates.append(tree)
        yield (gold_tree, candidates)
        elem.clear()
def encode_tree(t, encoding=None, always_vroot=True,
                id_suffix='', extra_term_att=None,
                extra_nt_att=None):
    '''returns an XML node describing a tree

    :param t: the tree to encode; node ids are assigned as needed
    :param encoding: defaults to t.encoding when None
    :param always_vroot: add a VROOT nonterminal even for single-rooted trees
    :param id_suffix: appended to generated nonterminal ids
    :param extra_term_att: extra attribute names copied onto <t> elements
    :param extra_nt_att: extra attribute names copied onto <nt> elements
    '''
    if encoding is None:
        encoding = t.encoding
    assign_node_ids(t, suffix=id_suffix)
    s_node = etree.Element('s')
    s_node.attrib['id'] = t.xml_id
    graph = etree.SubElement(s_node, 'graph')
    # terminals first, in surface order
    trms_node = etree.SubElement(graph, 'terminals')
    for n in t.terminals:
        trm = etree.SubElement(trms_node, 't')
        trm.attrib['id'] = n.xml_id
        trm.attrib['word'] = n.word
        trm.attrib['pos'] = n.cat
        trm.attrib['morph'] = make_string(n, 'morph')
        if hasattr(n, 'lemma') and n.lemma is not None:
            trm.attrib['lemma'] = n.lemma
        if extra_term_att:
            for att in extra_term_att:
                if hasattr(n, att) and getattr(n, att) is not None:
                    trm.attrib[att] = make_string(n, att)
    # nonterminals in bottom-up order, each listing its outgoing edges
    nts_node = etree.SubElement(graph, 'nonterminals')
    for n in t.bottomup_enumeration():
        if n.isTerminal():
            continue
        nt_node = etree.SubElement(nts_node, 'nt')
        nt_node.attrib['id'] = n.xml_id
        nt_node.attrib['cat'] = n.cat
        if extra_nt_att:
            for att in extra_nt_att:
                if hasattr(n, att) and getattr(n, att) is not None:
                    nt_node.attrib[att] = make_string(n, att)
        for chld in n.children:
            edge = etree.SubElement(nt_node, 'edge')
            edge.attrib['label'] = make_string(chld, 'edge_label')
            edge.attrib['idref'] = chld.xml_id
    # a virtual root ties together multiple roots (or is always added)
    if always_vroot or len(t.roots) > 1:
        vroot = etree.SubElement(nts_node, 'nt',
                                 cat='VROOT',
                                 id='%s_VROOT'%(t.xml_id,))
        for n in t.roots:
            edge = etree.SubElement(vroot, 'edge',
                                    label=make_string(n, 'edge_label'),
                                    idref=n.xml_id)
        graph.attrib['root'] = '%s_VROOT'%(t.xml_id,)
    else:
        graph.attrib['root'] = t.roots[0].xml_id
    return s_node
def describe_schema(f_out, schema, domain, encoding):
    """Write TigerXML <feature>/<edgelabel> declarations for *schema*.

    *domain* is 'T' (terminals) or 'NT' (nonterminals); the 'func'
    attribute is emitted once as <edgelabel> (skipped for NT).
    Fix: descriptions were passed through .encode(encoding) before
    escape(), which raises TypeError on Python 3 (escape() operates on
    str); print() handles the output encoding itself.
    """
    for attr in schema.attributes:
        if hasattr(attr, 'names'):
            if attr.name == 'func':
                if domain == 'NT':
                    continue
                else:
                    print(' <edgelabel>', file=f_out)
            else:
                print(' <feature name="%s" domain="%s">'%(
                    attr.name, domain), file=f_out)
            for name in attr.names:
                print(' <value name=%s>%s</value>'%(
                    quoteattr(name),
                    escape(attr.descriptions[name])), file=f_out)
            if attr.name == 'func':
                print(' </edgelabel>', file=f_out)
            else:
                print(' </feature>', file=f_out)
        else:
            # attribute without an enumerated value set: self-closing tag
            print(' <feature name="%s" domain="%s"/>'%(
                attr.name, domain), file=f_out)
def write_tiger_file(f_out, trees, meta=None, encoding="UTF-8",
                     corpus_id="pytree_output"):
    """Write *trees* as a complete TigerXML corpus document to *f_out*."""
    print('<?xml version="1.0" encoding="%s" standalone="yes"?>'%(encoding,), file=f_out)
    print('<corpus id="%s">'%(corpus_id,), file=f_out)
    print('<head>', file=f_out)
    #TODO print meta information
    if meta:
        print(' <annotation>', file=f_out)
        nt_cat = meta['NT'].attribute_by_name('cat')
        # encode_tree may introduce VROOT nodes, so declare the category
        if 'VROOT' not in nt_cat.descriptions:
            nt_cat.add_item('VROOT', 'virtual root node')
        describe_schema(f_out, meta['T'], 'T', encoding)
        describe_schema(f_out, meta['NT'], 'NT', encoding)
        print(' </annotation>', file=f_out)
    print('</head>', file=f_out)
    print('<body>', file=f_out)
    for tree in trees:
        write_node(f_out, encode_tree(tree, None), encoding)
    print("</body>", file=f_out)
    print("</corpus>", file=f_out)
def tiger2export_main(args):
    '''converts one file into .export format (body only)'''
    from . import export
    for sent_no, tree in enumerate(read_trees(args[0]), start=1):
        # BOS ([0-9]+) +[^ ]+ +[^ ]+ ([0-9]+)([ \t]*%%.*)?')
        print("#BOS %d -1 -1 0" % (sent_no,))
        export.write_sentence_tabs(tree, sys.stdout)
        print("#EOS %d" % (sent_no,))


if __name__ == '__main__':
    tiger2export_main(sys.argv[1:])
| 37.8125 | 115 | 0.571727 |
232477d1a2e19b60e70bb78d55802171edcce92e | 4,693 | py | Python | members/conferences/models.py | ocwc/ocwc-members | 3ede8e0ff830e2aaff4ae09f9aaefd3eaa83146b | [
"MIT"
] | null | null | null | members/conferences/models.py | ocwc/ocwc-members | 3ede8e0ff830e2aaff4ae09f9aaefd3eaa83146b | [
"MIT"
] | 7 | 2015-11-27T15:59:52.000Z | 2022-01-13T00:38:38.000Z | members/conferences/models.py | ocwc/ocwc-members | 3ede8e0ff830e2aaff4ae09f9aaefd3eaa83146b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import uuid
from tinymce import HTMLField
from django.db import models
from django.urls import reverse
from django.core.mail import EmailMessage
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from crm.utils import print_pdf
class ConferenceInterface(models.Model):
    """Connection settings for a remote conference registration API.

    Fix: the class defined only ``__unicode__``, which Python 3 (and
    therefore modern Django, as used elsewhere in this file via
    ``django.urls``) never calls; ``__str__`` matches the sibling models.
    """

    name = models.CharField(max_length=255)
    url = models.CharField(max_length=255, default="")
    api_key = models.CharField(max_length=255, default="")
    private_key = models.CharField(max_length=255, default="")
    # set by the sync job; null until the first successful sync
    last_synced = models.DateTimeField(null=True)

    def __str__(self):
        return self.name
class ConferenceRegistration(models.Model):
    """A single conference registration entry synced from a remote form.

    Fix: the ``save()`` override listed only ``force_insert``,
    ``force_update`` and ``using``, silently rejecting Django's
    ``update_fields`` argument (``instance.save(update_fields=[...])``
    raised TypeError); it now passes the full signature through.
    """

    PAYMENT_TYPE = (("paypal", "PayPal"), ("wire", "Wire Transfer"))

    interface = models.ForeignKey(ConferenceInterface, models.CASCADE)
    form_id = models.CharField(max_length=255)
    entry_id = models.CharField(max_length=255)
    entry_created = models.DateTimeField(null=True)
    name = models.CharField(max_length=255, default="")
    email = models.CharField(max_length=255, default="")
    organization = models.CharField(max_length=255, default="")
    billing_address = models.TextField(default="", blank=True)
    ticket_type = models.CharField(max_length=255)
    dinner_guest = models.CharField(max_length=255, default="", blank=True)
    dinner_guest_qty = models.IntegerField(default=0)
    conference_dinner = models.CharField(max_length=255, default="", blank=True)
    reception_guest = models.CharField(max_length=255, default="", blank=True)
    reception_guest_qty = models.IntegerField(default=0)
    total_amount = models.CharField(max_length=255)
    payment_type = models.CharField(choices=PAYMENT_TYPE, max_length=255)
    products = JSONField(blank=True, null=True)
    # QuickBooks Online invoice id, once exported
    qbo_id = models.IntegerField(blank=True, null=True)
    source_url = models.CharField(max_length=255)
    billing_html = HTMLField(default="")
    product_html = models.TextField(default="")
    last_synced = models.DateTimeField(auto_now=True)
    # random token used in the public invoice-preview URL
    access_key = models.CharField(max_length=32, blank=True)
    is_group = models.BooleanField(default=False)

    def save(self, *args, **kwargs):
        """Generate the public access key on first save, then save normally."""
        if not self.access_key:
            self.access_key = uuid.uuid4().hex
        super().save(*args, **kwargs)

    def get_access_key_url(self):
        """URL of the invoice preview, authenticated by the access key."""
        return reverse(
            "conferences:invoice_preview",
            kwargs={"pk": self.id, "access_key": self.access_key},
        )

    def get_absolute_url(self):
        return "/conferences/#registration-{}".format(self.id)

    def email_invoice(self):
        """Render the invoice to PDF and e-mail it to the registrant."""
        body = """Thank you for registering for Open Education Global Conference 2018 (24-26 April in Delft, the Netherlands).
Attached is your invoice.
Do not hesitate to contact us at conference@oeglobal.org if you have any questions.
We look forward to welcoming you in the Netherlands!
Open Education Global Conference 2018 Planning Team.
"""
        message = EmailMessage(
            subject="Open Education Global Conference 2018 - Invoice",
            body=body,
            from_email="conference@oeglobal.org",
            to=[self.email],
            # bcc = ['conference@oeglobal.org']
        )

        # the invoice HTML is rendered to PDF by an external PhantomJS service
        url = "%s%s" % (settings.INVOICES_PHANTOM_JS_HOST, self.get_access_key_url())
        pdf_file = print_pdf(url)

        message.attach(
            filename="OEGlobal-Invoice-%s.pdf" % self.entry_id,
            content=pdf_file,
            mimetype="application/pdf",
        )
        message.send()
# (value, label) choices shared by the e-mail template/registration/log
# models below; "normal" is the regular attendee flow, "presenter" the
# speaker flow.
CONFERENCE_REGISTRATION_TYPE = (
    ("normal", "Normal registration"),
    ("presenter", "Presenter registration"),
)
class ConferenceEmailTemplate(models.Model):
    """Reusable subject/body template for conference e-mails."""

    subject = models.CharField(max_length=255)
    body_text = models.TextField()
    body_html = models.TextField()
    email_type = models.CharField(
        choices=CONFERENCE_REGISTRATION_TYPE, default="normal", max_length=20
    )

    def __str__(self) -> str:
        return f"{self.subject}"
class ConferenceEmailRegistration(models.Model):
    """E-mail address registered for one of the registration flows."""

    email = models.CharField(max_length=255)
    email_type = models.CharField(
        choices=CONFERENCE_REGISTRATION_TYPE, default="normal", max_length=20
    )

    def __str__(self) -> str:
        return f"{self.email} - {self.email_type}"
class ConferenceEmailLogs(models.Model):
    """Timestamped record of an e-mail-related action."""

    action = models.CharField(max_length=255)
    pub_date = models.DateTimeField(auto_now_add=True)

    def __str__(self) -> str:
        return f"{self.action}"
| 32.590278 | 126 | 0.698061 |
765c307e60a8598ba3064ba78bba927c29c41d7d | 5,456 | py | Python | examples/advanced_operations/get_ad_group_bid_modifiers.py | tridge-hq/google-ads-python | 7c61e0705063324b973a6bdf1d83193c032a9012 | [
"Apache-2.0"
] | null | null | null | examples/advanced_operations/get_ad_group_bid_modifiers.py | tridge-hq/google-ads-python | 7c61e0705063324b973a6bdf1d83193c032a9012 | [
"Apache-2.0"
] | null | null | null | examples/advanced_operations/get_ad_group_bid_modifiers.py | tridge-hq/google-ads-python | 7c61e0705063324b973a6bdf1d83193c032a9012 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates how to retrieve ad group bid modifiers."""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
# Number of rows fetched per page when paging through search results.
_DEFAULT_PAGE_SIZE = 1000
def main(client, customer_id, page_size, ad_group_id=None):
    """Lists ad group bid modifiers for the given customer.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: the Google Ads customer ID.
        page_size: number of results to return per search page.
        ad_group_id: optional ad group ID to restrict the listing to.

    Fixes: the ``page_size`` parameter was previously ignored (the module
    constant was assigned instead), and the ``hotel_check_in_date_range``
    fields selected by the query were never printed.
    """
    ga_service = client.get_service("GoogleAdsService")

    query = """
        SELECT
          campaign.id,
          ad_group.id,
          ad_group_bid_modifier.criterion_id,
          ad_group_bid_modifier.bid_modifier,
          ad_group_bid_modifier.device.type,
          ad_group_bid_modifier.hotel_date_selection_type.type,
          ad_group_bid_modifier.hotel_advance_booking_window.min_days,
          ad_group_bid_modifier.hotel_advance_booking_window.max_days,
          ad_group_bid_modifier.hotel_length_of_stay.min_nights,
          ad_group_bid_modifier.hotel_length_of_stay.max_nights,
          ad_group_bid_modifier.hotel_check_in_day.day_of_week,
          ad_group_bid_modifier.hotel_check_in_date_range.start_date,
          ad_group_bid_modifier.hotel_check_in_date_range.end_date,
          ad_group_bid_modifier.preferred_content.type
        FROM ad_group_bid_modifier"""

    if ad_group_id:
        query += f" WHERE ad_group.id = {ad_group_id}"

    search_request = client.get_type("SearchGoogleAdsRequest")
    search_request.customer_id = customer_id
    search_request.query = query
    # use the caller-supplied page size instead of the module constant
    search_request.page_size = page_size

    results = ga_service.search(request=search_request)

    for row in results:
        modifier = row.ad_group_bid_modifier

        print(
            "Ad group bid modifier with criterion ID "
            f"'{modifier.criterion_id}', bid modifier value "
            f"'{modifier.bid_modifier or 0.00}', device type "
            f"'{modifier.device.type_.name}' was found in ad group with ID "
            f"'{row.ad_group.id}' of campaign with ID '{row.campaign.id}'."
        )

        # which oneof member of the criterion union is populated
        criterion_field = type(modifier).pb(modifier).WhichOneof("criterion")
        criterion_details = f" - Criterion type: {criterion_field}, "

        if criterion_field == "device":
            criterion_details += f"Type: {modifier.device.type_}"
        elif criterion_field == "hotel_advance_booking_window":
            criterion_details += (
                f"Min Days: {modifier.hotel_advance_booking_window.min_days}, "
                f"Max Days: {modifier.hotel_advance_booking_window.max_days}"
            )
        elif criterion_field == "hotel_check_in_day":
            criterion_details += (
                f"Day of the week: {modifier.hotel_check_in_day.day_of_week}"
            )
        elif criterion_field == "hotel_check_in_date_range":
            criterion_details += (
                f"Start Date: {modifier.hotel_check_in_date_range.start_date}, "
                f"End Date: {modifier.hotel_check_in_date_range.end_date}"
            )
        elif criterion_field == "hotel_date_selection_type":
            criterion_details += f"Date selection type: {modifier.hotel_date_selection_type.type_}"
        elif criterion_field == "hotel_length_of_stay":
            criterion_details += (
                f"Min Nights: {modifier.hotel_length_of_stay.min_nights}, "
                f"Max Nights: {modifier.hotel_length_of_stay.max_nights}"
            )
        elif criterion_field == "preferred_content":
            criterion_details += f"Type: {modifier.preferred_content.type_}"
        else:
            criterion_details = " - No Criterion type found."

        print(criterion_details)
if __name__ == "__main__":
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    googleads_client = GoogleAdsClient.load_from_storage(version="v8")

    parser = argparse.ArgumentParser(
        description="List ad group bid modifiers for specified customer."
    )
    # The following argument(s) should be provided to run the example.
    parser.add_argument(
        "-c",
        "--customer_id",
        type=str,
        required=True,
        help="The Google Ads customer ID.",
    )
    parser.add_argument(
        "-a",
        "--ad_group_id",
        type=str,
        required=False,
        help=(
            "The ad group ID. Specify this to list ad group "
            "bid modifiers solely for this ad group ID."
        ),
    )
    args = parser.parse_args()

    try:
        main(
            googleads_client,
            args.customer_id,
            _DEFAULT_PAGE_SIZE,
            ad_group_id=args.ad_group_id,
        )
    except GoogleAdsException as ex:
        # report the failed request, then each individual error detail
        print(
            f'Request with ID "{ex.request_id}" failed with status '
            f'"{ex.error.code().name}" and includes the following errors:'
        )
        for error in ex.failure.errors:
            print(f'\tError with message "{error.message}".')
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print(f"\t\tOn field: {field_path_element.field_name}")
        sys.exit(1)
e1984495dc1b2c59fcd4b8d29bc40f142bb27812 | 19,012 | py | Python | train.py | zhaoguangxiang/OFA | cc1719df2713f0a046f34acb0afd8782e08ea6be | [
"Apache-2.0"
] | 1 | 2022-03-05T15:05:02.000Z | 2022-03-05T15:05:02.000Z | train.py | zhaoguangxiang/OFA | cc1719df2713f0a046f34acb0afd8782e08ea6be | [
"Apache-2.0"
] | null | null | null | train.py | zhaoguangxiang/OFA | cc1719df2713f0a046f34acb0afd8782e08ea6be | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import os
import sys
from typing import Dict, Optional, Any, List, Tuple, Callable
# We need to setup root logger before importing any fairseq libraries.
# Configure the root logger here, before the fairseq imports below, so that
# fairseq's module-level loggers inherit this handler/format instead of
# installing their own.
logging.basicConfig(
    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
    datefmt="%Y-%m-%d %H:%M:%S",
    # the LOGLEVEL environment variable overrides the default INFO level
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
import numpy as np
import torch
from fairseq import (
# checkpoint_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.data.plasma_utils import PlasmaStore
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
# from fairseq.trainer import Trainer
from omegaconf import DictConfig, OmegaConf
from utils import checkpoint_utils
from trainer import Trainer
def main(cfg: FairseqConfig) -> None:
    """Set up task/model/criterion and run the training loop until a stop
    condition (max epoch, min LR, max updates, patience, time) is hit."""
    # allow callers that still pass a legacy argparse Namespace
    if isinstance(cfg, argparse.Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)

    utils.import_user_module(cfg.common)

    if distributed_utils.is_master(cfg.distributed_training) and "job_logging_cfg" in cfg:
        # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126)
        logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))

    assert (
        cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
    ), "Must specify batch size either with --max-tokens or --batch-size"
    metrics.reset()

    if cfg.common.log_file is not None:
        handler = logging.FileHandler(filename=cfg.common.log_file)
        logger.addHandler(handler)

    np.random.seed(cfg.common.seed)
    utils.set_torch_seed(cfg.common.seed)

    # only the master process validates/creates the checkpoint directory
    if distributed_utils.is_master(cfg.distributed_training):
        checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)

    # Print args
    logger.info(cfg)

    # async checkpointing needs iopath; bail out early if it is missing
    if cfg.checkpoint.write_checkpoints_asynchronously:
        try:
            import iopath  # noqa: F401
        except ImportError:
            logging.exception(
                "Asynchronous checkpoint writing is specified but iopath is "
                "not installed: `pip install iopath`"
            )
            return

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(cfg.task)

    assert cfg.criterion, "Please specify criterion to train a model"

    # Build model and criterion
    if cfg.distributed_training.ddp_backend == "fully_sharded":
        with fsdp_enable_wrap(cfg.distributed_training):
            model = fsdp_wrap(task.build_model(cfg.model))
    else:
        model = task.build_model(cfg.model)
    criterion = task.build_criterion(cfg.criterion)
    logger.info(model)
    logger.info("task: {}".format(task.__class__.__name__))
    logger.info("model: {}".format(model.__class__.__name__))
    logger.info("criterion: {}".format(criterion.__class__.__name__))
    # parameters carrying an "expert" flag (MoE) are reported separately
    logger.info(
        "num. shared model params: {:,} (num. trained: {:,})".format(
            sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False)),
            sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False) and p.requires_grad)
        )
    )

    logger.info(
        "num. expert model params: {} (num. trained: {})".format(
            sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)),
            sum(p.numel() for p in model.parameters() if getattr(p, "expert", False) and p.requires_grad),
        )
    )

    # Load valid dataset (we load training data below, based on the latest checkpoint)
    # We load the valid dataset AFTER building the model
    # data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg)
    if cfg.dataset.combine_valid_subsets:
        task.load_dataset("valid", combine=True, epoch=1)
    else:
        for valid_sub_split in cfg.dataset.valid_subset.split(","):
            task.load_dataset(valid_sub_split, combine=False, epoch=1)

    # (optionally) Configure quantization
    if cfg.common.quantization_config_path is not None:
        quantizer = quantization_utils.Quantizer(
            config_path=cfg.common.quantization_config_path,
            max_epoch=cfg.optimization.max_epoch,
            max_update=cfg.optimization.max_update,
        )
    else:
        quantizer = None

    # Build trainer
    if cfg.common.model_parallel_size == 1:
        trainer = Trainer(cfg, task, model, criterion, quantizer)
    else:
        trainer = MegatronTrainer(cfg, task, model, criterion)
    logger.info(
        "training on {} devices (GPUs/TPUs)".format(
            cfg.distributed_training.distributed_world_size
        )
    )
    logger.info(
        "max tokens per device = {} and max sentences per device = {}".format(
            cfg.dataset.max_tokens,
            cfg.dataset.batch_size,
        )
    )

    # Load the latest checkpoint if one is available and restore the
    # corresponding train iterator
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
        cfg.checkpoint,
        trainer,
        # don't cache epoch iterators for sharded datasets
        disable_iterator_cache=True,
    )
    if cfg.common.tpu:
        import torch_xla.core.xla_model as xm

        xm.rendezvous("load_checkpoint")  # wait for all workers

    max_epoch = cfg.optimization.max_epoch or math.inf
    if max_epoch > 0 and max_epoch != math.inf:
        # with a finite epoch budget, precompute the total number of updates
        # so the LR scheduler can be re-initialized to the full horizon
        total_num_updates = sum(
            math.ceil(len(epoch_itr) / cfg.optimization.update_freq[i])
            if i < len(cfg.optimization.update_freq) else
            math.ceil(len(epoch_itr) / cfg.optimization.update_freq[-1])
            for i in range(max_epoch)
        )
        trainer.lr_reinit(total_num_updates, trainer.get_num_updates())
    lr = trainer.get_lr()

    train_meter = meters.StopwatchMeter()
    train_meter.start()
    while epoch_itr.next_epoch_idx <= max_epoch:
        if lr <= cfg.optimization.stop_min_lr:
            logger.info(
                f"stopping training because current learning rate ({lr}) is smaller "
                "than or equal to minimum learning rate "
                f"(--stop-min-lr={cfg.optimization.stop_min_lr})"
            )
            break

        # train for one epoch
        valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
        if should_stop:
            break

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        epoch_itr = trainer.get_train_iterator(
            epoch_itr.next_epoch_idx,
            # sharded data: get train iterator for next epoch
            load_dataset=True,
            # don't cache epoch iterators for sharded datasets
            disable_iterator_cache=True,
        )
    train_meter.stop()
    logger.info("done training in {:.1f} seconds".format(train_meter.sum))

    # ioPath implementation to wait for all asynchronous file writes to complete.
    if cfg.checkpoint.write_checkpoints_asynchronously:
        logger.info(
            "ioPath PathManager waiting for all asynchronous checkpoint "
            "writes to finish."
        )
        PathManager.async_close()
        logger.info("ioPath PathManager finished waiting.")
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
    """Return True once validation hasn't improved for
    cfg.checkpoint.patience consecutive validations.

    Tracking state lives on the function object itself
    (should_stop_early.best / .num_runs), so it persists across calls.
    """
    # skip check if no validation was done in the current epoch
    if valid_loss is None:
        return False
    if cfg.checkpoint.patience <= 0:
        return False

    def is_better(a, b):
        if cfg.checkpoint.maximize_best_checkpoint_metric:
            return a > b
        return a < b

    prev_best = getattr(should_stop_early, "best", None)
    if prev_best is None or is_better(valid_loss, prev_best):
        # improvement (or first value): remember it and reset the counter
        should_stop_early.best = valid_loss
        should_stop_early.num_runs = 0
        return False
    should_stop_early.num_runs += 1
    if should_stop_early.num_runs < cfg.checkpoint.patience:
        return False
    logger.info(
        "early stop since valid performance hasn't improved for last {} runs".format(
            cfg.checkpoint.patience
        )
    )
    return True
@metrics.aggregate("train")
def train(
    cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
    """Train the model for one epoch and return validation losses."""
    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
        shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
    )
    # per-epoch update frequency (gradient accumulation); the last entry
    # applies to all epochs beyond the configured list
    update_freq = (
        cfg.optimization.update_freq[epoch_itr.epoch - 1]
        if epoch_itr.epoch <= len(cfg.optimization.update_freq)
        else cfg.optimization.update_freq[-1]
    )
    itr = iterators.GroupedIterator(itr, update_freq)
    if cfg.common.tpu:
        itr = utils.tpu_data_loader(itr)
    # tensorboard/wandb/azureml logging only happens on the master process
    progress = progress_bar.progress_bar(
        itr,
        log_format=cfg.common.log_format,
        log_file=cfg.common.log_file,
        log_interval=cfg.common.log_interval,
        epoch=epoch_itr.epoch,
        tensorboard_logdir=(
            cfg.common.tensorboard_logdir
            if distributed_utils.is_master(cfg.distributed_training)
            else None
        ),
        default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
        wandb_project=(
            cfg.common.wandb_project
            if distributed_utils.is_master(cfg.distributed_training)
            else None
        ),
        wandb_run_name=os.environ.get(
            "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
        ),
        azureml_logging=(
            cfg.common.azureml_logging
            if distributed_utils.is_master(cfg.distributed_training)
            else False
        ),
    )
    progress.update_config(_flatten_config(cfg))

    trainer.begin_epoch(epoch_itr.epoch)

    valid_subsets = cfg.dataset.valid_subset.split(",")
    should_stop = False
    num_updates = trainer.get_num_updates()
    logger.info("Start iterating over samples")
    # NOTE(review): assumes at least one batch per epoch; with an empty
    # iterator valid_losses below would be unbound — confirm upstream.
    for i, samples in enumerate(progress):
        with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
            "train_step-%d" % i
        ):
            log_output = trainer.train_step(samples)

        if log_output is not None:  # not OOM, overflow, ...
            # log mid-epoch stats
            num_updates = trainer.get_num_updates()
            if num_updates % cfg.common.log_interval == 0:
                stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
                progress.log(stats, tag="train_inner", step=num_updates)

                # reset mid-epoch stats after each log interval
                # the end-of-epoch stats will still be preserved
                metrics.reset_meters("train_inner")

        end_of_epoch = not itr.has_next()
        # may validate and/or save a checkpoint depending on cfg intervals
        valid_losses, should_stop = validate_and_save(
            cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch
        )

        if should_stop:
            break

    # log end-of-epoch stats
    logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
    stats = get_training_stats(metrics.get_smoothed_values("train"))
    progress.print(stats, tag="train", step=num_updates)

    # reset epoch-level meters
    metrics.reset_meters("train")
    return valid_losses, should_stop
def _flatten_config(cfg: DictConfig):
    """Convert *cfg* to a plain dict, folding any legacy argparse
    Namespace entries into a single "args" key."""
    config = OmegaConf.to_container(cfg)
    # remove any legacy Namespaces and replace with a single "args"
    namespace = None
    for key in list(config.keys()):
        value = config[key]
        if isinstance(value, argparse.Namespace):
            namespace = value
            del config[key]
    if namespace is not None:
        config["args"] = vars(namespace)
    return config
def validate_and_save(
    cfg: DictConfig,
    trainer: Trainer,
    task: tasks.FairseqTask,
    epoch_itr,
    valid_subsets: List[str],
    end_of_epoch: bool,
) -> Tuple[List[Optional[float]], bool]:
    """Decide whether to validate and/or checkpoint at this point.

    Returns (valid_losses, should_stop); valid_losses is [None] when no
    validation was run.
    """
    num_updates = trainer.get_num_updates()
    max_update = cfg.optimization.max_update or math.inf

    # Stopping conditions (and an additional one based on validation loss later
    # on)
    should_stop = False
    if num_updates >= max_update:
        should_stop = True
        logger.info(
            f"Stopping training due to "
            f"num_updates: {num_updates} >= max_update: {max_update}"
        )

    training_time_hours = trainer.cumulative_training_time() / (60 * 60)
    if (
        cfg.optimization.stop_time_hours > 0
        and training_time_hours > cfg.optimization.stop_time_hours
    ):
        should_stop = True
        logger.info(
            f"Stopping training due to "
            f"cumulative_training_time: {training_time_hours} > "
            f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)"
        )

    # save at epoch boundaries, on stop, or every save_interval_updates
    do_save = (
        (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
        or should_stop
        or (
            cfg.checkpoint.save_interval_updates > 0
            and num_updates > 0
            and num_updates % cfg.checkpoint.save_interval_updates == 0
            and num_updates >= cfg.dataset.validate_after_updates
        )
    )
    # validation piggybacks on mid-epoch saves and runs on its own intervals
    do_validate = (
        (not end_of_epoch and do_save)  # validate during mid-epoch saves
        or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
        or should_stop
        or (
            cfg.dataset.validate_interval_updates > 0
            and num_updates > 0
            and num_updates % cfg.dataset.validate_interval_updates == 0
        )
    ) and not cfg.dataset.disable_validation and num_updates >= cfg.dataset.validate_after_updates

    # Validate
    valid_losses = [None]
    if do_validate:
        valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)

    should_stop |= should_stop_early(cfg, valid_losses[0])

    # Save checkpoint
    if do_save or should_stop:
        checkpoint_utils.save_checkpoint(
            cfg.checkpoint, trainer, epoch_itr, valid_losses[0]
        )

    return valid_losses, should_stop
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
    """Attach elapsed wall-clock seconds to *stats* (mutated) and return it."""
    wall_meter = metrics.get_meter("default", "wall")
    stats["wall"] = round(wall_meter.elapsed_time, 0)
    return stats
def validate(
    cfg: DictConfig,
    trainer: Trainer,
    task: tasks.FairseqTask,
    epoch_itr,
    subsets: List[str],
) -> List[Optional[float]]:
    """Evaluate the model on the validation set(s) and return the losses."""

    if cfg.dataset.fixed_validation_seed is not None:
        # set fixed seed for every validation
        utils.set_torch_seed(cfg.dataset.fixed_validation_seed)

    trainer.begin_valid_epoch(epoch_itr.epoch)
    valid_losses = []
    for subset in subsets:
        logger.info('begin validation on "{}" subset'.format(subset))

        # Initialize data iterator
        itr = trainer.get_valid_iterator(subset).next_epoch_itr(
            shuffle=False, set_dataset_epoch=False  # use a fixed valid set
        )
        if cfg.common.tpu:
            itr = utils.tpu_data_loader(itr)
        progress = progress_bar.progress_bar(
            itr,
            log_format=cfg.common.log_format,
            log_interval=cfg.common.log_interval,
            epoch=epoch_itr.epoch,
            prefix=f"valid on '{subset}' subset",
            tensorboard_logdir=(
                cfg.common.tensorboard_logdir
                if distributed_utils.is_master(cfg.distributed_training)
                else None
            ),
            default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
            wandb_project=(
                cfg.common.wandb_project
                if distributed_utils.is_master(cfg.distributed_training)
                else None
            ),
            wandb_run_name=os.environ.get(
                "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
            ),
        )

        # create a new root metrics aggregator so validation metrics
        # don't pollute other aggregators (e.g., train meters)
        with metrics.aggregate(new_root=True) as agg:
            for i, sample in enumerate(progress):
                # optionally cap the number of validation steps
                if cfg.dataset.max_valid_steps is not None and i > cfg.dataset.max_valid_steps:
                    break
                trainer.valid_step(sample)

        # log validation stats
        # tasks may post-process the aggregated metrics before logging
        if hasattr(task, 'get_valid_stats'):
            stats = task.get_valid_stats(cfg, trainer, agg.get_smoothed_values())
        else:
            stats = agg.get_smoothed_values()
        stats = get_valid_stats(cfg, trainer, stats)

        if hasattr(task, "post_validate"):
            task.post_validate(trainer.get_model(), stats, agg)

        progress.print(stats, tag=subset, step=trainer.get_num_updates())

        # the checkpoint metric of each subset drives LR stepping / early stop
        valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
    return valid_losses
def get_valid_stats(
    cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
    """Augment validation *stats* with num_updates and the best metric so far."""
    stats["num_updates"] = trainer.get_num_updates()
    if not hasattr(checkpoint_utils.save_checkpoint, "best"):
        return stats
    metric_name = cfg.checkpoint.best_checkpoint_metric
    key = "best_{0}".format(metric_name)
    current = stats[metric_name]
    # "best" is a max for score-like metrics, a min for loss-like ones
    if cfg.checkpoint.maximize_best_checkpoint_metric:
        stats[key] = max(checkpoint_utils.save_checkpoint.best, current)
    else:
        stats[key] = min(checkpoint_utils.save_checkpoint.best, current)
    return stats
def cli_main(
    modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
    """Command-line entry point: parse arguments, then launch training.

    :param modify_parser: optional hook that mutates the argument parser
        before parsing (lets downstream scripts add their own options).
    """
    parser = options.get_training_parser()
    parsed = options.parse_args_and_arch(parser, modify_parser=modify_parser)
    cfg = convert_namespace_to_omegaconf(parsed)

    if cfg.common.use_plasma_view:
        server = PlasmaStore(path=cfg.common.plasma_path)
        logger.info(f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}")

    if parsed.profile:
        # Wrap the whole run in CUDA/NVTX profiling when requested.
        with torch.cuda.profiler.profile(), torch.autograd.profiler.emit_nvtx():
            distributed_utils.call_main(cfg, main)
    else:
        distributed_utils.call_main(cfg, main)

    # if cfg.common.use_plasma_view:
    #     server.server.kill()
# Standard script entry point: only run the CLI when executed directly.
if __name__ == "__main__":
    cli_main()
| 36.007576 | 109 | 0.658847 |
83fd7aaaffabb7a483a7f6f86d41e4bfe7809542 | 125 | py | Python | mywebsite/celebrities/urls.py | zncombs/practice_django | fb3e519cd325ddf64cf77a5b597046c306e4ff62 | [
"MIT"
] | null | null | null | mywebsite/celebrities/urls.py | zncombs/practice_django | fb3e519cd325ddf64cf77a5b597046c306e4ff62 | [
"MIT"
] | null | null | null | mywebsite/celebrities/urls.py | zncombs/practice_django | fb3e519cd325ddf64cf77a5b597046c306e4ff62 | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from . import views
# Map the application's root URL ("") to the index view.
urlpatterns = [
    url(r'^$', views.index, name="index"),
]
| 15.625 | 42 | 0.664 |
9a775a17f4895e3c2875a28a17e3c375bd54b30c | 1,274 | py | Python | library_management/library_management/doctype/library_transaction/library_transaction.py | Dharmraj-48/Library-Managment-Frappe | 970b9d5d58845d9c576257d1ebdca730f1ac42b9 | [
"MIT"
] | null | null | null | library_management/library_management/doctype/library_transaction/library_transaction.py | Dharmraj-48/Library-Managment-Frappe | 970b9d5d58845d9c576257d1ebdca730f1ac42b9 | [
"MIT"
] | null | null | null | library_management/library_management/doctype/library_transaction/library_transaction.py | Dharmraj-48/Library-Managment-Frappe | 970b9d5d58845d9c576257d1ebdca730f1ac42b9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, ME and contributors
# For license information, please see license.txt
# from __future__ import unicode_literals
# # import frappe
# from frappe.model.document import Document
# class LibraryTransaction(Document):
# pass
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class LibraryTransaction(Document):
    """Issue/return transaction for a library article.

    Validation checks the ordering of transactions for the same article:
    an article with an outstanding "Issue" cannot be issued again, and an
    article can only be returned when its latest prior record is an issue.
    """

    def validate(self):
        # Prior transactions for this article up to this date (excluding self).
        # NOTE: the field names below keep the schema's existing spelling.
        prior = frappe.get_list("Library Transaction",
            fields=["transcation_type", "transcation_date"],
            filters={
                "article": self.article,
                "transcation_date": ("<=", self.transcation_date),
                "name": ("!=", self.name)
            })
        issued = bool(prior) and prior[0].transcation_type == "Issue"
        if self.transcation_type == "Issue":
            # Refuse to issue an article whose prior record is still an issue.
            if issued:
                msg = _("Article {0} {1} has not been recorded as returned since {2}")
                frappe.throw(msg.format(self.article, self.article_name,
                    prior[0].transcation_date))
        elif not issued:
            # A return only makes sense when the article is currently issued.
            frappe.throw(_("Cannot return article not issued"))
5d2818c257042eaa2d43005e7aebc5bf9f99a14f | 2,237 | py | Python | sources/drawers/console_drawer.py | alxcp/RaspberryPi.ws28x.Drawer | 718e4d3dd64f1229eb1a2726bc57bfe3829dff2b | [
"MIT"
] | null | null | null | sources/drawers/console_drawer.py | alxcp/RaspberryPi.ws28x.Drawer | 718e4d3dd64f1229eb1a2726bc57bfe3829dff2b | [
"MIT"
] | null | null | null | sources/drawers/console_drawer.py | alxcp/RaspberryPi.ws28x.Drawer | 718e4d3dd64f1229eb1a2726bc57bfe3829dff2b | [
"MIT"
] | null | null | null | import time
from colr import color
from sources.helpers.color_rgb import ColorRGB
from sources.drawers.base import DrawerBase
class ConsoleDrawer(DrawerBase):
    """Drawer that renders the LED strip state as colored blocks in a terminal."""

    # Optional per-channel lookup table applied when calibration is enabled.
    calibration_table = None
    # Default calibration behaviour for set_color / set_color_raw.
    calibrate = False
    intensity_min = 170
    intensity_max = 255

    def __init__(self, n_led):
        """Create a drawer for *n_led* pixels, all initially black."""
        super().__init__(n_led)
        # '*' repeats a single shared ColorRGB(0, 0, 0) instance across slots.
        self.pixels = [ColorRGB(0, 0, 0)] * n_led
        self.pixels_indexes = range(0, n_led)

    def set_color(self, position, target_color, calibrate=None):
        """Set one pixel, optionally mapping it through the calibration table."""
        use_calibration = self.calibrate if calibrate is None else calibrate
        if use_calibration:
            self.pixels[position] = self.calibrate_color(target_color)
        else:
            self.pixels[position] = target_color

    def set_color_raw(self, position, r, g, b, calibrate=None):
        """Set one pixel from raw r/g/b channel values."""
        use_calibration = self.calibrate if calibrate is None else calibrate
        channels = (int(r), int(g), int(b))
        if use_calibration:
            channels = tuple(self.calibration_table[c] for c in channels)
        self.pixels[position] = ColorRGB(*channels)

    def set_empty(self, position):
        """Blank out one pixel."""
        self.pixels[position] = ColorRGB()

    def show(self):
        """Print the current pixel buffer as one row of colored blocks."""
        rendered = []
        for idx in self.pixels_indexes:
            px = self.pixels[idx]
            rgb = (0, 0, 0) if px is None else (px.r, px.g, px.b)
            rendered.append(color('█', fore=rgb))
            time.sleep(0.05)  # per-pixel delay while building the row
        line = "".join(rendered)
        print(f'\r{line}', end='\r')
        if self.recording:
            self.frames.append(self.pixels.copy())

    def clear(self, show=True):
        """Blank every pixel (the *show* flag is accepted but unused here)."""
        for idx in self.pixels_indexes:
            self.set_empty(idx)

    def calibrate_color(self, original_color):
        """Return a new ColorRGB with every channel mapped through the table."""
        table = self.calibration_table
        return ColorRGB(
            table[original_color.r],
            table[original_color.g],
            table[original_color.b])

    def show_frame(self, frame):
        """Replace the pixel buffer with *frame* and render it."""
        self.pixels = frame
        self.show()

    def replay(self, timeout):
        """Loop over all recorded frames until *timeout* reports expiry."""
        while not timeout.is_expired():
            for recorded in self.frames:
                self.show_frame(recorded)
6d0cd961cde46dd57a0ce87c5fb5fcaa43a8c8ec | 4,067 | py | Python | ox_mon/common/interface.py | emin63/ox_mon | 965a36c430950c47d3cce79486c1ab2cc5ee89a4 | [
"MIT"
] | null | null | null | ox_mon/common/interface.py | emin63/ox_mon | 965a36c430950c47d3cce79486c1ab2cc5ee89a4 | [
"MIT"
] | null | null | null | ox_mon/common/interface.py | emin63/ox_mon | 965a36c430950c47d3cce79486c1ab2cc5ee89a4 | [
"MIT"
] | null | null | null | """Common interface for various types of monitors in ox_mon
"""
import datetime
import logging
from ox_mon.common import exceptions
from ox_mon.common import noters
class OxMonTask:
    """Base class for an ox_mon task.

    Concrete tasks supply three things:

    1. An ``__init__`` that accepts a config object (this base stores it).
    2. A ``_do_task`` method that performs the actual work.
    3. An ``options`` classmethod describing configuration options.

    With those in place, ``run`` executes the task in a uniform way and
    routes alarms and errors to the configured notifiers, whether invoked
    from pure Python or from the command line.
    """

    def _do_task(self) -> str:
        """Perform the task; subclasses must override.

        On success, return an optional status string (or ``None``).  When
        a problem is detected, raise ``exceptions.OxMonAlarm`` describing
        it; ``run`` catches the alarm and sends the notification.

        Call ``run`` rather than invoking this method directly.
        """
        raise NotImplementedError

    @classmethod
    def options(cls):
        """Return the list of configs.OxMonOption used to configure the task."""
        raise NotImplementedError

    def __init__(self, config):
        self.config = config

    def notify(self, subject: str, msg: str):
        """Send *subject*/*msg* through every notifier named in the config.

        :param subject: String subject for notification.
        :param msg: String message for notification.
        """
        for notifier_type in self.config.notifier:
            noters.make_notifier(notifier_type, self.config).send(subject, msg)

    def run(self):
        """Execute ``_do_task`` and notify on alarms or failures.

        Whatever ``_do_task`` raises is re-raised after notification.
        """
        try:
            return self._do_task()
        except exceptions.OxMonAlarm as ox_alarm:
            logging.info(
                'Raising ox_mon alarm: %s; will attempt to notify', ox_alarm)
            summary = 'ox_mon alarm for %s' % self.__class__.__name__
            self.notify(summary, '%s at UTC %s\n%s' % (
                summary, datetime.datetime.utcnow(), str(ox_alarm)))
            raise
        except exceptions.OxMonException as ox_prob:
            logging.warning(
                'Got ox_mon exception: %s; will attempt to notify', ox_prob)
            self.notify('Error: ox_mon failed',
                        'Error: ox_mon failed:\n%s' % str(ox_prob))
            raise
        except Exception as unexpected:
            logging.error(
                'Got unexpected exception %s; will attempt to notify',
                str(unexpected))
            self.notify('Error: ox_mon unexpected exception',
                        'Error: ox_mon unexpected exception:\n%s' % str(
                            unexpected))
            raise
class Checker(OxMonTask):
    """OxMonTask specialization for read-only system checks.

    A Checker inspects the system without modifying it.  Sub-classes
    implement ``_check``; this class wires it into the OxMonTask
    ``_do_task`` hook.
    """

    def _check(self) -> str:
        """Inspect the system; raise OxMonAlarm when a problem is found.

        May return an optional status string when everything is OK.
        Subclasses must override this method.
        """
        raise NotImplementedError

    def _do_task(self) -> str:
        """Delegate the task body to ``_check``."""
        return self._check()
| 33.61157 | 77 | 0.634866 |
5d20d8500cd369b5790e69fae73d72eeab506581 | 1,609 | py | Python | tethysapp/modflow/job_executables/update_resource_status.py | Aquaveo/tethysapp-modflow | 5e662d8346f2ffd414ac912a531eef06c5ae79d9 | [
"BSD-3-Clause"
] | null | null | null | tethysapp/modflow/job_executables/update_resource_status.py | Aquaveo/tethysapp-modflow | 5e662d8346f2ffd414ac912a531eef06c5ae79d9 | [
"BSD-3-Clause"
] | null | null | null | tethysapp/modflow/job_executables/update_resource_status.py | Aquaveo/tethysapp-modflow | 5e662d8346f2ffd414ac912a531eef06c5ae79d9 | [
"BSD-3-Clause"
] | null | null | null | #!/opt/tethys-python
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from modflow_adapter.models.app_users.modflow_model_resource import ModflowModelResource
def run(workflow, resource_db_url, resource_id):
    """Recompute and persist the root status of one resource.

    :param workflow: workflow name; only 'upload' has sub-status keys here,
        so any other workflow results in a failed root status.
    :param resource_db_url: SQLAlchemy URL for the resource database.
    :param resource_id: primary key of the ModflowModelResource to update.
    """
    session = None
    try:
        # Open a session and load the target resource.
        engine = create_engine(resource_db_url)
        session = sessionmaker(bind=engine)()
        resource = session.query(ModflowModelResource).get(resource_id)

        # Success requires every upload sub-status to be in the OK set.
        succeeded = False
        if workflow == 'upload':
            sub_keys = (
                ModflowModelResource.UPLOAD_STATUS_KEY,
                ModflowModelResource.UPLOAD_GS_STATUS_KEY,
            )
            sub_statuses = [resource.get_status(key, None) for key in sub_keys]
            succeeded = all(
                status in ModflowModelResource.OK_STATUSES
                for status in sub_statuses
            )

        # Persist the derived root status.
        if succeeded:
            resource.set_status(ModflowModelResource.ROOT_STATUS_KEY,
                                ModflowModelResource.STATUS_SUCCESS)
        else:
            resource.set_status(ModflowModelResource.ROOT_STATUS_KEY,
                                ModflowModelResource.STATUS_FAILED)
        session.commit()
    finally:
        # session stays None when engine/session creation itself failed.
        if session is not None:
            session.close()
if __name__ == '__main__':
    # Drop the script name and forward the remaining CLI args to run().
    args = sys.argv
    args.pop(0)
    run(*args)
| 34.978261 | 106 | 0.740833 |
36a65d16899ac01b9143a7bd6ac77b13d5471858 | 1,573 | py | Python | MCQ Scraper.py | sdevandran/MCQ-Scraper | 53f29ed4acd580dfdd7a53d5413690f6cc645fa0 | [
"MIT"
] | null | null | null | MCQ Scraper.py | sdevandran/MCQ-Scraper | 53f29ed4acd580dfdd7a53d5413690f6cc645fa0 | [
"MIT"
] | null | null | null | MCQ Scraper.py | sdevandran/MCQ-Scraper | 53f29ed4acd580dfdd7a53d5413690f6cc645fa0 | [
"MIT"
] | null | null | null | import requests
import re
from bs4 import BeautifulSoup
chapters = 32
# https://pinoybix.org/2015/10/practice-quiz-in-data-communications-and-networking.html
response = requests.get(
'https://pinoybix.org/2020/02/quiz-in-data-communications-and-networking-ece-board-exam.html')
a = response.text
z = re.findall('<div style="border-top: #ffd324 1px solid; border-right: #ffd324 1px solid; background: #fff6bf; border-bottom: #ffd324 1px solid; padding-bottom: 10px; padding-top: 10px; padding-left: 10px; margin: 10px 0px; border-left: #ffd324 1px solid; padding-right: 10px" align="justify"> <strong>(.*)</strong></div>', a)
z = z[:chapters]
a = z
for z, i in enumerate(a):
print(f'Chapter {z + 1}')
for j in i.split('</a>, <a'):
p = re.findall(
'title="(.*)" href="(.*)" rel="noopener noreferrer"', j)[0]
print(p[0].replace('MCQs', 'Quiz').replace('MCQ', 'Quiz'))
response = requests.get(p[1])
sdata = response.text
soup = BeautifulSoup(sdata, features="lxml")
try:
mydivs = soup.findAll('div', {"class": "wps-active"})
sdata = f'''{mydivs[0]}'''
except:
mydivs = soup.findAll('div', {"class": "pf-content"})
sdata = f'''{mydivs[0]}'''
soup = BeautifulSoup(sdata, features="lxml")
p = soup.find_all('p')
for i in p:
if 'NEXT: MCQ in' not in i.text:
print(i.text.replace(
'Choose the letter of the best answer in each questions.', ''))
| 47.666667 | 334 | 0.586777 |
d035aecc9aad87c742e98e4a65baae134b96aae4 | 4,188 | py | Python | tests/python/pants_test/help/test_build_dictionary_info_extracter.py | mosesn/pants | 7fddcc5d68272a03e9a2b4224b8da728d35eabb2 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/help/test_build_dictionary_info_extracter.py | mosesn/pants | 7fddcc5d68272a03e9a2b4224b8da728d35eabb2 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/help/test_build_dictionary_info_extracter.py | mosesn/pants | 7fddcc5d68272a03e9a2b4224b8da728d35eabb2 | [
"Apache-2.0"
] | 1 | 2019-06-10T17:24:34.000Z | 2019-06-10T17:24:34.000Z | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.build_graph.build_file_aliases import BuildFileAliases, TargetMacro
from pants.build_graph.target import Target
from pants.help.build_dictionary_info_extracter import (BuildDictionaryInfoExtracter, FunctionArg,
TargetTypeInfo)
class BuildDictionaryInfoExtracterTest(unittest.TestCase):
  # Exercises BuildDictionaryInfoExtracter's docstring parsing and
  # signature introspection.  The nested classes/functions below are
  # fixtures: their exact docstring text (including blank lines and
  # indentation) is what the extracter parses, so do not reformat them.

  def test_get_description_from_docstring(self):
    # Multi-line docstring: only the first line becomes the description.
    class Test1(object):
      """First line.

      Subsequent lines.
      """

    self.assertEqual('First line.',
                     BuildDictionaryInfoExtracter.get_description_from_docstring(Test1))

    # Single-line docstring is used verbatim.
    class Test2(object):
      """Only one line."""

    self.assertEqual('Only one line.',
                     BuildDictionaryInfoExtracter.get_description_from_docstring(Test2))

  def test_get_arg_descriptions_from_docstring(self):
    # ':param <name>:' lines map names to descriptions; the type token in
    # ':param str b:' is not part of the key.
    def func(a, b, c):
      """Foo function.

      :param a: Parameter a.
      :param str b: Parameter b.
      :param c: Parameter c.
      """

    self.assertEqual({'a': 'Parameter a.', 'b': 'Parameter b.', 'c': 'Parameter c.'},
                     BuildDictionaryInfoExtracter.get_arg_descriptions_from_docstring(func))

  def test_get_function_args(self):
    # Test standalone function.  FunctionArg fields: (name, description,
    # has_default, default).
    def func(arg1, arg2, arg3=42, arg4=None, arg5='foo'):
      pass

    self.assertEqual([FunctionArg('arg1', '', False, None), FunctionArg('arg2', '', False, None),
                      FunctionArg('arg3', '', True, 42), FunctionArg('arg4', '', True, None),
                      FunctionArg('arg5', '', True, 'foo')],
                     BuildDictionaryInfoExtracter.get_function_args(func))

    # Test member function.
    class TestCls(object):
      def __init__(self, arg1, arg2=False):
        pass

    self.assertEqual([FunctionArg('arg1', '', False, None), FunctionArg('arg2', '', True, False)],
                     BuildDictionaryInfoExtracter.get_function_args(TestCls.__init__))

  def test_get_target_args(self):
    # Target args accumulate through the inheritance chain and are appended
    # after the standard args shared by all targets.
    class Target1(Target):
      def __init__(self, arg1, arg2=42, **kwargs):
        """
        :param arg1: The first arg.
        :param arg2: The second arg.
        """
        super(Target1, self).__init__(**kwargs)

    class Target2(Target1):
      pass

    class Target3(Target2):
      def __init__(self, arg3, arg4=None, **kwargs):
        # NOTE(review): super(Target1, self) here skips Target1/Target2
        # __init__ in the MRO — presumably fine since only static
        # introspection is tested; verify if this fixture is reused.
        super(Target1, self).__init__(**kwargs)

    self.maxDiff = None
    self.assertEqual(BuildDictionaryInfoExtracter.basic_target_args + [
                       FunctionArg('arg1', 'The first arg.', False, None),
                       FunctionArg('arg2', 'The second arg.', True, 42),
                       FunctionArg('arg3', '', False, None),
                       FunctionArg('arg4', '', True, None)
                     ],
                     BuildDictionaryInfoExtracter.get_target_args(Target3))

  def test_get_target_type_info(self):
    class Target1(Target):
      """Target1 docstring."""
      pass

    class Target2a(Target):
      # No docstring, so we should take the one from Target2b.
      pass

    class Target2b(Target):
      """Target2 docstring."""
      pass

    class Target3(Target):
      """Target3 docstring."""
      pass

    # We shouldn't get as far as invoking the context factory, so it can be trivial.
    macro_factory = TargetMacro.Factory.wrap(lambda ctx: None, Target2a, Target2b)

    bfa = BuildFileAliases(targets={
        'target1': Target1,
        'target2': macro_factory,
        'target3': Target3,
      },
      objects={},
      context_aware_object_factories={}
    )
    extracter = BuildDictionaryInfoExtracter(bfa)
    # The macro alias 'target2' reports the docstring found among its
    # expansion targets (Target2b), since Target2a has none.
    self.assertEquals([TargetTypeInfo('target1', 'Target1 docstring.'),
                       TargetTypeInfo('target2', 'Target2 docstring.'),
                       TargetTypeInfo('target3', 'Target3 docstring.')],
                      extracter.get_target_type_info())
| 34.04878 | 98 | 0.624881 |
9c56925cd7e8838c611aad58fb5a2bf28d91beb8 | 1,313 | py | Python | cairis/gui/PersonaCharacteristicDialogParameters.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | [
"Apache-2.0"
] | null | null | null | cairis/gui/PersonaCharacteristicDialogParameters.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | [
"Apache-2.0"
] | null | null | null | cairis/gui/PersonaCharacteristicDialogParameters.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import DialogClassParameters
class PersonaCharacteristicDialogParameters(DialogClassParameters.DialogClassParameters):
  """Dialog parameters for a persona characteristic.

  Extends the generic dialog parameters with the persona the
  characteristic belongs to and its behavioural variable.
  """

  def __init__(self, winId, winLabel, dClass, createId, setterFn, creationFlag, pName, bvName):
    DialogClassParameters.DialogClassParameters.__init__(
      self, winId, winLabel, dClass, createId, setterFn, creationFlag)
    self.thePersonaName = pName
    self.theBehaviouralVariable = bvName

  def persona(self):
    """Return the persona name."""
    return self.thePersonaName

  def behaviouralVariable(self):
    """Return the behavioural variable name."""
    return self.theBehaviouralVariable
| 45.275862 | 115 | 0.794364 |
97ba1ae551da9c61fb99a13a723a2fce8381e3ff | 3,180 | py | Python | sourceTransferLearn/predict.py | ZombieSocrates/ml-udacity-capstone | 621cb4896ae3ce32c0eb6ee46d4783b6b0ab4bbc | [
"MIT"
] | null | null | null | sourceTransferLearn/predict.py | ZombieSocrates/ml-udacity-capstone | 621cb4896ae3ce32c0eb6ee46d4783b6b0ab4bbc | [
"MIT"
] | null | null | null | sourceTransferLearn/predict.py | ZombieSocrates/ml-udacity-capstone | 621cb4896ae3ce32c0eb6ee46d4783b6b0ab4bbc | [
"MIT"
] | null | null | null | # import libraries
import os
import numpy as np
import torch
from six import BytesIO
from train import _get_pretrained_model
# The only content type this inference handler accepts and emits:
# a serialized numpy array.
NP_CONTENT_TYPE = 'application/x-npy'
# Provided model load function
def model_fn(model_dir):
    """Reconstruct the trained PyTorch model saved under *model_dir*.

    Reads 'model_info.pth' for the architecture hyperparameters, rebuilds
    the network from them, restores its weights from 'model.pth', and
    returns the model in eval mode on the available device.
    """
    print("Loading model.")

    # Hyperparameters stored alongside the weights at training time.
    with open(os.path.join(model_dir, 'model_info.pth'), 'rb') as info_file:
        model_info = torch.load(info_file)
    print("model_info: {}".format(model_info))

    # Build the architecture, then pick GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = _get_pretrained_model(
        model_name=model_info["model_name"],
        hidden_units=model_info["hidden_units"],
        dropout=model_info["dropout"])

    # Restore the learned weights.
    with open(os.path.join(model_dir, 'model.pth'), 'rb') as weights_file:
        model.load_state_dict(torch.load(weights_file))

    # Inference only: move to device and switch to eval mode.
    model.to(device).eval()

    print("Done loading model.")
    return model
# Provided input data loading
def input_fn(serialized_input_data, content_type):
    """Deserialize a request body into a numpy array.

    Only the numpy content type is accepted; any other type is rejected
    with an exception.
    """
    print('Deserializing the input data.')
    if content_type != NP_CONTENT_TYPE:
        raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
    return np.load(BytesIO(serialized_input_data))
# Provided output data handling
def output_fn(prediction_output, accept):
    """Serialize a prediction result for the requested content type.

    Returns a ``(bytes, content_type)`` pair for the numpy content type
    and rejects everything else with an exception.
    """
    print('Serializing the generated output.')
    if accept != NP_CONTENT_TYPE:
        raise Exception('Requested unsupported ContentType in Accept: ' + accept)
    buffer = BytesIO()
    np.save(buffer, prediction_output)
    return buffer.getvalue(), accept
# Modified predict function
def predict_fn(input_data, model, topk=5):
    '''Moves serialized numpy input to the available device, converts it to a
    Torch tensor, applies the model, and returns the top class labels with
    their associated probabilities.

    Inputs:
    input_data: a numpy array that has already had ImageFolder transforms
    applied to it (AKA, passed in from a DataLoader)
    model: the PyTorch model from model_fn
    topk: controls how many of the most likely classes will be returned by
    the model

    Output:
    a numpy array with two sub arrays, the first being the class labels
    and the second being the associated probabilities (the labels are
    upcast to float to share the array's dtype)
    '''
    print('Predicting top {} class labels for the input data...'.format(topk))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data = torch.from_numpy(input_data.astype('float32')).to(device)

    model.eval()
    # Inference only: disable autograd to avoid tracking gradients.
    with torch.no_grad():
        out = model(data)
        # torch.exp assumes the model's outputs are log-probabilities.
        ps = torch.exp(out)
        top_ps, top_class = ps.topk(topk, dim=1)

    classes_np = top_class.cpu().detach().numpy()
    probs_np = top_ps.cpu().detach().numpy()
    out_array = np.array([classes_np, probs_np])
    return out_array
b90d81514ffce5b4f88cb2c91d2960bb6c69f9f8 | 5,108 | py | Python | test/functional/feature_versionbits_warning.py | orobio/gulden-official | a329faf163b15eabc7ff1d9f07ea87f66df8d27d | [
"MIT"
] | 158 | 2016-01-08T10:38:37.000Z | 2022-02-01T06:28:05.000Z | test/functional/feature_versionbits_warning.py | orobio/gulden-official | a329faf163b15eabc7ff1d9f07ea87f66df8d27d | [
"MIT"
] | 196 | 2015-11-19T10:59:24.000Z | 2021-10-07T14:52:13.000Z | test/functional/feature_versionbits_warning.py | orobio/gulden-official | a329faf163b15eabc7ff1d9f07ea87f66df8d27d | [
"MIT"
] | 71 | 2016-06-25T23:29:04.000Z | 2022-03-14T10:57:19.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test version bits warning system.
Generate chains with block versions that appear to be signalling unknown
soft-forks, and test that warning alerts are generated.
"""
import os
import re
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import msg_block
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import wait_until
VB_PERIOD = 144 # versionbits period length for regtest
VB_THRESHOLD = 108 # versionbits activation threshold for regtest
VB_TOP_BITS = 0x20000000
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
VB_UNKNOWN_VERSION = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)
WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
VB_PATTERN = re.compile("Warning: unknown new rules activated.*versionbit")
class VersionBitsWarningTest(GuldenTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
# Open and close to create zero-length file
with open(self.alert_filename, 'w', encoding='utf8'):
pass
self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
self.setup_nodes()
def send_blocks_with_version(self, peer, numblocks, version):
"""Send numblocks blocks to peer with version set"""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount()
block_time = self.nodes[0].getblockheader(tip)["time"] + 1
tip = int(tip, 16)
for _ in range(numblocks):
block = create_block(tip, create_coinbase(height + 1), block_time)
block.nVersion = version
block.solve()
peer.send_message(msg_block(block))
block_time += 1
height += 1
tip = block.sha256
peer.sync_with_ping()
def versionbits_in_alert_file(self):
"""Test that the versionbits warning has been written to the alert file."""
alert_text = open(self.alert_filename, 'r', encoding='utf8').read()
return VB_PATTERN.search(alert_text) is not None
def run_test(self):
node = self.nodes[0]
node.add_p2p_connection(P2PInterface())
node_deterministic_address = node.get_deterministic_priv_key().address
# Mine one period worth of blocks
node.generatetoaddress(VB_PERIOD, node_deterministic_address)
self.log.info("Check that there is no warning if previous VB_BLOCKS have <VB_THRESHOLD blocks with unknown versionbits version.")
# Build one period of blocks with < VB_THRESHOLD blocks signaling some unknown bit
self.send_blocks_with_version(node.p2p, VB_THRESHOLD - 1, VB_UNKNOWN_VERSION)
node.generatetoaddress(VB_PERIOD - VB_THRESHOLD + 1, node_deterministic_address)
# Check that we're not getting any versionbit-related errors in get*info()
assert not VB_PATTERN.match(node.getmininginfo()["warnings"])
assert not VB_PATTERN.match(node.getnetworkinfo()["warnings"])
# Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
self.send_blocks_with_version(node.p2p, VB_THRESHOLD, VB_UNKNOWN_VERSION)
node.generatetoaddress(VB_PERIOD - VB_THRESHOLD, node_deterministic_address)
self.log.info("Check that there is a warning if previous VB_BLOCKS have >=VB_THRESHOLD blocks with unknown versionbits version.")
# Mine a period worth of expected blocks so the generic block-version warning
# is cleared. This will move the versionbit state to ACTIVE.
node.generatetoaddress(VB_PERIOD, node_deterministic_address)
# Stop-start the node. This is required because GuldenD will only warn once about unknown versions or unknown rules activating.
self.restart_node(0)
# Generating one block guarantees that we'll get out of IBD
node.generatetoaddress(1, node_deterministic_address)
wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=mininode_lock)
# Generating one more block will be enough to generate an error.
node.generatetoaddress(1, node_deterministic_address)
# Check that get*info() shows the versionbits unknown rules warning
assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"]
assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"]
# Check that the alert file shows the versionbits unknown rules warning
wait_until(lambda: self.versionbits_in_alert_file(), timeout=60)
if __name__ == '__main__':
VersionBitsWarningTest().main()
| 48.647619 | 137 | 0.72083 |
b58a885afe2a27e9d952474c2192975b67ccea88 | 2,809 | py | Python | pybvc/common/result.py | brocade/pybvc | 316e8cb79ecbeb3670276afd43286e57897bc8ba | [
"BSD-3-Clause"
] | 1 | 2015-11-22T15:53:00.000Z | 2015-11-22T15:53:00.000Z | pybvc/common/result.py | brocade/pybvc | 316e8cb79ecbeb3670276afd43286e57897bc8ba | [
"BSD-3-Clause"
] | null | null | null | pybvc/common/result.py | brocade/pybvc | 316e8cb79ecbeb3670276afd43286e57897bc8ba | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2015
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES;LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
result.py: Result of HTTP communication session (status and data)
"""
from pybvc.common.status import OperStatus
#-------------------------------------------------------------------------------
# Class 'Result'
#-------------------------------------------------------------------------------
class Result(object):
""" Result of completed HTTP session (status and data) """
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self, status=None, data=None):
""" Initializes this object properties. """
if isinstance(status, OperStatus) == False:
raise TypeError(status)
self.status = status
self.data = data
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def get_status(self):
assert (self.status != None)
return self.status
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def get_data(self):
return self.data
| 43.215385 | 80 | 0.574938 |
7f7fa17daa5a57444c3960b43e1f9f81518e3a26 | 1,992 | py | Python | scripts/rf_sys_inventory.py | billdodd/Redfish-Tacklebox | b2337e8fb33a90bef720a1b4b1149658d35c8c57 | [
"FSFAP"
] | null | null | null | scripts/rf_sys_inventory.py | billdodd/Redfish-Tacklebox | b2337e8fb33a90bef720a1b4b1149658d35c8c57 | [
"FSFAP"
] | null | null | null | scripts/rf_sys_inventory.py | billdodd/Redfish-Tacklebox | b2337e8fb33a90bef720a1b4b1149658d35c8c57 | [
"FSFAP"
] | null | null | null | #! /usr/bin/python
# Copyright Notice:
# Copyright 2019-2020 DMTF. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Tacklebox/blob/master/LICENSE.md
"""
Redfish System Inventory
File : rf_sys_inventory.py
Brief : This script uses the redfish_utilities module to dump system inventory
information
"""
import argparse
import redfish
import redfish_utilities
import json
# Get the input arguments
argget = argparse.ArgumentParser( description = "A tool to walk a Redfish service and list component information" )
argget.add_argument( "--user", "-u", type = str, required = True, help = "The user name for authentication" )
argget.add_argument( "--password", "-p", type = str, required = True, help = "The password for authentication" )
argget.add_argument( "--rhost", "-r", type = str, required = True, help = "The address of the Redfish service (with scheme)" )
argget.add_argument( "--details", "-details", action = "store_true", help = "Indicates if the full details of each component should be shown" )
argget.add_argument( "--noabsent", "-noabsent", action = "store_true", help = "Indicates if absent devices should be skipped" )
# --write takes an optional value: "-w" alone falls back to const ("Device_Inventory")
argget.add_argument( "--write", "-w", nargs = "?", const = "Device_Inventory", type = str, help = "Indicates if the inventory should be written to a spreadsheet and what the file name should be if given" )
args = argget.parse_args()
# Set up the Redfish object
redfish_obj = redfish.redfish_client( base_url = args.rhost, username = args.user, password = args.password )
redfish_obj.login( auth = "session" )
try:
    # Get and print the system inventory
    inventory = redfish_utilities.get_system_inventory( redfish_obj )
    redfish_utilities.print_system_inventory( inventory, details = args.details, skip_absent = args.noabsent )
    if( args.write ):
        redfish_utilities.write_system_inventory( inventory, args.write )
finally:
    # Log out
    # try/finally ensures the session is closed even if inventory retrieval fails
    redfish_obj.logout()
| 43.304348 | 205 | 0.73494 |
87136594fdb7bc800b5cc6758c18fde8b765f9cc | 5,994 | py | Python | spoopy/tools/classifier/keras_to_tensorflow.py | rodrigobressan/PADify | 362db2b3a33793ac53f938e89f90a6ecdf778e89 | [
"MIT"
] | 12 | 2019-11-26T07:44:08.000Z | 2021-03-03T09:51:43.000Z | spoopy/tools/classifier/keras_to_tensorflow.py | rodrigobressan/PADify | 362db2b3a33793ac53f938e89f90a6ecdf778e89 | [
"MIT"
] | 13 | 2020-01-28T22:09:41.000Z | 2022-03-11T23:43:37.000Z | spoopy/tools/classifier/keras_to_tensorflow.py | rodrigobressan/PADify | 362db2b3a33793ac53f938e89f90a6ecdf778e89 | [
"MIT"
] | 5 | 2020-01-02T09:52:42.000Z | 2022-02-21T15:45:23.000Z |
# coding: utf-8
# In[ ]:
"""
Copyright (c) 2017, by the Authors: Amir H. Abdi
This software is freely available under the MIT Public License.
Please see the License file in the root for details.
The following code snippet will convert the keras model file,
which is saved using model.save('kerasmodel_weight_file'),
to the freezed .pb tensorflow weight file which holds both the
network architecture and its associated weights.
""";
# In[ ]:
import os
'''
Input arguments:
num_output: this value has nothing to do with the number of classes, batch_size, etc.,
and it is mostly equal to 1. If the network is a **multi-stream network**
(forked network with multiple outputs), set the value to the number of outputs.
quantize: if set to True, use the quantize feature of Tensorflow
(https://www.tensorflow.org/performance/quantization) [default: False]
use_theano: Thaeno and Tensorflow implement convolution in different ways.
When using Keras with Theano backend, the order is set to 'channels_first'.
This feature is not fully tested, and doesn't work with quantizization [default: False]
input_fld: directory holding the keras weights file [default: .]
output_fld: destination directory to save the tensorflow files [default: .]
input_model_file: name of the input weight file [default: 'model.h5']
output_model_file: name of the output weight file [default: args.input_model_file + '.pb']
graph_def: if set to True, will write the graph definition as an ascii file [default: False]
output_graphdef_file: if graph_def is set to True, the file name of the
graph definition [default: model.ascii]
output_node_prefix: the prefix to use for output nodes. [default: output_node]
'''
# Parse input arguments
# In[ ]:
import argparse
# Command-line interface for the converter.
# NOTE(review): argparse's type=bool is a known pitfall -- any non-empty
# string (including "False") is truthy, so the boolean options below can
# only reliably be left at their defaults. A str2bool converter would fix
# this but would change the CLI contract; flagged rather than changed.
parser = argparse.ArgumentParser(description='set input arguments')
parser.add_argument('-input_fld', action="store",
                    dest='input_fld', type=str, default='.')
parser.add_argument('-output_fld', action="store",
                    dest='output_fld', type=str, default='')
parser.add_argument('-input_model_file', action="store",
                    dest='input_model_file', type=str, default='model.h5')
parser.add_argument('-output_model_file', action="store",
                    dest='output_model_file', type=str, default='')
parser.add_argument('-output_graphdef_file', action="store",
                    dest='output_graphdef_file', type=str, default='model.ascii')
parser.add_argument('-num_outputs', action="store",
                    dest='num_outputs', type=int, default=1)
parser.add_argument('-graph_def', action="store",
                    dest='graph_def', type=bool, default=True)
parser.add_argument('-output_node_prefix', action="store",
                    dest='output_node_prefix', type=str, default='output_node')
parser.add_argument('-quantize', action="store",
                    dest='quantize', type=bool, default=False)
parser.add_argument('-theano_backend', action="store",
                    dest='theano_backend', type=bool, default=False)
# '-f' is accepted and ignored -- presumably so the script also runs under
# Jupyter, which injects a kernel file via -f. TODO confirm.
parser.add_argument('-f')
args = parser.parse_args()
parser.print_help()
print('input args: ', args)
# Quantization uses TensorFlow graph transforms, which are incompatible
# with Theano's channels-first convolution layout.
if args.theano_backend is True and args.quantize is True:
    raise ValueError("Quantize feature does not work with theano backend.")
# initialize
# In[ ]:
from keras.models import load_model
import tensorflow as tf
from pathlib import Path
from keras import backend as K
# Resolve the output folder (defaults to the input folder) and the output
# file name (defaults to "<input model name>.pb"), creating the folder if needed.
output_fld = args.input_fld if args.output_fld == '' else args.output_fld
if args.output_model_file == '':
    args.output_model_file = str(Path(args.input_model_file).name) + '.pb'
if not os.path.exists(output_fld):
    Path(output_fld).mkdir(parents=True)
# Full path to the Keras model file that will be converted.
weight_file_path = str(Path(args.input_fld) / args.input_model_file)
# Load keras model and rename output
# In[ ]:
# Inference mode: disables training-only behavior (e.g. dropout).
K.set_learning_phase(0)
# Theano and TensorFlow order convolution channels differently.
if args.theano_backend:
    K.set_image_data_format('channels_first')
else:
    K.set_image_data_format('channels_last')
try:
    net_model = load_model(weight_file_path)
except ValueError as err:
    print('''Input file specified ({}) only holds the weights, and not the model defenition.
    Save the model using mode.save(filename.h5) which will contain the network architecture
    as well as its weights.
    If the model is saved using model.save_weights(filename.h5), the model architecture is
    expected to be saved separately in a json format and loaded prior to loading the weights.
    Check the keras documentation for more details (https://keras.io/getting-started/faq/)'''
          .format(weight_file_path))
    raise err
# Alias each model output to a predictably named identity node so the
# frozen graph's outputs can be referenced by name at inference time.
num_output = args.num_outputs
pred = [None]*num_output
pred_node_names = [None]*num_output
for i in range(num_output):
    pred_node_names[i] = args.output_node_prefix+str(i)
    pred[i] = tf.identity(net_model.outputs[i], name=pred_node_names[i])
print('output nodes names are: ', pred_node_names)
# [optional] write graph definition in ascii
# In[ ]:
sess = K.get_session()
# Optionally dump the graph definition as a human-readable ascii file.
if args.graph_def:
    f = args.output_graphdef_file
    tf.train.write_graph(sess.graph.as_graph_def(), output_fld, f, as_text=True)
    print('saved the graph definition in ascii format at: ', str(Path(output_fld) / f))
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
if args.quantize:
    # Quantize weights/nodes before freezing
    # (see https://www.tensorflow.org/performance/quantization).
    from tensorflow.tools.graph_transforms import TransformGraph
    transforms = ["quantize_weights", "quantize_nodes"]
    transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [], pred_node_names, transforms)
    constant_graph = graph_util.convert_variables_to_constants(sess, transformed_graph_def, pred_node_names)
else:
    # "Freezing": bake current variable values into the graph as constants.
    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)
graph_io.write_graph(constant_graph, output_fld, args.output_model_file, as_text=False)
print('saved the freezed graph (ready for inference) at: ', str(Path(output_fld) / args.output_model_file))
| 37 | 112 | 0.737905 |
c570d0871eb27d3607cdd05fbe0218ffd36b9a2e | 35,599 | py | Python | stp_zmq/zstack.py | evernym/indy-plenum | dc390caa16c0b15dcc549d557ede6f64c0c1b842 | [
"Apache-2.0"
] | null | null | null | stp_zmq/zstack.py | evernym/indy-plenum | dc390caa16c0b15dcc549d557ede6f64c0c1b842 | [
"Apache-2.0"
] | null | null | null | stp_zmq/zstack.py | evernym/indy-plenum | dc390caa16c0b15dcc549d557ede6f64c0c1b842 | [
"Apache-2.0"
] | 2 | 2017-12-13T21:14:54.000Z | 2021-06-06T15:48:03.000Z | import inspect
from stp_core.common.config.util import getConfig
from stp_core.common.constants import CONNECTION_PREFIX, ZMQ_NETWORK_PROTOCOL
try:
import ujson as json
except ImportError:
import json
import os
import shutil
import sys
import time
from binascii import hexlify, unhexlify
from collections import deque
from typing import Mapping, Tuple, Any, Union, Optional
from common.exceptions import PlenumTypeError, PlenumValueError
# import stp_zmq.asyncio
import zmq.auth
from stp_core.crypto.nacl_wrappers import Signer, Verifier
from stp_core.crypto.util import isHex, ed25519PkToCurve25519
from stp_core.network.exceptions import PublicKeyNotFoundOnDisk, VerKeyNotFoundOnDisk
from stp_zmq.authenticator import MultiZapAuthenticator
from zmq.utils import z85
import zmq
from stp_core.common.log import getlogger
from stp_core.network.network_interface import NetworkInterface
from stp_zmq.util import createEncAndSigKeys, \
moveKeyFilesToCorrectLocations, createCertsFromKeys
from stp_zmq.remote import Remote, set_keepalive, set_zmq_internal_queue_size
from plenum.common.exceptions import InvalidMessageExceedingSizeException
from stp_core.validators.message_length_validator import MessageLenValidator
logger = getlogger()
# TODO: Use Async io
# TODO: There a number of methods related to keys management,
# they can be moved to some class like KeysManager
class ZStack(NetworkInterface):
# Assuming only one listener per stack for now.
PublicKeyDirName = 'public_keys'
PrivateKeyDirName = 'private_keys'
VerifKeyDirName = 'verif_keys'
SigKeyDirName = 'sig_keys'
sigLen = 64
pingMessage = 'pi'
pongMessage = 'po'
healthMessages = {pingMessage.encode(), pongMessage.encode()}
# TODO: This is not implemented, implement this
messageTimeout = 3
_RemoteClass = Remote
def __init__(self, name, ha, basedirpath, msgHandler, restricted=True,
seed=None, onlyListener=False, config=None, msgRejectHandler=None, queue_size=0):
self._name = name
self.ha = ha
self.basedirpath = basedirpath
self.msgHandler = msgHandler
self.seed = seed
self.queue_size = queue_size
self.config = config or getConfig()
self.msgRejectHandler = msgRejectHandler or self.__defaultMsgRejectHandler
self.listenerQuota = self.config.DEFAULT_LISTENER_QUOTA
self.senderQuota = self.config.DEFAULT_SENDER_QUOTA
self.msgLenVal = MessageLenValidator(self.config.MSG_LEN_LIMIT)
self.homeDir = None
# As of now there would be only one file in secretKeysDir and sigKeyDir
self.publicKeysDir = None
self.secretKeysDir = None
self.verifKeyDir = None
self.sigKeyDir = None
self.signer = None
self.verifiers = {}
self.setupDirs()
self.setupOwnKeysIfNeeded()
self.setupSigning()
# self.poller = test.asyncio.Poller()
self.restricted = restricted
self.ctx = None # type: Context
self.listener = None
self.auth = None
# Each remote is identified uniquely by the name
self._remotes = {} # type: Dict[str, Remote]
self.remotesByKeys = {}
# Indicates if this stack will maintain any remotes or will
# communicate simply to listeners. Used in ClientZStack
self.onlyListener = onlyListener
self.peersWithoutRemotes = set()
self._conns = set() # type: Set[str]
self.rxMsgs = deque()
self._created = time.perf_counter()
self.last_heartbeat_at = None
def __defaultMsgRejectHandler(self, reason: str, frm):
pass
@property
def remotes(self):
return self._remotes
@property
def created(self):
return self._created
@property
def name(self):
return self._name
@staticmethod
def isRemoteConnected(r) -> bool:
return r.isConnected
def removeRemote(self, remote: Remote, clear=True):
"""
Currently not using clear
"""
name = remote.name
pkey = remote.publicKey
vkey = remote.verKey
if name in self.remotes:
self.remotes.pop(name)
self.remotesByKeys.pop(pkey, None)
self.verifiers.pop(vkey, None)
else:
logger.debug('No remote named {} present')
@staticmethod
def initLocalKeys(name, baseDir, sigseed, override=False):
sDir = os.path.join(baseDir, '__sDir')
eDir = os.path.join(baseDir, '__eDir')
os.makedirs(sDir, exist_ok=True)
os.makedirs(eDir, exist_ok=True)
(public_key, secret_key), (verif_key, sig_key) = createEncAndSigKeys(eDir,
sDir,
name,
seed=sigseed)
homeDir = ZStack.homeDirPath(baseDir, name)
verifDirPath = ZStack.verifDirPath(homeDir)
sigDirPath = ZStack.sigDirPath(homeDir)
secretDirPath = ZStack.secretDirPath(homeDir)
pubDirPath = ZStack.publicDirPath(homeDir)
for d in (homeDir, verifDirPath, sigDirPath, secretDirPath, pubDirPath):
os.makedirs(d, exist_ok=True)
moveKeyFilesToCorrectLocations(sDir, verifDirPath, sigDirPath)
moveKeyFilesToCorrectLocations(eDir, pubDirPath, secretDirPath)
shutil.rmtree(sDir)
shutil.rmtree(eDir)
return hexlify(public_key).decode(), hexlify(verif_key).decode()
@staticmethod
def initRemoteKeys(name, remoteName, baseDir, verkey, override=False):
homeDir = ZStack.homeDirPath(baseDir, name)
verifDirPath = ZStack.verifDirPath(homeDir)
pubDirPath = ZStack.publicDirPath(homeDir)
for d in (homeDir, verifDirPath, pubDirPath):
os.makedirs(d, exist_ok=True)
if isHex(verkey):
verkey = unhexlify(verkey)
createCertsFromKeys(verifDirPath, remoteName, z85.encode(verkey))
public_key = ed25519PkToCurve25519(verkey)
createCertsFromKeys(pubDirPath, remoteName, z85.encode(public_key))
def onHostAddressChanged(self):
# we don't store remote data like ip, port, domain name, etc, so
# nothing to do here
pass
@staticmethod
def areKeysSetup(name, baseDir):
homeDir = ZStack.homeDirPath(baseDir, name)
verifDirPath = ZStack.verifDirPath(homeDir)
pubDirPath = ZStack.publicDirPath(homeDir)
sigDirPath = ZStack.sigDirPath(homeDir)
secretDirPath = ZStack.secretDirPath(homeDir)
for d in (verifDirPath, pubDirPath):
if not os.path.isfile(os.path.join(d, '{}.key'.format(name))):
return False
for d in (sigDirPath, secretDirPath):
if not os.path.isfile(os.path.join(d, '{}.key_secret'.format(name))):
return False
return True
@staticmethod
def keyDirNames():
return ZStack.PublicKeyDirName, ZStack.PrivateKeyDirName, \
ZStack.VerifKeyDirName, ZStack.SigKeyDirName
@staticmethod
def getHaFromLocal(name, basedirpath):
return None
def __repr__(self):
return self.name
@staticmethod
def homeDirPath(baseDirPath, name):
return os.path.join(os.path.expanduser(baseDirPath), name)
@staticmethod
def publicDirPath(homeDirPath):
return os.path.join(homeDirPath, ZStack.PublicKeyDirName)
@staticmethod
def secretDirPath(homeDirPath):
return os.path.join(homeDirPath, ZStack.PrivateKeyDirName)
@staticmethod
def verifDirPath(homeDirPath):
return os.path.join(homeDirPath, ZStack.VerifKeyDirName)
@staticmethod
def sigDirPath(homeDirPath):
return os.path.join(homeDirPath, ZStack.SigKeyDirName)
@staticmethod
def learnKeysFromOthers(baseDir, name, others):
homeDir = ZStack.homeDirPath(baseDir, name)
verifDirPath = ZStack.verifDirPath(homeDir)
pubDirPath = ZStack.publicDirPath(homeDir)
for d in (homeDir, verifDirPath, pubDirPath):
os.makedirs(d, exist_ok=True)
for other in others:
createCertsFromKeys(verifDirPath, other.name, other.verKey)
createCertsFromKeys(pubDirPath, other.name, other.publicKey)
def tellKeysToOthers(self, others):
for other in others:
createCertsFromKeys(other.verifKeyDir, self.name, self.verKey)
createCertsFromKeys(other.publicKeysDir, self.name, self.publicKey)
def setupDirs(self):
self.homeDir = self.homeDirPath(self.basedirpath, self.name)
self.publicKeysDir = self.publicDirPath(self.homeDir)
self.secretKeysDir = self.secretDirPath(self.homeDir)
self.verifKeyDir = self.verifDirPath(self.homeDir)
self.sigKeyDir = self.sigDirPath(self.homeDir)
for d in (self.homeDir, self.publicKeysDir, self.secretKeysDir,
self.verifKeyDir, self.sigKeyDir):
os.makedirs(d, exist_ok=True)
def setupOwnKeysIfNeeded(self):
if not os.listdir(self.sigKeyDir):
# If signing keys are not present, secret (private keys) should
# not be present since they should be converted keys.
assert not os.listdir(self.secretKeysDir)
# Seed should be present
assert self.seed, 'Keys are not setup for {}'.format(self)
logger.info("Signing and Encryption keys were not found for {}. "
"Creating them now".format(self),
extra={"cli": False})
tdirS = os.path.join(self.homeDir, '__skeys__')
tdirE = os.path.join(self.homeDir, '__ekeys__')
os.makedirs(tdirS, exist_ok=True)
os.makedirs(tdirE, exist_ok=True)
createEncAndSigKeys(tdirE, tdirS, self.name, self.seed)
moveKeyFilesToCorrectLocations(tdirE, self.publicKeysDir,
self.secretKeysDir)
moveKeyFilesToCorrectLocations(tdirS, self.verifKeyDir,
self.sigKeyDir)
shutil.rmtree(tdirE)
shutil.rmtree(tdirS)
def setupAuth(self, restricted=True, force=False):
if self.auth and not force:
raise RuntimeError('Listener already setup')
location = self.publicKeysDir if restricted else zmq.auth.CURVE_ALLOW_ANY
# self.auth = AsyncioAuthenticator(self.ctx)
self.auth = MultiZapAuthenticator(self.ctx)
self.auth.start()
self.auth.allow('0.0.0.0')
self.auth.configure_curve(domain='*', location=location)
def teardownAuth(self):
if self.auth:
self.auth.stop()
def setupSigning(self):
# Setup its signer from the signing key stored at disk and for all
# verification keys stored at disk, add Verifier
_, sk = self.selfSigKeys
self.signer = Signer(z85.decode(sk))
for vk in self.getAllVerKeys():
self.addVerifier(vk)
def addVerifier(self, verkey):
self.verifiers[verkey] = Verifier(z85.decode(verkey))
def start(self, restricted=None, reSetupAuth=True):
# self.ctx = test.asyncio.Context.instance()
self.ctx = zmq.Context.instance()
if self.config.MAX_SOCKETS:
self.ctx.MAX_SOCKETS = self.config.MAX_SOCKETS
restricted = self.restricted if restricted is None else restricted
logger.debug('{} starting with restricted as {} and reSetupAuth '
'as {}'.format(self, restricted, reSetupAuth),
extra={"cli": False, "demo": False})
self.setupAuth(restricted, force=reSetupAuth)
self.open()
def stop(self):
if self.opened:
logger.info('stack {} closing its listener'.format(self),
extra={"cli": False, "demo": False})
self.close()
self.teardownAuth()
logger.info("stack {} stopped".format(self),
extra={"cli": False, "demo": False})
@property
def opened(self):
return self.listener is not None
def open(self):
# noinspection PyUnresolvedReferences
self.listener = self.ctx.socket(zmq.ROUTER)
# noinspection PyUnresolvedReferences
# self.poller.register(self.listener, test.POLLIN)
public, secret = self.selfEncKeys
self.listener.curve_secretkey = secret
self.listener.curve_publickey = public
self.listener.curve_server = True
self.listener.identity = self.publicKey
logger.debug(
'{} will bind its listener at {}:{}'.format(self, self.ha[0], self.ha[1]))
set_keepalive(self.listener, self.config)
set_zmq_internal_queue_size(self.listener, self.queue_size)
self.listener.bind(
'{protocol}://{ip}:{port}'.format(ip=self.ha[0], port=self.ha[1],
protocol=ZMQ_NETWORK_PROTOCOL)
)
def close(self):
self.listener.unbind(self.listener.LAST_ENDPOINT)
self.listener.close(linger=0)
self.listener = None
logger.debug('{} starting to disconnect remotes'.format(self))
for r in self.remotes.values():
r.disconnect()
self.remotesByKeys.pop(r.publicKey, None)
self._remotes = {}
if self.remotesByKeys:
logger.debug('{} found remotes that were only in remotesByKeys and '
'not in remotes. This is suspicious')
for r in self.remotesByKeys.values():
r.disconnect()
self.remotesByKeys = {}
self._conns = set()
@property
def selfEncKeys(self):
serverSecretFile = os.path.join(self.secretKeysDir,
"{}.key_secret".format(self.name))
return zmq.auth.load_certificate(serverSecretFile)
@property
def selfSigKeys(self):
serverSecretFile = os.path.join(self.sigKeyDir,
"{}.key_secret".format(self.name))
return zmq.auth.load_certificate(serverSecretFile)
@property
def isRestricted(self):
return not self.auth.allow_any if self.auth is not None \
else self.restricted
@property
def isKeySharing(self):
# TODO: Change name after removing test
return not self.isRestricted
def isConnectedTo(self, name: str = None, ha: Tuple = None):
if self.onlyListener:
return self.hasRemote(name)
return super().isConnectedTo(name, ha)
def hasRemote(self, name):
if self.onlyListener:
if isinstance(name, str):
name = name.encode()
if name in self.peersWithoutRemotes:
return True
return super().hasRemote(name)
def removeRemoteByName(self, name: str):
if self.onlyListener:
if name in self.peersWithoutRemotes:
self.peersWithoutRemotes.remove(name)
return True
else:
return super().removeRemoteByName(name)
def getHa(self, name):
# Return HA as None when its a `peersWithoutRemote`
if self.onlyListener:
if isinstance(name, str):
name = name.encode()
if name in self.peersWithoutRemotes:
return None
return super().getHa(name)
async def service(self, limit=None) -> int:
"""
Service `limit` number of received messages in this stack.
:param limit: the maximum number of messages to be processed. If None,
processes all of the messages in rxMsgs.
:return: the number of messages processed.
"""
if self.listener:
await self._serviceStack(self.age)
else:
logger.debug("{} is stopped".format(self))
r = len(self.rxMsgs)
if r > 0:
pracLimit = limit if limit else sys.maxsize
return self.processReceived(pracLimit)
return 0
def _verifyAndAppend(self, msg, ident):
try:
self.msgLenVal.validate(msg)
decoded = msg.decode()
except (UnicodeDecodeError, InvalidMessageExceedingSizeException) as ex:
errstr = 'Message will be discarded due to {}'.format(ex)
frm = self.remotesByKeys[ident].name if ident in self.remotesByKeys else ident
logger.warning("Got from {} {}".format(frm, errstr))
self.msgRejectHandler(errstr, frm)
return False
self.rxMsgs.append((decoded, ident))
return True
def _receiveFromListener(self, quota) -> int:
"""
Receives messages from listener
:param quota: number of messages to receive
:return: number of received messages
"""
assert quota
i = 0
while i < quota:
try:
ident, msg = self.listener.recv_multipart(flags=zmq.NOBLOCK)
if not msg:
# Router probing sends empty message on connection
continue
i += 1
if self.onlyListener and ident not in self.remotesByKeys:
self.peersWithoutRemotes.add(ident)
self._verifyAndAppend(msg, ident)
except zmq.Again:
break
if i > 0:
logger.trace('{} got {} messages through listener'.
format(self, i))
return i
def _receiveFromRemotes(self, quotaPerRemote) -> int:
"""
Receives messages from remotes
:param quotaPerRemote: number of messages to receive from one remote
:return: number of received messages
"""
assert quotaPerRemote
totalReceived = 0
for ident, remote in self.remotesByKeys.items():
if not remote.socket:
continue
i = 0
sock = remote.socket
while i < quotaPerRemote:
try:
msg, = sock.recv_multipart(flags=zmq.NOBLOCK)
if not msg:
# Router probing sends empty message on connection
continue
i += 1
self._verifyAndAppend(msg, ident)
except zmq.Again:
break
if i > 0:
logger.trace('{} got {} messages through remote {}'.
format(self, i, remote))
totalReceived += i
return totalReceived
async def _serviceStack(self, age):
# TODO: age is unused
# These checks are kept here and not moved to a function since
# `_serviceStack` is called very often and function call is an overhead
if self.config.ENABLE_HEARTBEATS and (
self.last_heartbeat_at is None or
(time.perf_counter() - self.last_heartbeat_at) >=
self.config.HEARTBEAT_FREQ):
self.send_heartbeats()
self._receiveFromListener(quota=self.listenerQuota)
self._receiveFromRemotes(quotaPerRemote=self.senderQuota)
return len(self.rxMsgs)
    def processReceived(self, limit):
        """Handle up to *limit* messages queued in ``self.rxMsgs``.

        Ping/pong and undecodable messages are consumed and still count
        against *limit*. Returns the number of messages taken off the queue.
        """
        if limit <= 0:
            return 0
        num_processed = 0
        for num_processed in range(limit):
            # Queue drained early: exactly num_processed messages were handled.
            if len(self.rxMsgs) == 0:
                return num_processed
            msg, ident = self.rxMsgs.popleft()
            # Prefer the human-readable remote name when the key is known.
            frm = self.remotesByKeys[ident].name \
                if ident in self.remotesByKeys else ident
            if self.handlePingPong(msg, frm, ident):
                continue
            try:
                msg = self.deserializeMsg(msg)
            except Exception as e:
                logger.error('Error {} while converting message {} '
                             'to JSON from {}'.format(e, msg, ident))
                continue
            # Subclass hook (see doProcessReceived); a falsy return drops the message.
            msg = self.doProcessReceived(msg, frm, ident)
            if msg:
                self.msgHandler((msg, frm))
        # Loop ran to completion: last index is limit - 1, so add one.
        return num_processed + 1
def doProcessReceived(self, msg, frm, ident):
return msg
def connect(self,
name=None,
remoteId=None,
ha=None,
verKeyRaw=None,
publicKeyRaw=None):
"""
Connect to the node specified by name.
"""
if not name:
raise ValueError('Remote name should be specified')
if name in self.remotes:
remote = self.remotes[name]
else:
publicKey = z85.encode(
publicKeyRaw) if publicKeyRaw else self.getPublicKey(name)
verKey = z85.encode(
verKeyRaw) if verKeyRaw else self.getVerKey(name)
if not ha or not publicKey or (self.isRestricted and not verKey):
raise ValueError('{} doesnt have enough info to connect. '
'Need ha, public key and verkey. {} {} {}'.
format(name, ha, verKey, publicKey))
remote = self.addRemote(name, ha, verKey, publicKey)
public, secret = self.selfEncKeys
remote.connect(self.ctx, public, secret)
logger.info("{}{} looking for {} at {}:{}"
.format(CONNECTION_PREFIX, self,
name or remote.name, *remote.ha),
extra={"cli": "PLAIN", "tags": ["node-looking"]})
# This should be scheduled as an async task
self.sendPingPong(remote, is_ping=True)
return remote.uid
def reconnectRemote(self, remote):
"""
Disconnect remote and connect to it again
:param remote: instance of Remote from self.remotes
:param remoteName: name of remote
:return:
"""
if not isinstance(remote, Remote):
raise PlenumTypeError('remote', remote, Remote)
logger.debug('{} reconnecting to {}'.format(self, remote))
public, secret = self.selfEncKeys
remote.disconnect()
remote.connect(self.ctx, public, secret)
self.sendPingPong(remote, is_ping=True)
def reconnectRemoteWithName(self, remoteName):
if remoteName not in self.remotes:
raise PlenumValueError(
'remoteName', remoteName,
"one of {}".format(self.remotes)
)
self.reconnectRemote(self.remotes[remoteName])
def disconnectByName(self, remoteName: str):
if not remoteName:
raise PlenumValueError(
'remoteName', remoteName,
"non-empty string"
)
remote = self.remotes.get(remoteName)
if not remote:
logger.debug('{} did not find any remote '
'by name {} to disconnect'
.format(self, remoteName))
return None
remote.disconnect()
return remote
def addRemote(self, name, ha, remoteVerkey, remotePublicKey):
if not name:
raise PlenumValueError('name', name, 'non-empty')
remote = self._RemoteClass(name, ha, remoteVerkey, remotePublicKey, self.queue_size)
self.remotes[name] = remote
# TODO: Use weakref to remote below instead
self.remotesByKeys[remotePublicKey] = remote
if remoteVerkey:
self.addVerifier(remoteVerkey)
else:
logger.debug('{} adding a remote {}({}) without a verkey'.
format(self, name, ha))
return remote
def sendPingPong(self, remote: Union[str, Remote], is_ping=True):
msg = self.pingMessage if is_ping else self.pongMessage
action = 'ping' if is_ping else 'pong'
name = remote if isinstance(remote, (str, bytes)) else remote.name
r = self.send(msg, name)
if r[0] is True:
logger.debug('{} {}ed {}'.format(self.name, action, name))
elif r[0] is False:
# TODO: This fails the first time as socket is not established,
# need to make it retriable
logger.debug('{} failed to {} {} {}'
.format(self.name, action, name, r[1]),
extra={"cli": False})
elif r[0] is None:
logger.debug('{} will be sending in batch'.format(self))
else:
logger.warning('{}{} got an unexpected return value {}'
' while sending'
.format(CONNECTION_PREFIX, self, r))
return r[0]
    def handlePingPong(self, msg, frm, ident):
        """Intercept ping/pong health messages.

        Returns True when *msg* was a ping or a pong (and has been handled),
        False otherwise so the caller processes it as a normal message.
        """
        if msg in (self.pingMessage, self.pongMessage):
            if msg == self.pingMessage:
                logger.trace('{} got ping from {}'.format(self, frm))
                # Answer an incoming ping with a pong.
                self.sendPingPong(frm, is_ping=False)
            if msg == self.pongMessage:
                # A pong proves two-way connectivity with this remote.
                if ident in self.remotesByKeys:
                    self.remotesByKeys[ident].setConnected()
                logger.trace('{} got pong from {}'.format(self, frm))
            return True
        return False
    def send_heartbeats(self):
        """Ping every known remote and record the send time.

        Called from _serviceStack when HEARTBEAT_FREQ seconds have elapsed
        since the previous round (gated by config.ENABLE_HEARTBEATS).
        """
        # Sends heartbeat (ping) to all
        logger.debug('{} sending heartbeat to all remotes'.format(self))
        # Iterating the dict yields remote *names*; sendPingPong accepts
        # either a name or a Remote instance.
        for remote in self.remotes:
            self.sendPingPong(remote)
        self.last_heartbeat_at = time.perf_counter()
def send(self, msg: Any, remoteName: str = None, ha=None):
if self.onlyListener:
return self.transmitThroughListener(msg, remoteName)
else:
if remoteName is None:
r = []
e = []
# Serializing beforehand since to avoid serializing for each
# remote
try:
msg = self.prepare_to_send(msg)
except InvalidMessageExceedingSizeException as ex:
err_str = '{}Cannot send message. Error {}'.format(
CONNECTION_PREFIX, ex)
logger.error(err_str)
return False, err_str
for uid in self.remotes:
res, err = self.transmit(msg, uid, serialized=True)
r.append(res)
e.append(err)
e = list(filter(lambda x: x is not None, e))
ret_err = None if len(e) == 0 else "\n".join(e)
return all(r), ret_err
else:
return self.transmit(msg, remoteName)
def transmit(self, msg, uid, timeout=None, serialized=False):
remote = self.remotes.get(uid)
err_str = None
if not remote:
logger.debug("Remote {} does not exist!".format(uid))
return False, err_str
socket = remote.socket
if not socket:
logger.debug('{} has uninitialised socket '
'for remote {}'.format(self, uid))
return False, err_str
try:
if not serialized:
msg = self.prepare_to_send(msg)
# socket.send(self.signedMsg(msg), flags=zmq.NOBLOCK)
socket.send(msg, flags=zmq.NOBLOCK)
logger.trace('{} transmitting message {} to {}'
.format(self, msg, uid))
if not remote.isConnected and msg not in self.healthMessages:
logger.info('Remote {} is not connected - '
'message will not be sent immediately.'
'If this problem does not resolve itself - '
'check your firewall settings'.format(uid))
return True, err_str
except zmq.Again:
logger.debug(
'{} could not transmit message to {}'.format(self, uid))
except InvalidMessageExceedingSizeException as ex:
err_str = '{}Cannot transmit message. Error {}'.format(
CONNECTION_PREFIX, ex)
logger.error(err_str)
return False, err_str
def transmitThroughListener(self, msg, ident) -> Tuple[bool, Optional[str]]:
if isinstance(ident, str):
ident = ident.encode()
if ident not in self.peersWithoutRemotes:
logger.debug('{} not sending message {} to {}'.
format(self, msg, ident))
logger.debug("This is a temporary workaround for not being able to "
"disconnect a ROUTER's remote")
return False, None
try:
msg = self.prepare_to_send(msg)
# noinspection PyUnresolvedReferences
# self.listener.send_multipart([ident, self.signedMsg(msg)],
# flags=zmq.NOBLOCK)
logger.trace('{} transmitting {} to {} through listener socket'.
format(self, msg, ident))
self.listener.send_multipart([ident, msg], flags=zmq.NOBLOCK)
return True, None
except zmq.Again:
return False, None
except InvalidMessageExceedingSizeException as ex:
err_str = '{}Cannot transmit message. Error {}'.format(
CONNECTION_PREFIX, ex)
logger.error(err_str)
return False, err_str
except Exception as e:
err_str = '{}{} got error {} while sending through listener to {}'\
.format(CONNECTION_PREFIX, self, e, ident)
logger.error(err_str)
return False, err_str
return True, None
@staticmethod
def serializeMsg(msg):
if isinstance(msg, Mapping):
msg = json.dumps(msg)
if isinstance(msg, str):
msg = msg.encode()
assert isinstance(msg, bytes)
return msg
@staticmethod
def deserializeMsg(msg):
if isinstance(msg, bytes):
msg = msg.decode()
msg = json.loads(msg)
return msg
def signedMsg(self, msg: bytes, signer: Signer=None):
sig = self.signer.signature(msg)
return msg + sig
def verify(self, msg, by):
if self.isKeySharing:
return True
if by not in self.remotesByKeys:
return False
verKey = self.remotesByKeys[by].verKey
r = self.verifiers[verKey].verify(
msg[-self.sigLen:], msg[:-self.sigLen])
return r
@staticmethod
def loadPubKeyFromDisk(directory, name):
filePath = os.path.join(directory,
"{}.key".format(name))
try:
public, _ = zmq.auth.load_certificate(filePath)
return public
except (ValueError, IOError) as ex:
raise KeyError from ex
@staticmethod
def loadSecKeyFromDisk(directory, name):
filePath = os.path.join(directory,
"{}.key_secret".format(name))
try:
_, secret = zmq.auth.load_certificate(filePath)
return secret
except (ValueError, IOError) as ex:
raise KeyError from ex
    @property
    def publicKey(self):
        """Z85-encoded public (encryption) key of this stack, loaded from disk."""
        return self.getPublicKey(self.name)
    @property
    def publicKeyRaw(self):
        """Raw (Z85-decoded) bytes of the public key."""
        return z85.decode(self.publicKey)
    @property
    def pubhex(self):
        """Hex-encoded bytes of the public key."""
        return hexlify(z85.decode(self.publicKey))
    def getPublicKey(self, name):
        """Load the public key for ``name`` from the public-keys directory.

        Raises PublicKeyNotFoundOnDisk when no key file exists for ``name``.
        """
        try:
            return self.loadPubKeyFromDisk(self.publicKeysDir, name)
        except KeyError:
            raise PublicKeyNotFoundOnDisk(self.name, name)
    @property
    def verKey(self):
        """Z85-encoded verification key of this stack (None if absent and unrestricted)."""
        return self.getVerKey(self.name)
    @property
    def verKeyRaw(self):
        """Raw (Z85-decoded) bytes of the verification key, or None."""
        if self.verKey:
            return z85.decode(self.verKey)
        return None
    @property
    def verhex(self):
        """Hex-encoded bytes of the verification key, or None."""
        if self.verKey:
            return hexlify(z85.decode(self.verKey))
        return None
    def getVerKey(self, name):
        """Load the verification key for ``name`` from disk.

        Returns None for an unknown name when the stack is unrestricted;
        raises VerKeyNotFoundOnDisk when the stack is restricted.
        """
        try:
            return self.loadPubKeyFromDisk(self.verifKeyDir, name)
        except KeyError:
            if self.isRestricted:
                raise VerKeyNotFoundOnDisk(self.name, name)
            return None
    @property
    def sigKey(self):
        """Secret signing key of this stack, loaded from disk."""
        return self.loadSecKeyFromDisk(self.sigKeyDir, self.name)
    # TODO: Change name to sighex after removing test
    @property
    def keyhex(self):
        """Hex-encoded bytes of the secret signing key."""
        return hexlify(z85.decode(self.sigKey))
    @property
    def priKey(self):
        """Secret (private encryption) key of this stack, loaded from disk."""
        return self.loadSecKeyFromDisk(self.secretKeysDir, self.name)
    @property
    def prihex(self):
        """Hex-encoded bytes of the secret (private) key."""
        return hexlify(z85.decode(self.priKey))
def getAllVerKeys(self):
keys = []
for key_file in os.listdir(self.verifKeyDir):
if key_file.endswith(".key"):
serverVerifFile = os.path.join(self.verifKeyDir,
key_file)
serverPublic, _ = zmq.auth.load_certificate(serverVerifFile)
keys.append(serverPublic)
return keys
    def setRestricted(self, restricted: bool):
        """Restart the stack with the given restriction mode, if it changed.

        Restarting re-runs authentication setup so the new mode takes effect.
        """
        if self.isRestricted != restricted:
            logger.debug('{} setting restricted to {}'.
                         format(self, restricted))
            self.stop()
            # TODO: REMOVE, it will make code slow, only doing to allow the
            # socket to become available again
            time.sleep(1)
            self.start(restricted, reSetupAuth=True)
def _safeRemove(self, filePath):
try:
os.remove(filePath)
except Exception as ex:
logger.debug('{} could delete file {} due to {}'.
format(self, filePath, ex))
    def clearLocalRoleKeep(self):
        """Delete this stack's own secret/signing and public/verification key files."""
        for d in (self.secretKeysDir, self.sigKeyDir):
            filePath = os.path.join(d, "{}.key_secret".format(self.name))
            self._safeRemove(filePath)
        for d in (self.publicKeysDir, self.verifKeyDir):
            filePath = os.path.join(d, "{}.key".format(self.name))
            self._safeRemove(filePath)
    def clearRemoteRoleKeeps(self):
        """Delete key files for all remotes, keeping only this stack's own files."""
        for d in (self.secretKeysDir, self.sigKeyDir):
            for key_file in os.listdir(d):
                if key_file != '{}.key_secret'.format(self.name):
                    self._safeRemove(os.path.join(d, key_file))
        for d in (self.publicKeysDir, self.verifKeyDir):
            for key_file in os.listdir(d):
                if key_file != '{}.key'.format(self.name):
                    self._safeRemove(os.path.join(d, key_file))
    def clearAllDir(self):
        """Remove the whole home directory tree of this stack."""
        shutil.rmtree(self.homeDir)
    def prepare_to_send(self, msg: Any):
        """Serialize ``msg`` to bytes and validate its length before sending.

        Raises (via msgLenVal.validate) if the serialized message is too large.
        """
        msg_bytes = self.serializeMsg(msg)
        self.msgLenVal.validate(msg_bytes)
        return msg_bytes
class DummyKeep:
    """Proxy exposing a RAET-style ``auto`` attribute on top of a stack.

    Mode 2 maps to unrestricted (key-sharing) and mode 0 to restricted,
    mirroring how ``_auto`` is initialized from ``stack.isKeySharing``.
    """
    def __init__(self, stack, *args, **kwargs):
        self.stack = stack
        self._auto = 2 if stack.isKeySharing else 0
    @property
    def auto(self):
        logger.debug('{} proxy method used on {}'.
                     format(inspect.stack()[0][3], self))
        return self._auto
    @auto.setter
    def auto(self, mode):
        logger.debug('{} proxy method used on {}'.
                     format(inspect.stack()[0][3], self))
        # AutoMode.once (value 1) is not used, so we don't care about it here
        if mode != self._auto:
            if mode == 2:
                self.stack.setRestricted(False)
            if mode == 0:
                self.stack.setRestricted(True)
| 36.662204 | 98 | 0.588977 |
8fafc980483716b2f561897225c875112500b245 | 1,416 | py | Python | new_paper_summary.py | KissyZhou/KissyZhou.github.io | 1a1e5aa619de08b60ea0e4bb3a12673b4043c183 | [
"MIT"
] | null | null | null | new_paper_summary.py | KissyZhou/KissyZhou.github.io | 1a1e5aa619de08b60ea0e4bb3a12673b4043c183 | [
"MIT"
] | null | null | null | new_paper_summary.py | KissyZhou/KissyZhou.github.io | 1a1e5aa619de08b60ea0e4bb3a12673b4043c183 | [
"MIT"
] | null | null | null | # Patrick Emami
#
# Generates the boiler-plate for a new paper summary
import datetime
import os
if __name__ == '__main__':
    # Map menu numbers to topic directory names; replaces an 8-way elif chain.
    TOPICS = {
        1: "AGI",
        2: "computer-vision",
        3: "deep-learning-theory",
        4: "deep-RL",
        5: "general-ML",
        6: "generative-adversarial-networks",
        7: "natural-language-processing",
        8: "reinforcement-learning-theory",
    }
    # Find out the topic
    topic_code = input("What topic does this paper fall under?\n\t(1) AGI\n\t(2) computer vision\n\t(3) deep learning theory\n\t(4) deep RL\n\t(5) general ML\n\t(6) GANs\n\t(7) NLP\n\t(8) RL theory\n")
    topic = TOPICS.get(int(topic_code))
    if topic is None:
        print("[!] Unknown topic")
        exit(1)
    fn = input("Provide a shortened title for the URL:\n")
    # Find out today's date in %YYYY-MM-DD format
    date = datetime.date.today().strftime("%Y-%m-%d")
    fn = os.path.join('paper-summaries', topic, '_posts', date + '-' + fn + '.markdown')
    # Seed the new summary file from the draft template.
    with open(fn, 'w+') as new_paper_sum:
        with open(os.path.join('_drafts', 'paper-summaries-template.md'), 'r') as template:
            new_paper_sum.write(template.read())
| 32.930233 | 201 | 0.60452 |
abf289e47fa34a45cf4240cd87e1456a122456e6 | 3,011 | py | Python | scripts/mactool.py | westurner/dotfiles | 92316b97a99677b345f9dfe15f5e4f7dd5c7218a | [
"BSD-3-Clause"
] | 31 | 2015-03-07T13:23:37.000Z | 2021-04-30T07:44:07.000Z | scripts/mactool.py | westurner/dotfiles | 92316b97a99677b345f9dfe15f5e4f7dd5c7218a | [
"BSD-3-Clause"
] | 36 | 2015-01-01T02:07:47.000Z | 2021-05-13T19:39:12.000Z | scripts/mactool.py | westurner/dotfiles | 92316b97a99677b345f9dfe15f5e4f7dd5c7218a | [
"BSD-3-Clause"
] | 6 | 2015-01-26T23:09:46.000Z | 2020-10-03T04:43:18.000Z | #!/usr/bin/env python
from os import getenv
from random import randrange,seed
import sys
if sys.version_info.major > 2:
from urllib.request import urlopen
else:
from urllib import urlopen
_OUI_URL = "http://standards.ieee.org/regauth/oui/oui.txt"
_MACFILE = "%s/.macvendors" % getenv("HOME")
_V_VMWARE = '00:0c:29'
def rand_hex(num):
    """Return ``num`` random two-character lowercase hex octets ('00'..'ff').

    Bug fix: the original used randrange(0, 255), whose end-exclusive upper
    bound silently made 0xff unreachable; randrange(256) covers all octets.
    """
    return ["{:02x}".format(randrange(256)) for _ in range(num)]
def rand_vmware():
    """Return a random MAC address using the VMware OUI prefix."""
    l = _V_VMWARE.split(":")
    l.extend(rand_hex(3))
    return ':'.join(l)
def rand_global():
    """Return a fully random MAC address (six random octets)."""
    # PEP 8: use def instead of assigning a lambda to a name.
    return ':'.join(rand_hex(6))
def format_line(line):
    """Turn an OUI registry line into a ``(prefix, Vendor Name)`` tuple."""
    tokens = line.split()
    vendor = ' '.join(word.lower().capitalize() for word in tokens[2:])
    return (tokens[0], vendor)
def download_oui():
    """Download the IEEE OUI registry and cache it as 'prefix,Vendor' CSV.

    Bug fixes: the Python-2-only ``file()`` builtin is replaced with
    ``open()`` (the module already shims urllib for Python 3), the redundant
    ``close()`` inside the ``with`` block is dropped, and response bytes are
    decoded before string filtering so this works on Python 3.
    """
    response = urlopen(_OUI_URL)
    with open(_MACFILE, "w+") as out:
        for raw in response:
            line = raw.decode("latin-1") if isinstance(raw, bytes) else raw
            if "(hex)" in line:
                out.write(','.join(format_line(line)) + "\n")
    print("OUI File downloaded to %s" % _MACFILE)
def mac_to_vendor(mac):
    """Look up the vendor name(s) for a MAC address prefix in _MACFILE.

    Bug fixes: the Python-2-only ``file()`` builtin is replaced with
    ``open()``, and the file handle is closed via a ``with`` block instead of
    being leaked.
    """
    prefix = mac.replace(":", "-")[:8].upper()
    with open(_MACFILE, "r") as f:
        return '\n'.join(
            ''.join(x.split(",")[1:]).strip() for x in f if x.startswith(prefix))
def find_vendor(vendor):
    """Return newline-joined _MACFILE rows whose text contains ``vendor``.

    Bug fix: the original tested ``vendor in f.lower()`` — calling
    ``.lower()`` on the file object itself (an AttributeError) — instead of
    lowercasing each line. Also uses ``open()`` with a context manager in
    place of the Python-2-only, leaked ``file()`` handle.
    """
    needle = vendor.lower()
    with open(_MACFILE, "r") as f:
        return '\n'.join(
            x.replace('-', ':').strip() for x in f if needle in x.lower())
def mac_from_prefix(prefix):
    """Return an uppercase MAC with the given 3-octet prefix and a random tail.

    Raises Exception("Bad prefix") when the prefix is not three octets.
    """
    p = prefix.replace("-", ":")[:8]
    parts = p.split(":")
    # Bug fix: identity comparison ``len(l) is not 3`` replaced with ``!=``;
    # ``is`` on int literals is implementation-defined.
    if len(parts) != 3:
        raise Exception("Bad prefix")
    parts.extend(rand_hex(3))
    return (':'.join(parts)).upper()
def mac_from_vendor(vendor):
    """Return a random MAC using a randomly chosen prefix matching ``vendor``."""
    l = [x.split(",")[0] for x in find_vendor(vendor).split("\n")]
    return mac_from_prefix(l[randrange(0,len(l))])
if __name__=="__main__":
    _VERSION = 0.1
    # Seed the PRNG once for all random-MAC generation below.
    seed()
    # NOTE(review): optparse is deprecated in favor of argparse — consider
    # migrating.
    from optparse import OptionParser
    arg = OptionParser(version="%prog 0.1")
    arg.add_option("-d","--download",
            dest="download",
            action="store_true",
            help="Download latest MAC list")
    arg.add_option("-m","--mac",
            dest="mac",
            help="MAC Address to Convert to Vendor String")
    arg.add_option("-v","--vendor",
            dest="vendor",
            help="Vendor to search for prefixes")
    arg.add_option("-r","--random",
            dest="rand",
            action="store_true",
            help="Generate random MAC")
    arg.add_option("-a",
            dest="stdall",
            action="store_true",
            help="Convert All From stdin")
    (options, args) = arg.parse_args()
    if(options.download):
        download_oui()
    # --vendor alone searches; combined with --random it generates a MAC.
    if(options.vendor and options.rand):
        print(mac_from_vendor(options.vendor))
    elif(options.vendor):
        print(find_vendor(options.vendor))
    # --mac alone looks up the vendor; combined with --random it extends the
    # prefix with random octets.
    if(options.mac and options.rand):
        print(mac_from_prefix(options.mac))
    elif(options.mac):
        print(mac_to_vendor(options.mac))
    if(options.stdall):
        ilines = sys.stdin.readlines()
        for x in ilines:
            print("%s -> %s" % (x.strip(), mac_to_vendor(x)))
| 27.87963 | 79 | 0.589837 |
8a8ae0435414460b04a9b9841f41c8c91f162381 | 24,578 | py | Python | canned_http.py | mgp/canned-http | dade87ae512f06abf865641ec4cea86607b24cca | [
"MIT"
] | 1 | 2017-01-09T18:37:04.000Z | 2017-01-09T18:37:04.000Z | canned_http.py | mgp/canned-http | dade87ae512f06abf865641ec4cea86607b24cca | [
"MIT"
] | null | null | null | canned_http.py | mgp/canned-http | dade87ae512f06abf865641ec4cea86607b24cca | [
"MIT"
] | null | null | null | """A web server that accepts HTTP requests from a client, verifies that each
request contains some expected values, and returns canned responses for the
requests. The expected values in the requests and the canned responses are
specified by a script that is provided when the web server is run.
For requests, the script specifies the following parameters:
* method (required): The HTTP method used, such as GET or POST.
* url (required): The URL path requested.
* headers (optional): A map of expected HTTP headers. If provided, the headers
of a request must be a superset of these headers.
* body (optional): The expected body of the request, such as the data
submitted in a POST request.
* body_filename (optional): The filename whose contents should be expected as
the body of the request.
* body_type: (optional): If present, the received body and the expected body
will be converted to the given type before returning. Currently the only
valid value is JSON.
For responses, the script specifies the following parameters:
* status_code (required): The HTTP status code to return, such as 200 or 404.
* content_type (required): The value of the Content-Type header to return.
* headers (optional): A map of HTTP header names and values to return in the
response.
* delay (optional): The number of seconds to wait before sending the response,
which is useful for simulating long-polling by the server.
* body (optional): The body of the response, such as the HTML to render in the
browser in response to a GET request.
* body_filename (optional): The filename whose contents should be used as the
body of the response.
A response can be omitted altogether, which is useful for simulating
long-polling where the client must close the connection. If a response is
present, then exactly one of body and body_filename must be set. Setting both
or neither is invalid.
A request and the optional response is called an exchange. The persistent
connections feature of HTTP 1.1 allows multiple exchanges over a single TCP/IP
connection between the client and the server, provided that every exchange
except the last includes a response. An array of exchanges represents all the
exchanges across a single connection. The script is simply an array of such
arrays, so that it specifies the number of expected connections, and the order
of exchanges for each connection.
Author: Michael Parker (michael.g.parker@gmail.com)
"""
import argparse
import BaseHTTPServer
import json
import os
import SocketServer
import sys
import time
class Script:
  """A script specifying the expected requests made by the client, and the
  replies sent by the server.
  """

  def __init__(self, connections=()):
    self._connections = tuple(connections)

  def __repr__(self):
    # Bug fix: the parameter was previously named ``init``, leaving ``self``
    # undefined in the body (NameError on any repr()).
    return 'connections=%s' % repr(self._connections)
class Connection:
  """A connection from the client to the server.

  For HTTP 1.1 all connections are persistent unless declared otherwise,
  allowing multiple requests and replies over a single connection. This is
  modeled by a Connection containing a sequence of Exchange instances.
  """

  def __init__(self, exchanges=()):
    self._exchanges = tuple(exchanges)

  def __repr__(self):
    return 'exchanges=%r' % (self._exchanges,)
class Exchange:
  """An exchange, or a request received from the client and an optional reply
  by the server.

  The server can either send a reply after some specified delay in seconds,
  or can choose to send no reply. If the server does not send a reply, it is
  the responsibility of the client to terminate the connection. (A typical
  web server will disconnect after some timeout expires, but well-behaved
  clients should also timeout and disconnect.)
  """

  @staticmethod
  def _join_parts(string_parts):
    """Formats (key, value) pairs as '{key1: value1, key2: value2}'."""
    string = ', '.join(('%s: %s' % (key, value) for (key, value) in string_parts))
    return '{%s}' % string

  class Request:
    """A request from the client to the server.

    A request must contain a HTTP method and URL. Expected headers and the
    request body, typically only used with POST or PUT, are optional.
    """

    @staticmethod
    def request_with_no_body(method, url, headers=None):
      """Returns a request with no body, such as for a GET request."""
      return Exchange.Request(method, url, headers)

    @staticmethod
    def request_with_body(method, url, body, body_type=None, headers=None):
      """Returns a request with the given string as the body."""
      return Exchange.Request(method, url, headers, body=body, body_type=body_type)

    @staticmethod
    def request_from_file(method, url, body_filename, body_type=None, headers=None):
      """Returns a request with the contents of the given file as the body."""
      return Exchange.Request(
          method, url, headers, body_filename=body_filename, body_type=body_type)

    def __init__(self, method, url, headers=None, body=None, body_filename=None,
        body_type=None):
      self._method = method
      self._url = url
      self._headers = headers or {}
      self._body = body
      self._body_filename = body_filename
      self._body_type = body_type

    def __repr__(self):
      request_parts = [('method', self._method), ('url', self._url)]
      if self._headers:
        request_parts.append(('headers', self._headers))
      if self._body:
        request_parts.append(('body', self._body))
      return Exchange._join_parts(request_parts)

  class Response:
    """A response from the server to the client.

    A response must contain a HTTP status code, a value for the Content-Type
    header, and a body. The body is either a given string or the contents of
    a given file. Additional headers and a delay before sending the response
    are optional.
    """

    @staticmethod
    def response_with_body(status_code, content_type, body, headers=None, delay=0):
      """Returns a response with the given string as the body."""
      return Exchange.Response(status_code, content_type, delay, headers, body=body)

    @staticmethod
    def response_from_file(status_code, content_type, body_filename, headers=None,
        delay=0):
      """Returns a response with the contents of the given file as the body."""
      return Exchange.Response(status_code, content_type, delay, headers,
          body_filename=body_filename)

    def __init__(self, status_code, content_type, delay, headers=None,
        body=None, body_filename=None):
      self._status_code = status_code
      self._content_type = content_type
      self._delay = delay
      # Bug fix: default to an empty dict (as Request does) so consumers can
      # iterate the headers without a None check; previously a response built
      # without headers stored None and crashed the request handler's
      # header-sending loop.
      self._headers = headers or {}
      self._body = body
      self._body_filename = body_filename

    def __repr__(self):
      response_parts = [('status_code', self._status_code),
          ('content_type', self._content_type)]
      if self._delay:
        response_parts.append(('delay', self._delay))
      if self._headers:
        response_parts.append(('headers', repr(self._headers)))
      if self._body:
        response_parts.append(('body', self._body))
      elif self._body_filename:
        response_parts.append(('body_filename', self._body_filename))
      return Exchange._join_parts(response_parts)

  def __init__(self, request, response=None):
    self._request = request
    self._response = response

  def __repr__(self):
    if self._response:
      return '{request=%s, response=%s}' % (repr(self._request), repr(self._response))
    else:
      return '{request=%s}' % repr(self._request)
class DirectorError(Exception):
  """Raised when the Director encounters an unexpected request or event that
  deviates from the Script.
  """

  def __init__(self, message):
    self._message = message

  def __repr__(self):
    return self._message

  def __str__(self):
    return repr(self)
class Director:
  """Class that ensures that connections established and requests sent by the
  client follow the provided Script instance.

  If the script is not followed, a DirectorError is raised.
  """

  class _Event:
    """An event that the server expects to generate as part of the script.

    This class is simply to make verifying a Script easier.
    """

    _CONNECTION_OPENED = 'connection_opened'
    _CONNECTION_CLOSED = 'connection_closed'
    _GOT_REQUEST = 'got_request'

    @staticmethod
    def connection_opened_event(connection_index):
      """Returns an event for when a connection is opened."""
      return Director._Event(
          Director._Event._CONNECTION_OPENED, connection_index)

    @staticmethod
    def connection_closed_event(connection_index):
      """Returns an event for when a connection is closed."""
      return Director._Event(
          Director._Event._CONNECTION_CLOSED, connection_index)

    @staticmethod
    def exchange_event(connection_index, exchange_index, exchange):
      """Returns an event for the given exchange, or request and optional reply.
      """
      return Director._Event(
          Director._Event._GOT_REQUEST, connection_index, exchange_index, exchange)

    def __init__(self, event_type, connection_index, exchange_index=None, exchange=None):
      self._type = event_type
      self._connection_index = connection_index
      if exchange_index is not None:
        self._exchange_index = exchange_index
      if exchange is not None:
        self._exchange = exchange

  def __init__(self, script):
    self._next_event = None
    self._next_event_ready = False
    # Flatten the given Script into the linear sequence of expected events.
    events = []
    for connection_index, connection in enumerate(script._connections, 1):
      events.append(
          Director._Event.connection_opened_event(connection_index))
      for exchange_index, exchange in enumerate(connection._exchanges, 1):
        events.append(
            Director._Event.exchange_event(connection_index, exchange_index, exchange))
      events.append(
          Director._Event.connection_closed_event(connection_index))
    self._events_iter = iter(events)

  def _ready_next_event(self):
    """Peeks the next expected event into self._next_event (None at end)."""
    if not self._next_event_ready:
      try:
        self._next_event = next(self._events_iter)
      except StopIteration:
        # The last event has been reached.
        self._next_event = None
      self._next_event_ready = True

  def _finish_current_event(self):
    """Marks the current event as consumed so the next peek advances."""
    self._next_event_ready = False

  def connection_opened(self):
    """Called by the web server when the client opens a connection."""
    self._ready_next_event()
    if self._next_event is None:
      raise DirectorError('Client opened a connection after the script ended.')
    self._finish_current_event()

  def connection_closed(self):
    """Called by the web server when the client closes the connection."""
    self._ready_next_event()
    # Bug fix: guard against the script having ended; previously this raised
    # an AttributeError on None instead of a DirectorError.
    if self._next_event is None:
      raise DirectorError('Client closed a connection after the script ended.')
    if self._next_event._type == Director._Event._GOT_REQUEST:
      raise DirectorError(
          'Client closed connection %s instead of performing exchange %s' %
          (self._next_event._connection_index, self._next_event._exchange_index))
    self._finish_current_event()

  def got_request(self, method, url, headers=None, body=None):
    """Called by the web server when the client sends an HTTP request.

    Returns the scripted Exchange.Response to send back, or None if the
    server should send no reply and wait for the client to close the
    connection. (The previous docstring incorrectly described a tuple
    return value.)
    """
    self._ready_next_event()
    # Bug fix: guard against the script having ended (see connection_closed).
    if self._next_event is None:
      raise DirectorError(
          "Client sent request with method '%s' and URL '%s' after the "
          "script ended" % (method, url))
    if self._next_event._type == Director._Event._CONNECTION_CLOSED:
      raise DirectorError(
          "Client sent request with method '%s' and URL '%s' instead of closing "
          "connection %s" % (method, url, self._next_event._connection_index))
    exchange = self._next_event._exchange
    request = exchange._request
    # Assert that the method is correct.
    if method != request._method:
      raise DirectorError(
          "Expected 'method' value '%s', received '%s' for connection %s, exchange %s" %
          (request._method, method, self._next_event._connection_index,
           self._next_event._exchange_index))
    # Assert that the URL is correct.
    if url != request._url:
      raise DirectorError(
          "Expected 'url' value '%s', received '%s' for connection %s, exchange %s" %
          (request._url, url, self._next_event._connection_index,
           self._next_event._exchange_index))
    # Create the expected body.
    if request._body:
      expected_body = request._body
    elif request._body_filename:
      # Use a context manager so the file is closed even if reading raises.
      with open(request._body_filename, 'rb') as f:
        expected_body = f.read()
    else:
      expected_body = None
    if request._body_type == 'json':
      # Convert the body and expected body to JSON if needed.
      if body:
        body = json.loads(body)
      if expected_body:
        expected_body = json.loads(expected_body)
    # Assert that the optional body is correct.
    if body != expected_body:
      raise DirectorError(
          "Expected 'body' value '%s', received '%s' for connection %s, exchange %s" %
          (expected_body, body, self._next_event._connection_index,
           self._next_event._exchange_index))
    # Assert that the headers are correct. (Mutable-default fix: the default
    # is now None, normalized here, instead of a shared {} default argument.
    # items() replaces the Python-2-only iteritems() and works on both.)
    if headers is None:
      headers = {}
    for header_name, expected_header_value in request._headers.items():
      # Class rfc822.Message performs a case insensitive search on header names.
      header_value = headers.get(header_name, None)
      if expected_header_value != header_value:
        raise DirectorError(
            "Expected value '%s' for header name '%s', "
            "received '%s' for connection %s, exchange %s" %
            (expected_header_value, header_name, header_value,
             self._next_event._connection_index, self._next_event._exchange_index))
    self._finish_current_event()
    return exchange._response

  def is_done(self):
    """Returns whether the script has been fully run by the client."""
    self._ready_next_event()
    return self._next_event is None
class ScriptParseError(Exception):
  """Raised when elements of a Script could not be parsed."""

  def __init__(self, message):
    self._message = message

  def __repr__(self):
    return self._message

  def __str__(self):
    return repr(self)
def script_from_data(script_data, base_dir=None):
  """Returns a Script instance parsed from the given Python objects.

  ``script_data`` is a sequence of connections, each a sequence of exchange
  dicts with a required 'request' and optional 'response'. Relative
  body_filename values are resolved against ``base_dir`` (default: cwd).
  Raises ScriptParseError on any malformed or missing key.
  """
  if not base_dir:
    base_dir = os.getcwd()
  connections = []
  for i, connection_data in enumerate(script_data, 1):
    exchanges = []
    # Only the last exchange of a connection may lack a response.
    reached_no_reply = False
    for j, exchange_data in enumerate(connection_data, 1):
      if reached_no_reply:
        raise ScriptParseError(
            "Reply missing for exchange preceding connection %s, exchange %s" % (i, j))
      request_data = exchange_data.get('request', None)
      if request_data is None:
        raise ScriptParseError(
            "Missing 'request' key for connection %s, exchange %s" % (i, j))
      # Get and validate the required method.
      method = request_data.get('method', None)
      if method is None:
        raise ScriptParseError(
            "Missing 'method' key for request in connection %s, exchange %s" % (i, j))
      method_upper = method.upper()
      # NOTE(review): method_upper is used only for validation; the request
      # stores the method with its original casing.
      if method_upper not in ('HEAD', 'GET', 'PUT', 'POST', 'DELETE'):
        raise ScriptParseError(
            "Invalid method '%s' for request in connection %s, exchange %s" %
            (method, i, j))
      # Get the required URL.
      url = request_data.get('url', None)
      if not url:
        raise ScriptParseError(
            "Missing 'url' key for request in connection %s, exchange %s" % (i, j))
      # Get the optional headers and body.
      headers = request_data.get('headers', {})
      body = request_data.get('body', None)
      body_filename = request_data.get('body_filename', None)
      body_type = request_data.get('body_type', None)
      if body_type:
        body_type = body_type.lower()
        if body_type != 'json':
          raise ScriptParseError(
              "Invalid body type '%s' for request in connection %s, exchange %s" %
              (body_type, i, j))
      # Create the request.
      if body and body_filename:
        raise ScriptParseError(
            "Found both 'body' and 'body_filename' keys for request in "
            "connection %s, exchange %s" % (i, j))
      elif body:
        # Create the request with the given body.
        request = Exchange.Request.request_with_body(
            method, url, body, body_type, headers)
      elif body_filename:
        # Create the request with a body from the given filename.
        if not os.path.isabs(body_filename):
          body_filename = os.path.normpath(os.path.join(base_dir, body_filename))
        request = Exchange.Request.request_from_file(
            method, url, body_filename, body_type, headers)
      else:
        # Create a request with no body.
        request = Exchange.Request.request_with_no_body(method, url, headers)
      response_data = exchange_data.get('response', None)
      if response_data:
        # Get the required status code.
        status_code = response_data.get('status_code', None)
        if not status_code:
          raise ScriptParseError(
              "Missing 'status_code' key for response in connection %s, exchange %s" %
              (i, j))
        # Get the required content type.
        content_type = response_data.get('content_type', None)
        if not content_type:
          raise ScriptParseError(
              "Missing 'content_type' key for response in connection %s, exchange %s" %
              (i, j))
        # Get the optional headers and delay.
        headers = response_data.get('headers', {})
        delay = response_data.get('delay', 0)
        body = response_data.get('body', None)
        body_filename = response_data.get('body_filename', None)
        if body and body_filename:
          raise ScriptParseError(
              "Found both 'body' and 'body_filename' keys for response in "
              "connection %s, exchange %s" % (i, j))
        elif body:
          # Create the response with the given body.
          response = Exchange.Response.response_with_body(
              status_code, content_type, body, headers, delay)
        elif body_filename:
          if not os.path.isabs(body_filename):
            body_filename = os.path.normpath(os.path.join(base_dir, body_filename))
          # Create the response with a body from the given filename.
          response = Exchange.Response.response_from_file(
              status_code, content_type, body_filename, headers, delay)
        else:
          raise ScriptParseError(
              "Missing both 'body' and 'body_filename' keys for response in "
              "connection %s, exchange %s" % (i, j))
      else:
        # There is no response for this request.
        reached_no_reply = True
        response = None
      exchange = Exchange(request, response)
      exchanges.append(exchange)
    connection = Connection(exchanges)
    connections.append(connection)
  return Script(connections)
def script_from_json_string(json_string, base_dir=None):
  """Returns a Script instance parsed from the given string containing JSON.
  """
  parsed = json.loads(json_string) or []
  return script_from_data(parsed, base_dir)
def script_from_yaml_string(yaml_string, base_dir=None):
  """Returns a Script instance parsed from the given string containing YAML.

  Requires the third-party PyYAML package, imported lazily so JSON-only
  usage does not need it installed.
  """
  # The PyYAML library, see http://pyyaml.org/
  import yaml
  raw_yaml = yaml.safe_load(yaml_string)
  if not raw_yaml:
    raw_yaml = []
  return script_from_data(raw_yaml, base_dir)
def _dirname_for_filename(filename):
return os.path.dirname(os.path.abspath(filename))
def script_from_json_file(json_filename):
  """Reads the contents of the given filename and returns a Script instance
  parsed from the contained JSON.
  """
  # Use a context manager so the file is closed even if reading raises.
  with open(json_filename, 'r') as f:
    json_string = f.read()
  return script_from_json_string(
      json_string, _dirname_for_filename(json_filename))
def script_from_yaml_file(yaml_filename):
  """Reads the contents of the given filename and returns a Script instance
  parsed from the contained YAML.
  """
  # Use a context manager so the file is closed even if reading raises.
  with open(yaml_filename, 'r') as f:
    yaml_string = f.read()
  return script_from_yaml_string(
      yaml_string, _dirname_for_filename(yaml_filename))
class DirectorRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A request handler that uses the given Director instance to verify the
  script.

  NOTE(review): this module targets Python 2 (BaseHTTPServer,
  dict.iteritems, ``print >> sys.stderr``) — confirm before running on
  Python 3.
  """
  @staticmethod
  def set_director(director):
    """Sets the director for use over the lifetime of the web server."""
    # Stored on the class because BaseHTTPServer creates a new handler
    # instance per connection.
    DirectorRequestHandler._director = director
    DirectorRequestHandler._script_error = False
    DirectorRequestHandler._script_done = False
  def setup(self):
    BaseHTTPServer.BaseHTTPRequestHandler.setup(self)
    # Allow persistent connections.
    self.protocol_version = 'HTTP/1.1'
  def handle_request(self):
    """Verifies one request against the script and sends any scripted reply."""
    # Get the HTTP method and URL of the request.
    method = self.command
    url = self.path
    headers = self.headers
    # Get the body of the request.
    content_length = self.headers.get('Content-Length', None)
    if content_length:
      content_length = int(content_length)
      body = self.rfile.read(content_length)
      if not body:
        body = None
    else:
      body = None
    response = DirectorRequestHandler._director.got_request(method, url, headers, body)
    if response:
      # Simulate long-polling by delaying the reply.
      time.sleep(response._delay)
      # Get the body of the response.
      if response._body:
        body = response._body
        file_size = len(body)
      else:
        f = open(response._body_filename, 'rb')
        fs = os.fstat(f.fileno())
        body = f.read()
        file_size = fs.st_size
        f.close()
      # Send the headers of the response.
      self.send_response(response._status_code)
      self.send_header('Content-Type', response._content_type)
      self.send_header('Content-Length', file_size)
      for header_name, header_value in response._headers.iteritems():
        self.send_header(header_name, header_value)
      self.end_headers()
      # Send the body to conclude the response.
      self.wfile.write(body)
    DirectorRequestHandler._script_done = DirectorRequestHandler._director.is_done()
  def do_HEAD(self):
    self.handle_request()
  def do_GET(self):
    self.handle_request()
  def do_POST(self):
    self.handle_request()
  def do_PUT(self):
    self.handle_request()
  def do_DELETE(self):
    self.handle_request()
  def handle(self):
    """Wraps the whole connection so a script deviation stops the server."""
    try:
      DirectorRequestHandler._director.connection_opened()
      BaseHTTPServer.BaseHTTPRequestHandler.handle(self)
      DirectorRequestHandler._director.connection_closed()
      DirectorRequestHandler._script_done = DirectorRequestHandler._director.is_done()
    except DirectorError as e:
      # Exceptions raised from handle_request will also be caught here.
      print >> sys.stderr, 'ERROR: ', repr(e)
      DirectorRequestHandler._script_error = True
if __name__ == '__main__':
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('--port', type=int, required=False, default=8080,
      help='Port the web server should run on')
  arg_parser.add_argument('--json_filename', type=str, required=False, default='',
      help='JSON input file for expected requests and replies')
  arg_parser.add_argument('--yaml_filename', type=str, required=False, default='',
      help='YAML input file for expected requests and replies')
  parsed_args = arg_parser.parse_args()
  # Create the script from the provided filename; exactly one of the two
  # input flags must be given.
  if parsed_args.json_filename and parsed_args.yaml_filename:
    print >> sys.stderr, 'Cannot specify both --json_filename and --yaml_filename.'
    sys.exit(0)
  elif parsed_args.json_filename:
    script = script_from_json_file(parsed_args.json_filename)
  elif parsed_args.yaml_filename:
    script = script_from_yaml_file(parsed_args.yaml_filename)
  else:
    print >> sys.stderr, 'Must specify either --json_filename or --yaml_filename.'
    sys.exit(0)
  # Create the Director instance and begin serving.
  director = Director(script)
  DirectorRequestHandler.set_director(director)
  # Serve on the specified port until the script is finished or not followed.
  server = SocketServer.TCPServer(("", parsed_args.port), DirectorRequestHandler)
  while (not DirectorRequestHandler._script_done and
      not DirectorRequestHandler._script_error):
    server.handle_request()
9721fffc15f2c57f19b24cf0fb6619bc0ada91c0 | 84 | py | Python | fpipe/meta/blocksize.py | vkvam/fpipe | 2905095f46923c6c4c460c3d154544b654136df4 | [
"MIT"
] | 18 | 2019-12-16T17:55:57.000Z | 2020-10-21T23:25:40.000Z | fpipe/meta/blocksize.py | vkvam/fpipe | 2905095f46923c6c4c460c3d154544b654136df4 | [
"MIT"
] | 23 | 2019-12-11T14:15:08.000Z | 2020-02-17T12:53:21.000Z | fpipe/meta/blocksize.py | vkvam/fpipe | 2905095f46923c6c4c460c3d154544b654136df4 | [
"MIT"
] | null | null | null | from fpipe.meta.abstract import FileData
class BlockSize(FileData[int]):
    """Marker FileData subclass whose value is an int (a block size, per the name)."""
    pass
| 14 | 40 | 0.761905 |
c2fa921dc92ca053397520e6bc5d6722598844cc | 557 | py | Python | Python Files/pickle1.py | Nmane1612/Nihar-Mane | aa4db52a384f706a431c994e4e12d8e5e0cd8402 | [
"Apache-2.0"
] | 3 | 2019-12-27T06:07:37.000Z | 2020-07-01T08:51:32.000Z | Python Files/pickle1.py | Nihar16/Personal-Repository | 0d319eeb8dbfa3f7c3eb8501829939ecd6923464 | [
"MIT"
] | null | null | null | Python Files/pickle1.py | Nihar16/Personal-Repository | 0d319eeb8dbfa3f7c3eb8501829939ecd6923464 | [
"MIT"
] | null | null | null | import pickle
class Human:
    """Person record whose name and age are read interactively from stdin."""
    def __init__(self):
        self.name=input("Enter your name : ")
        self.age=input("Enter your age : ")
    def disp(self):
        """Print a greeting with the stored name and age."""
        print("Hello {}, You are {} year old!".format(self.name,self.age))
# Serialize a Human built from interactive input to Human.dat.
with open("Human.dat", "wb") as f:
    insaan=Human()
    pickle.dump(insaan,f)
#f.seek(0,0)
# Load the object back and display it; an empty/truncated file raises EOFError.
with open("Human.dat", "rb") as f:
    try:
        maanav=pickle.load(f)
        maanav.disp()
        print(maanav.name)
    except EOFError:
        print("Done with object")
| 24.217391 | 75 | 0.533214 |
03c52e6274b93da20235aa84376f605ad17a2f0e | 10,488 | py | Python | bumps/lsqerror.py | e-rus/bumps | 080ff80f939f3edf54a1fdc425e3f333d42ee8c4 | [
"MIT"
] | null | null | null | bumps/lsqerror.py | e-rus/bumps | 080ff80f939f3edf54a1fdc425e3f333d42ee8c4 | [
"MIT"
] | null | null | null | bumps/lsqerror.py | e-rus/bumps | 080ff80f939f3edf54a1fdc425e3f333d42ee8c4 | [
"MIT"
] | null | null | null | r"""
Least squares error analysis.
Given a data set with gaussian uncertainty on the points, and a model which
is differentiable at the minimum, the parameter uncertainty can be estimated
from the covariance matrix at the minimum. The model and data are wrapped in
a problem object, which must define the following methods:
============ ============================================
getp() get the current value of the model
setp(p) set a new value in the model
nllf(p) negative log likelihood function
residuals(p) residuals around its current value
bounds() get the bounds on the parameter p [optional]
============ ============================================
:func:`jacobian` computes the Jacobian matrix $J$ using numerical
differentiation on residuals. Derivatives are computed using the center
point formula, with two evaluations per dimension. If the problem has
analytic derivatives with respect to the fitting parameters available,
then these should be used to compute the Jacobian instead.
:func:`hessian` computes the Hessian matrix $H$ using numerical
differentiation on nllf. This uses the center point formula, with
two evaluations for each (i,j) combination.
:func:`cov` takes the Jacobian and computes the covariance matrix $C$.
:func:`corr` uses the off-diagonal elements of $C$ to compute correlation
coefficients $R^2_{ij}$ between the parameters.
:func:`stderr` computes the uncertain $\sigma_i$ from covariance matrix $C$,
assuming that the $C_\text{diag}$ contains $\sigma_i^2$, which should be
the case for functions which are approximately linear near the minimum.
:func:`max_correlation` takes $R^2$ and returns the maximum correlation.
The user should be shown the uncertainty $\sigma_i$ for each parameter,
and if there are strong parameter correlations (e.g., $R^2_\text{max} > 0.2$),
the correlation matrix as well.
The bounds method for the problem is optional, and is used only to determine
the step size needed for the numerical derivative. If bounds are not present
and finite, the current value for the parameter is used as a basis to
estimate step size.
"""
from __future__ import print_function
import numpy as np
#from . import numdifftools as nd
#import numdifftools as nd
# TODO: restructure lsqerror to use mapper for evaluating multiple f
# doesn't work for jacobian since mapper returns nllf; would need to
# expand mapper to implement a variety of different functions.
def jacobian(problem, p=None, step=None):
    """
    Returns the derivative wrt the fit parameters at point p.
    Numeric derivatives are calculated based on step, where step is
    the portion of the total range for parameter j, or the portion of
    point value p_j if the range on parameter j is infinite.
    The current point is preserved.
    """
    # Remember the model's current point so it can be restored afterwards.
    p_init = problem.getp()
    if p is None:
        p = p_init
    p = np.asarray(p)
    # bounds is optional on the problem; fall back to None when absent.
    bounds = getattr(problem, 'bounds', lambda: None)()
    def f(p):
        # Evaluating residuals requires pushing the trial point into the model.
        problem.setp(p)
        return problem.residuals()
    J = _jacobian_forward(f, p, bounds, eps=step)
    #J = nd.Jacobian(problem.residuals)(p)
    # Restore the original point (f() mutated the model state).
    problem.setp(p_init)
    return J
def _jacobian_forward(f, p, bounds, eps=None):
n = len(p)
# TODO: default to double precision epsilon
step = 1e-4 if eps is None else np.sqrt(eps)
fx = f(p)
#print("p",p,"step",step)
h = abs(p)*step
h[h == 0] = step
if bounds is not None:
h[h+p > bounds[1]] *= -1.0 # step backward if forward step is out of bounds
ee = np.diag(h)
J = []
for i in range(n):
J.append((f(p + ee[i, :]) - fx)/h[i])
return np.vstack(J).T
def _jacobian_central(f, p, bounds, eps=None):
n = len(p)
# TODO: default to double precision epsilon
step = 1e-4 if eps is None else np.sqrt(eps)
#print("p",p,"step",step)
h = abs(p)*step
h[h == 0] = step
#if bounds is not None:
# h[h+p>bounds[1]] *= -1.0 # step backward if forward step is out of bounds
ee = np.diag(h)
J = []
for i in range(n):
J.append((f(p + ee[i, :]) - f(p - ee[i, :])) / (2.0*h[i]))
return np.vstack(J).T
def hessian(problem, p=None, step=None):
    """
    Returns the derivative wrt to the fit parameters at point p.
    The current point is preserved.
    """
    # Remember the model's current point so it can be restored afterwards.
    p_init = problem.getp()
    if p is None:
        p = p_init
    p = np.asarray(p)
    # bounds is optional on the problem; fall back to None when absent.
    bounds = getattr(problem, 'bounds', lambda: None)()
    # Differentiate the scalar negative log likelihood directly.
    H = _hessian_forward(problem.nllf, p, bounds=bounds, eps=step)
    #H = nd.Hessian(problem.nllf)(p)
    #print("Hessian",H)
    problem.setp(p_init)
    return H
def _hessian_forward(f, p, bounds, eps=None):
# type: (Callable[[np.ndarray], float], np.ndarray, Optional[np.ndarray]) -> np.ndarray
"""
Forward difference Hessian.
"""
n = len(p)
# TODO: default to double precision epsilon
step = 1e-4 if eps is None else np.sqrt(eps)
fx = f(p)
#print("p",p,"step",step)
h = abs(p)*step
h[h == 0] = step
if bounds is not None:
h[h+p > bounds[1]] *= -1.0 # step backward if forward step is out of bounds
ee = np.diag(h)
g = np.empty(n, 'd')
for i in range(n):
g[i] = f(p + ee[i, :])
#print("fx",fx)
#print("h",h, h[0])
#print("g",g)
H = np.empty((n, n), 'd')
for i in range(n):
for j in range(i, n):
fx_ij = f(p + ee[i, :] + ee[j, :])
#print("fx_%d%d=%g"%(i,j,fx_ij))
H[i, j] = (fx_ij - g[i] - g[j] + fx) / (h[i]*h[j])
H[j, i] = H[i, j]
return H
def _hessian_central(f, p, bounds, eps=None):
# type: (Callable[[np.ndarray], float], np.ndarray, Optional[np.ndarray]) -> np.ndarray
"""
Central difference Hessian.
"""
n = len(p)
# TODO: default to double precision epsilon
step = 1e-4 if eps is None else np.sqrt(eps)
#step = np.sqrt(step)
fx = f(p)
h = abs(p)*step
h[h == 0] = step
# TODO: handle bounds on central difference formula
#if bounds is not None:
# h[h+p>bounds[1]] *= -1.0 # step backward if forward step is out of bounds
ee = np.diag(h)
gp = np.empty(n, 'd')
gm = np.empty(n, 'd')
for i in range(n):
gp[i] = f(p + ee[i, :])
gm[i] = f(p - ee[i, :])
H = np.empty((n, n), 'd')
for i in range(n):
for j in range(i, n):
fp_ij = f(p + ee[i, :] + ee[j, :])
fm_ij = f(p - ee[i, :] - ee[j, :])
#print("fx_%d%d=%g"%(i,j,fx_ij))
H[i, j] = (fp_ij - gp[i] - gp[j] + fm_ij - gm[i] - gm[j] + 2.0*fx) / (2.0*h[i]*h[j])
H[j, i] = H[i,j]
return H
def perturbed_hessian(H, scale=None):
    """
    Adjust Hessian matrix to be positive definite.
    Returns the adjusted Hessian and its Cholesky decomposition.
    """
    # Imported lazily to avoid a hard dependency at module import time.
    from .quasinewton import modelhess
    n = H.shape[0]
    if scale is None:
        # Unit scaling for every parameter by default.
        scale = np.ones(n)
    # Machine epsilon for double precision, used as the perturbation floor.
    macheps = np.finfo('d').eps
    return modelhess(n, scale, macheps, H)
def chol_stderr(L):
    """Parameter uncertainties from the Cholesky factor *L* of the Hessian.

    Such a factor comes e.g. from the quasi-Newton BFGS optimizer, or from
    :func:`perturbed_hessian` applied to the output of :func:`hessian` on the
    cost function problem.nllf.
    """
    diagonal = np.diag(L)
    return np.sqrt(1. / diagonal)
def chol_cov(L):
    """Covariance matrix $C = H^{-1}$ from the Cholesky decomposition of $H$.

    Computes $L^{-H} L^{-1}$, which equals $(L L^H)^{-1}$.
    """
    L_inverse = np.linalg.inv(L)
    return L_inverse.T.conj() @ L_inverse
def cov(J, tol=1e-8):
    """Covariance matrix inv(J'J) for the Jacobian *J*.

    Computed via the thin SVD J = U S V', so inv(J'J) = V inv(S^2) V'.
    Singular values at or below *tol* are clamped to *tol* as protection
    against (near-)singular matrices.
    """
    _, singular_values, vt = np.linalg.svd(J, full_matrices=False)
    # Clamp tiny singular values so the pseudo-inverse stays bounded.
    singular_values[singular_values <= tol] = tol
    return np.dot(vt.T.conj() / singular_values ** 2, vt)
def corr(C):
    """
    Convert covariance matrix $C$ to correlation matrix $R^2$.

    Uses $R = D^{-1} C D^{-1}$ where $D$ is the diagonal matrix formed from
    the square root of the diagonal of the covariance matrix, i.e., the
    standard error of each variable.
    """
    # Bug fix: the original referenced the module-level *function* cov instead
    # of the parameter C (NameError-in-waiting / wrong object), and multiplied
    # by a 1-D vector, which does not scale both rows and columns.  Dividing by
    # the outer product of the standard errors is the vectorized D^-1 C D^-1.
    sigma = np.sqrt(np.diag(C))
    return C / np.outer(sigma, sigma)
def max_correlation(Rsq):
    """Largest correlation coefficient between any pair of variables in *Rsq*.

    Only the strictly lower triangle is examined, so the unit diagonal does
    not dominate the result.
    """
    strictly_lower = np.tril(Rsq, k=-1)
    return strictly_lower.max()
def stderr(C):
    r"""Parameter uncertainty from the covariance matrix *C*.

    This is simply the square root of the diagonal, with no correction for
    covariance.  If the measurement uncertainty is unknown, scale the result
    by $\sqrt{\chi^2_N}$ (sum squared residuals over degrees of freedom) so
    the parameter uncertainty matches the observed scatter, assuming the
    model is correct and the fit optimal.  Note scipy.optimize.curve_fit
    always applies the chisq correction, while scipy.optimize.leastsq
    never does.
    """
    variances = np.diag(C)
    return variances ** 0.5
def demo_hessian():
    """Print forward- and central-difference Hessians of a Rosenbrock-style function."""
    # NOTE(review): the classic Rosenbrock coefficient is 100; 105 here looks
    # deliberate for the demo but is worth confirming.
    rosen = lambda x: (1.-x[0])**2 + 105*(x[1]-x[0]**2)**2
    p = np.array([1., 1.])
    H = _hessian_forward(rosen, p, bounds=None, eps=1e-16)
    print("forward difference H", H)
    H = _hessian_central(rosen, p, bounds=None, eps=1e-16)
    print("central difference H", H)
    #from . import numdifftools as nd
    #import numdifftools as nd
    #Hfun = nd.Hessian(rosen)
    #print("numdifftools H", Hfun(p))
def demo_jacobian():
    """Print forward- and central-difference Jacobians of a simple linear model."""
    y = np.array([1., 2., 3.])
    # f is linear in x, so both difference schemes should agree closely.
    f = lambda x: x[0]*y + x[1]
    p = np.array([2., 3.])
    J = _jacobian_forward(f, p, bounds=None, eps=1e-16)
    print("forward difference J", J)
    J = _jacobian_central(f, p, bounds=None, eps=1e-16)
    print("central difference J", J)
    #from . import numdifftools as nd
    #import numdifftools as nd
    #Jfun = nd.Jacobian(f)
    #print("numdifftools J", Jfun(p))
# Manual smoke test: print numeric derivatives for the demo functions.
if __name__ == "__main__":
    demo_hessian()
    demo_jacobian()
| 32.073394 | 96 | 0.617849 |
15a3b691f3c2c5cca60cfc69d2db93f09b5e997a | 340 | py | Python | caddy-gen/src/config.py | ElectronicElephant/mirror-docker-unified | d8d006fd19a7d7ef497bc025665fc7ff6fec53f9 | [
"Apache-2.0"
] | 16 | 2020-11-03T03:14:02.000Z | 2021-02-06T11:21:41.000Z | caddy-gen/src/config.py | ElectronicElephant/mirror-docker-unified | d8d006fd19a7d7ef497bc025665fc7ff6fec53f9 | [
"Apache-2.0"
] | 34 | 2020-11-02T15:57:46.000Z | 2021-02-11T03:11:32.000Z | caddy-gen/src/config.py | ElectronicElephant/mirror-docker-unified | d8d006fd19a7d7ef497bc025665fc7ff6fec53f9 | [
"Apache-2.0"
] | 4 | 2021-03-30T06:54:37.000Z | 2022-01-09T10:17:57.000Z | BASES = {
    "siyuan": ['mirror.sjtu.edu.cn'],
    "zhiyuan": ['mirrors.sjtug.sjtu.edu.cn'],
    "local": [':80']
}
# Upstream service addresses in host:port form.
# NOTE(review): these look like docker-compose service names — confirm.
LUG_ADDR = 'lug:7001'
SPEEDTEST_ADDR = 'speedtest:8989'
# Directory containing the static frontend distribution.
FRONTEND_DIR = '/dists'
# NOTE(review): a raw IP rather than a service name — presumably the host's
# bridge address for node_exporter; verify against the deployment.
NODE_EXPORTER_ADDR = '172.31.0.1:9100'
CADVISOR_ADDR = 'cadvisor:8080'
MIRROR_INTEL_ADDR = 'mirror-intel:8000'
| 24.285714 | 45 | 0.676471 |
86454be8ba6b4e73605bc34fbadbb56e722a2deb | 5,211 | py | Python | automation/services/coda-user-agent/agent.py | arjundashrath/mina | 629c6d767fbf2fdb7ce1ebd04337960c6f5baf55 | [
"Apache-2.0"
] | 929 | 2020-10-02T07:23:16.000Z | 2022-03-31T15:02:09.000Z | automation/services/coda-user-agent/agent.py | arjundashrath/mina | 629c6d767fbf2fdb7ce1ebd04337960c6f5baf55 | [
"Apache-2.0"
] | 3,154 | 2020-09-29T15:47:44.000Z | 2022-03-31T16:22:28.000Z | automation/services/coda-user-agent/agent.py | arjundashrath/mina | 629c6d767fbf2fdb7ce1ebd04337960c6f5baf55 | [
"Apache-2.0"
] | 216 | 2020-09-29T19:47:41.000Z | 2022-03-27T08:44:29.000Z | from CodaClient import Client, Currency, CurrencyFormat
import os
import schedule
import time
import urllib3
import random
from requests.exceptions import ConnectionError
from prometheus_client import Counter, start_http_server
def getenv_default_map(env_var: str, f, default):
    """Read *env_var* and map it through *f*, or return *default* when unset.

    Args:
        env_var: name of the environment variable to read.
        f: one-argument callable applied to the raw string value.
        default: value returned when the variable is not set.
    """
    value = os.getenv(env_var)
    # Idiom fix: compare against None with `is`, not `==` (PEP 8); `==` can
    # be hijacked by a custom __eq__ on the compared object.
    if value is None:
        return default
    else:
        return f(value)
def getenv_str(env_var: str, default: str) -> str:
    """Return the environment variable's value (or *default*), whitespace-stripped."""
    raw = os.getenv(env_var, default)
    return raw.strip()
def getenv_int(env_var: str, default: int) -> int:
    # Parse an integer environment variable, falling back to *default* when unset.
    return getenv_default_map(env_var, int, default)
def getenv_currency(env_var: str, lower_bound: Currency, upper_bound: Currency) -> Currency:
    # Parse a Currency environment variable; when unset, fall back to a random
    # amount.  Note the Currency.random(...) default is evaluated eagerly on
    # every call, before the environment lookup.
    # NOTE(review): bounds are assumed inclusive — confirm against CodaClient.Currency.
    return getenv_default_map(env_var, Currency, Currency.random(lower_bound, upper_bound))
CODA_PUBLIC_KEY = getenv_str("CODA_PUBLIC_KEY", "4vsRCVyVkSRs89neWnKPrnz4FRPmXXrWtbsAQ31hUTSi41EkbptYaLkzmxezQEGCgZnjqY2pQ6mdeCytu7LrYMGx9NiUNNJh8XfJYbzprhhJmm1ZjVbW9ZLRvhWBXRqes6znuF7fWbECrCpQ")
MINA_PRIVKEY_PASS = getenv_str("MINA_PRIVKEY_PASS", "naughty blue worm")
AGENT_MIN_FEE = getenv_currency("AGENT_MIN_FEE", Currency("0.06"), Currency("0.1"))
AGENT_MAX_FEE = getenv_currency("AGENT_MAX_FEE", AGENT_MIN_FEE, AGENT_MIN_FEE + Currency("0.2"))
AGENT_MIN_TX = getenv_currency("AGENT_MIN_TX", Currency("0.0015"), Currency("0.005"))
AGENT_MAX_TX = getenv_currency("AGENT_MAX_TX", AGENT_MIN_TX, AGENT_MIN_TX + Currency("0.01"))
AGENT_TX_BATCH_SIZE = getenv_int("AGENT_TX_BATCH_SIZE", 1)
AGENT_SEND_EVERY_MINS = getenv_int("AGENT_SEND_EVERY_MINS", random.randint(1, 5))
AGENT_METRICS_PORT = getenv_int("AGENT_METRICS_PORT", 8000)
CODA_CLIENT_ARGS = {
"graphql_host": getenv_str("CODA_HOST", "localhost"),
"graphql_port": getenv_str("CODA_PORT", "3085")
}
## Prometheus Metrics
TRANSACTIONS_SENT = Counter('transactions_sent', 'Number of transactions agent has sent since boot.')
TRANSACTION_ERRORS = Counter('transaction_errors', 'Number of errors that occurred while sending transactions.')
class Agent(object):
    """Represents a generic agent that operates on the coda blockchain"""
    def __init__(self, client_args, public_key, privkey_pass, min_tx_amount=AGENT_MIN_TX, max_tx_amount=AGENT_MAX_TX, min_fee_amount=AGENT_MIN_FEE, max_fee_amount=AGENT_MAX_FEE):
        # GraphQL client for the local daemon; client_args carries host/port.
        self.coda = Client(**client_args)
        self.public_key = public_key
        self.privkey_pass = privkey_pass
        # Inclusive-looking ranges used by Currency.random when sending.
        self.min_tx_amount = min_tx_amount
        self.max_tx_amount = max_tx_amount
        self.min_fee_amount = min_fee_amount
        self.max_fee_amount = max_fee_amount
        # Destination wallet, created lazily on first send.
        self.to_account = None
    def get_to_account(self):
        """Return the cached destination wallet, creating one on first use."""
        if not self.to_account:
            print("Getting new wallet to send to...")
            response = self.coda.create_wallet(self.privkey_pass)
            self.to_account = response["createAccount"]["publicKey"]
            print("Public Key: {}".format(self.to_account))
        return self.to_account
    def unlock_wallet(self):
        """Unlock the sending wallet so payments can be signed."""
        response = self.coda.unlock_wallet(self.public_key, self.privkey_pass)
        print("Unlocked Wallet!")
        return response
    def send_transaction(self):
        """Send one randomly-sized payment; returns the daemon response or None on error."""
        print("---Sending Transaction---")
        try:
            to_account = self.get_to_account()
            print("Trying to unlock Wallet!")
            self.unlock_wallet()
        except ConnectionError:
            # Daemon unreachable; count it and give up on this attempt.
            print("Transaction Failed due to connection error... is the Daemon running?")
            TRANSACTION_ERRORS.inc()
            return None
        except Exception as e:
            print("Error unlocking wallet...")
            print(e)
            return None
        # Randomize amount and fee within the configured ranges.
        tx_amount = Currency.random(self.min_tx_amount, self.max_tx_amount)
        fee_amount = Currency.random(self.min_fee_amount, self.max_fee_amount)
        try:
            response = self.coda.send_payment(to_account, self.public_key, tx_amount, fee_amount, memo="BeepBoop")
        except Exception as e:
            print("Error sending transaction...", e)
            TRANSACTION_ERRORS.inc()
            return None
        # GraphQL-style responses report failures via an "errors" key.
        if not response.get("errors", None):
            print("Sent a Transaction {}".format(response))
            TRANSACTIONS_SENT.inc()
        else:
            print("Error sending transaction: Request: {} Response: {}".format(self.public_key, response))
            TRANSACTION_ERRORS.inc()
        return response
    def send_transaction_batch(self):
        """Send AGENT_TX_BATCH_SIZE transactions; returns the list of responses."""
        responses = []
        for i in range(AGENT_TX_BATCH_SIZE):
            responses.append(self.send_transaction())
        return responses
def main():
    """Run the agent forever, sending a transaction batch on a fixed schedule."""
    agent = Agent(CODA_CLIENT_ARGS, CODA_PUBLIC_KEY, MINA_PRIVKEY_PASS)
    schedule.every(AGENT_SEND_EVERY_MINS).minutes.do(agent.send_transaction_batch)
    print("Sending a transaction every {} minutes.".format(AGENT_SEND_EVERY_MINS))
    # Busy-wait loop: let `schedule` fire due jobs, then sleep briefly.
    while True:
        schedule.run_pending()
        sleep_time = 10
        print("Sleeping for {} seconds...".format(sleep_time))
        time.sleep(sleep_time)
if __name__ == "__main__":
    print("Starting up...")
    # Expose Prometheus metrics before doing any work.
    start_http_server(AGENT_METRICS_PORT)
    print("Metrics on Port {}".format(AGENT_METRICS_PORT))
    # NOTE(review): fixed 20-minute startup delay — presumably waiting for the
    # daemon/network to come up; confirm whether this should be configurable.
    print("Sleeping for 20 minutes...")
    time.sleep(60*20)
    main()
| 40.395349 | 195 | 0.701401 |
aabeb2024104aa7b0084645d68d6a99243fbf766 | 3,992 | py | Python | dialogue-engine/test/programytest/config/brain/test_services.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 104 | 2020-03-30T09:40:00.000Z | 2022-03-06T22:34:25.000Z | dialogue-engine/test/programytest/config/brain/test_services.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 25 | 2020-06-12T01:36:35.000Z | 2022-02-19T07:30:44.000Z | dialogue-engine/test/programytest/config/brain/test_services.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 10 | 2020-04-02T23:43:56.000Z | 2021-05-14T13:47:01.000Z | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.config.brain.services import BrainServicesConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
class BrainServicesConfigurationTests(unittest.TestCase):
    """Exercise BrainServicesConfiguration loading from YAML brain sections."""
    def test_with_data(self):
        """A fully-populated services section exposes each named service."""
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        brain:
            services:
                REST:
                    classname: programy.services.rest.GenericRESTService
                    method: GET
                    host: 0.0.0.0
                Pannous:
                    classname: programy.services.pannous.PannousService
                    url: http://weannie.pannous.com/api
                Pandora:
                    classname: programy.services.pandora.PandoraService
                    url: http://www.pandorabots.com/pandora/talk-xml
                Wikipedia:
                    classname: programy.services.wikipediaservice.WikipediaService
        """, ConsoleConfiguration(), ".")
        brain_config = yaml.get_section("brain")
        services_config = BrainServicesConfiguration()
        services_config.load_config_section(yaml, brain_config, ".")
        self.assertTrue(services_config.exists("REST"))
        self.assertTrue(services_config.exists("Pannous"))
        self.assertTrue(services_config.exists("Pandora"))
        self.assertTrue(services_config.exists("Wikipedia"))
        self.assertFalse(services_config.exists("Other"))
    def test_without_data(self):
        """An empty services key yields a configuration with no services."""
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        brain:
            services:
        """, ConsoleConfiguration(), ".")
        brain_config = yaml.get_section("brain")
        services_config = BrainServicesConfiguration()
        services_config.load_config_section(yaml, brain_config, ".")
        self.assertFalse(services_config.exists("REST"))
        self.assertFalse(services_config.exists("Pannous"))
        self.assertFalse(services_config.exists("Pandora"))
        self.assertFalse(services_config.exists("Wikipedia"))
        self.assertFalse(services_config.exists("Other"))
    def test_with_no_data(self):
        """A brain section with no services key at all also yields no services."""
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        brain:
        """, ConsoleConfiguration(), ".")
        brain_config = yaml.get_section("brain")
        services_config = BrainServicesConfiguration()
        services_config.load_config_section(yaml, brain_config, ".")
        self.assertFalse(services_config.exists("REST"))
        self.assertFalse(services_config.exists("Pannous"))
        self.assertFalse(services_config.exists("Pandora"))
        self.assertFalse(services_config.exists("Wikipedia"))
        self.assertFalse(services_config.exists("Other"))
| 43.391304 | 126 | 0.693888 |
d6d73857f6c360a07d370d03e4f7452d9cfab30f | 828 | py | Python | Python/OOP/abstract.py | salikansari6/interview-prep | 17e98fbb15f87c9f2ecd293896e613f5fe01d3a3 | [
"MIT"
] | 1 | 2021-08-23T18:18:41.000Z | 2021-08-23T18:18:41.000Z | Python/OOP/abstract.py | salikansari6/interview-prep | 17e98fbb15f87c9f2ecd293896e613f5fe01d3a3 | [
"MIT"
] | null | null | null | Python/OOP/abstract.py | salikansari6/interview-prep | 17e98fbb15f87c9f2ecd293896e613f5fe01d3a3 | [
"MIT"
] | 1 | 2021-08-24T15:40:15.000Z | 2021-08-24T15:40:15.000Z | from abc import ABC, abstractmethod
class Computer(ABC):
    # Abstract base class: subclasses must implement process() to be instantiable.
    @abstractmethod
    def process(self):
        pass
class Laptop(Computer):
    # Concrete implementation of the abstract process() hook.
    def process(self):
        print("running")
# com = Computer() => TypeError: Can't instantiate abstract class Computer with abstract methods process
# Instantiating the concrete subclass works and dispatches to Laptop.process.
com = Laptop()
com.process()
# Python program showing
# abstract base class work
class Animal(ABC):
    # NOTE(review): move() is not decorated @abstractmethod, so Animal is
    # still instantiable despite inheriting from ABC — confirm intent.
    def move(self):
        pass
class Human(Animal):
    # Override of Animal.move with human-specific behavior.
    def move(self):
        print("I can walk and run")
class Snake(Animal):
    # Override of Animal.move with snake-specific behavior.
    def move(self):
        print("I can crawl")
class Dog(Animal):
    # Override of Animal.move with dog-specific behavior.
    def move(self):
        print("I can bark")
class Lion(Animal):
    # Override of Animal.move with lion-specific behavior.
    def move(self):
        print("I can roar")
# Driver code
# Each call dispatches to the subclass override — a polymorphism demo.
R = Human()
R.move()
K = Snake()
K.move()
R = Dog()
R.move()
K = Lion()
K.move()
| 12.545455 | 104 | 0.614734 |
af67a4bfb7306e2100da84d0dda7f109248bbb03 | 6,444 | py | Python | radon_transformation/radon.py | drgHannah/Radon-Transformation | 68eac9c3c4747b0c1580be83b5967b0c58d028be | [
"MIT"
] | 1 | 2022-03-29T06:19:36.000Z | 2022-03-29T06:19:36.000Z | radon_transformation/radon.py | drgHannah/Radon-Transformation | 68eac9c3c4747b0c1580be83b5967b0c58d028be | [
"MIT"
] | null | null | null | radon_transformation/radon.py | drgHannah/Radon-Transformation | 68eac9c3c4747b0c1580be83b5967b0c58d028be | [
"MIT"
] | null | null | null | '''
File name: radon.py
Author: Hannah Dröge
Date created: 4/22/2021
Python Version: 3.6
'''
import torch
import numpy as np
import matplotlib.pyplot as plt
from radon_transformation.filter import rampfilter
class radon(torch.nn.Module):
    '''
    Radon Transformation
    Args:
        n_angles (int): number of projection angles for radon transformation (default: 1000)
        image_size (int): edge length of input image (default: 400)
        device: (str): device can be either "cuda" or "cpu" (default: cuda)
    '''
    def __init__(self, n_angles=1000, image_size=400, device="cuda"):
        super(radon, self).__init__()
        self.n_angles=n_angles
        # get angles: n_angles values evenly spaced over [0, pi), shaped for broadcasting
        thetas = torch.linspace(0, np.pi-(np.pi/n_angles), n_angles)[:,None,None].to(device)
        cos_al, sin_al = thetas.cos(), thetas.sin()
        zeros = torch.zeros_like(cos_al)
        # calculate rotations: one 2x3 affine matrix (no translation) per angle
        rotations = torch.stack((cos_al,sin_al,zeros,-sin_al, cos_al,zeros),-1).reshape(-1,2,3)
        # Precompute the rotated sampling grids for all angles, flattened into
        # a single grid of shape (1, n_angles*image_size, image_size, 2).
        self.rotated = torch.nn.functional.affine_grid(rotations, torch.Size([n_angles, 1, image_size, image_size]), align_corners=True).reshape(1,-1,image_size,2)
    def forward(self, image):
        '''Apply radon transformation on input image.
        Args:
            image (torch.tensor, (bzs, 1, W, H)): input image
        Returns:
            out (torch.tensor, (bzs, 1, W, angles)): sinogram
        '''
        bsz, _, shape_size, _ = image.shape
        # Sample the image on every rotated grid, then split back out the angle axis.
        out_fl = torch.nn.functional.grid_sample(image, self.rotated.repeat(bsz,1,1,1), align_corners=True).reshape(bsz,1,self.n_angles,shape_size,shape_size)
        # Sum along one spatial axis (the line integral), angles become the last axis.
        out = out_fl.sum(3).permute(0,1,3,2)
        return out
class fbp(torch.nn.Module):
    '''
    Filtered Backprojection
    Args:
        n_angles (int): number of projection angles for filtered backprojection (default: 1000)
        image_size (int): edge length of input image (default: 400)
        circle (bool): project image values outside of circle to zero (default: False)
        filtered (bool): apply filter (default: True)
        device: (str): device can be either "cuda" or "cpu" (default: cuda)
    '''
    def __init__(self, n_angles=1000, image_size=400, circle = False, filtered=True, device="cuda"):
        super().__init__()
        self.image_size=image_size
        # One detector element per image column.
        det_count = image_size
        self.step_size = image_size/det_count
        self.n_angles = n_angles
        self.circle=circle
        self.filtered=filtered
        # padding values: pad projections to >=64 and a power of two for the FFT
        projection_size_padded = max(64, int(2 ** (2 * torch.tensor(det_count)).float().log2().ceil()))
        self.pad_width = (projection_size_padded - det_count)
        #filter: ramp filter matching the padded projection length
        self.filter = rampfilter(projection_size_padded).to(device)
        # get angles over [0, pi), matching the forward radon operator
        thetas = torch.linspace(0, np.pi-(np.pi/n_angles), n_angles)[:,None,None]
        # get grid [-1,1]
        grid_y, grid_x = torch.meshgrid(torch.linspace(-1,1,image_size), torch.linspace(-1,1,image_size))
        # get rotated grid: detector coordinate of each pixel for each angle
        tgrid = (grid_x*thetas.cos() - grid_y*thetas.sin()).unsqueeze(-1)
        # Pair each detector coordinate with its angle coordinate for grid_sample.
        y = torch.ones_like(tgrid) * torch.linspace(-1,1,n_angles)[:,None,None,None]
        self.grid = torch.cat((y,tgrid),dim=-1).view(self.n_angles * self.image_size, self.image_size, 2)[None].to(device)
        # Mask of pixels inside the inscribed circle (used when circle=True).
        self.reconstruction_circle = (grid_x ** 2 + grid_y ** 2) <= 1
    def forward(self, input):
        '''Apply (filtered) backprojection on input sinogram.
        Args:
            image (torch.tensor, (bzs, 1, W, angles)): sinogram
        Returns:
            out (torch.tensor, (bzs, 1, W, H)): reconstructed image
        '''
        bsz, _, det_count, _ = input.shape
        input = input.double()
        if self.filtered:
            # pad input along the detector axis up to the FFT-friendly length
            padded_input = torch.nn.functional.pad(input, [0, 0, 0, self.pad_width], mode='constant', value=0)
            # apply filter in the frequency domain, then crop back to det_count
            projection = torch.fft.fft(padded_input,dim=2) * self.filter[:,None].double()
            radon_filtered = torch.real(torch.fft.ifft(projection,dim=2))[:, :, :det_count, :]
        else:
            radon_filtered = input
        # reconstruct: sample every (angle, detector) contribution and sum over angles
        grid = self.grid.repeat(bsz,1,1,1).double()
        reconstructed = torch.nn.functional.grid_sample(radon_filtered, grid, mode="bilinear", padding_mode='zeros', align_corners=True)
        reconstructed = reconstructed.view(bsz, self.n_angles, 1, self.image_size, self.image_size).sum(1)
        reconstructed = reconstructed/self.step_size
        # circle: zero out values outside the inscribed reconstruction circle
        if self.circle:
            reconstructed_circle = self.reconstruction_circle.repeat(bsz,1,1,1).double()
            reconstructed[reconstructed_circle==0] = 0.
        # Standard FBP normalization factor pi / (2 * n_angles).
        return reconstructed * np.pi / (2 * self.n_angles)
def get_operators(n_angles=380, image_size=400, circle = False, device='cuda'):
    """Build a matching pair of Radon and filtered-backprojection operators.

    Args:
        n_angles (int): number of projection angles (default: 380)
        image_size (int): edge length of the square input image (default: 400)
        circle (bool): zero reconstruction values outside the inscribed circle
        device (str): either "cuda" or "cpu" (default: cuda)

    Returns:
        tuple: (radon operator, filtered-backprojection operator)
    """
    forward_op = radon(n_angles=n_angles, image_size=image_size, device=device)
    backward_op = fbp(n_angles=n_angles, image_size=image_size, circle=circle, device=device)
    return forward_op, backward_op
def test_adjoint():
    ''' Tests if Radon operator and Backprojection operator are adjoint
        by running <radon(x),y> / <x,fbp(y)>.
    '''
    # Small problem so the check runs quickly on CPU.
    n_angles = 50
    image_size = 100
    device = 'cpu'
    # load operators; filtering is disabled because the adjoint of radon is
    # the *unfiltered* backprojection.
    radon_op = radon(n_angles=n_angles, image_size=image_size, device=device)
    fbp_op = fbp(n_angles=n_angles, image_size=image_size, circle=False, device=device, filtered=False)
    # run operators on random tensors
    x = torch.rand([1,1,image_size,image_size]).to(device)
    y = torch.rand([1,1,image_size,n_angles]).to(device)
    # Inner products <Ax, y> and <x, A'y>; their ratio should be ~1 for a true adjoint.
    leftside = torch.sum(radon_op(x) * y).item()
    rightside = torch.sum(x * fbp_op(y)).item()
    # print
    print("\n<Ax,y>=", leftside," ----- <x,A'y>=", rightside)
    print('\n leftside/rightside: ',leftside/rightside)
| 42.117647 | 163 | 0.640596 |
3a24c2fc043c07f6f026f0f9aa53af4710becf01 | 1,170 | py | Python | Scripts/changeFont_interactive.py | Ifelsethendo/Blender-Game-Engine-Templates | b4cd92ee7e1c4e9ac8d10b4d843392856285f6ba | [
"Apache-2.0"
] | 5 | 2021-12-11T02:27:43.000Z | 2022-01-08T08:51:21.000Z | Scripts/changeFont_interactive.py | Ifelsethendo/Blender-Game-Engine-Templates | b4cd92ee7e1c4e9ac8d10b4d843392856285f6ba | [
"Apache-2.0"
] | null | null | null | Scripts/changeFont_interactive.py | Ifelsethendo/Blender-Game-Engine-Templates | b4cd92ee7e1c4e9ac8d10b4d843392856285f6ba | [
"Apache-2.0"
] | 2 | 2021-12-29T05:48:58.000Z | 2022-03-05T15:31:33.000Z | import bpy
import glob
import bge
import os
## get a list of all the fonts installed on Windows modify for your OS or font directory
fontList = glob.glob("C:\\Windows\\Fonts\\*.ttf")
## define this scripted text object to the game engine
scene = bge.logic.getCurrentScene()
cont = bge.logic.getCurrentController()
# The game object that owns the controller running this script.
TextObject = cont.owner
# Expose the font count and an init flag as game properties for the logic bricks.
TextObject['length'] = len(fontList)
TextObject['init'] = True
## choose font by current value of scripted objects count property
def cycleFonts():
    """Load the font selected by the owner's 'count' property onto the text object.

    On load failure the object's displayed text and 'Font' property are set to
    an error marker naming the offending font file.
    """
    # Only text-capable objects expose a .font attribute on their data block.
    if hasattr(TextObject.blenderObject.data, 'font' ) is True:
        fontName = os.path.basename(fontList[TextObject['count']])
        TextObject['Font'] = fontName
        ## if the font is unsupported then report with error
        try:
            newFont = bpy.data.fonts.load(fontList[TextObject['count']])
            TextObject['Text'] = os.path.basename(fontList[TextObject['count']])
            TextObject.blenderObject.data.font = newFont
        # Bug fix: a bare `except:` also swallows KeyboardInterrupt/SystemExit;
        # narrow it so only real runtime errors are treated as "bad font".
        except Exception:
            TextObject['Text'] = "error:\n" + fontName
            TextObject['Font'] = "error:" + fontName
| 36.5625 | 88 | 0.618803 |
90d5d107294a7fdbf61ee095c8fc4ed4b58f0fd9 | 7,623 | py | Python | neko3/features/noaa/cog.py | Natsurii/nicabot-monkee | 0f32132184c31bea0015f232c0abf3ec993129fa | [
"MIT"
] | null | null | null | neko3/features/noaa/cog.py | Natsurii/nicabot-monkee | 0f32132184c31bea0015f232c0abf3ec993129fa | [
"MIT"
] | null | null | null | neko3/features/noaa/cog.py | Natsurii/nicabot-monkee | 0f32132184c31bea0015f232c0abf3ec993129fa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Nekozilla is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nekozilla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nekozilla. If not, see <https://www.gnu.org/licenses/>.
"""
NOAA cog implementation.
"""
import io
import discord
import neko3.cog
from neko3 import neko_commands
from neko3 import pagination
from neko3 import theme
from . import utils
class NOAACog(neko3.cog.CogBase):
    @neko_commands.group(name="noaa", brief="Gets info from NOAA regarding US weather.")
    async def noaa_group(self, ctx):
        # Bare group invocation: show usage rather than doing any work.
        return await neko_commands.send_usage(ctx)
    async def _download(self, gif_url):
        """Fetch *gif_url* and return its body buffered in a BytesIO (for discord.File)."""
        async with self.acquire_http_session() as http:
            async with http.get(gif_url) as resp:
                # Surface HTTP errors to the caller instead of sending junk bytes.
                resp.raise_for_status()
                data = await resp.read()
        data = io.BytesIO(data)
        return data
@noaa_group.command(name="us", brief="Shows the US weather overview.")
async def us_overview_command(self, ctx):
async with ctx.typing():
png = await self._download(utils.OVERVIEW_MAP_US)
embed = theme.generic_embed(ctx, title=ctx.command.brief, description=utils.OVERVIEW_BASE)
embed.set_image(url="attachment://image.png")
await ctx.send(utils.OVERVIEW_BASE, file=discord.File(png, "image.png"))
@noaa_group.command(name="alaska", aliases=["ak"], brief="Shows the Alaskan weather overview.")
async def alaska_overview_command(self, ctx):
async with ctx.typing():
png = await self._download(utils.OVERVIEW_MAP_AK)
embed = theme.generic_embed(ctx, title=ctx.command.brief, description=utils.OVERVIEW_BASE)
embed.set_image(url="attachment://image.png")
await ctx.send(utils.OVERVIEW_BASE, file=discord.File(png, "image.png"))
@noaa_group.command(name="hawaii", aliases=["hi"], brief="Shows the Hawaiian weather overview.")
async def hawaii_overview_command(self, ctx):
async with ctx.typing():
png = await self._download(utils.OVERVIEW_MAP_HI)
embed = theme.generic_embed(ctx, title=ctx.command.brief, description=utils.OVERVIEW_BASE)
embed.set_image(url="attachment://image.png")
await ctx.send(utils.OVERVIEW_BASE, file=discord.File(png, "image.png"))
@noaa_group.group(name="radar", brief="Country-wide radar view for the US.")
async def radar_group(self, ctx, *, region=None):
"""
Call with the highres argument to send a full image. Pass a region name to view
that region instead.
"""
if region is None:
await self.radar_base(ctx)
else:
await self.radar_regional_search(ctx, region)
    async def radar_base(self, ctx):
        """Send the standard-resolution country-wide radar loop as an embed."""
        async with ctx.typing():
            gif = await self._download(utils.RADAR_US)
            embed = theme.generic_embed(
                ctx,
                title=ctx.command.brief,
                description=f"{utils.OVERVIEW_BASE}\n\nRun with the `highres` argument for higher resolution!",
            )
            embed.set_image(url="attachment://image.gif")
        await ctx.send(embed=embed, file=discord.File(gif, "image.gif"))
async def radar_regional_search_command(self, ctx, site):
    """Find the radar region closest to ``site`` and send its wide-view GIF."""
    async with ctx.typing():
        name, url = utils.get_wide_urls_radar_closest_match(site)
        gif = await self._download(url)
    # Bug fix: the attachment was hard-coded as "hawaii-overview.gif"
    # regardless of the matched region (copy-paste leftover); use the
    # generic "image.gif" name every sibling command uses.
    await ctx.send(
        f"Closest match was {name} -- {utils.OVERVIEW_BASE}", file=discord.File(gif, "image.gif")
    )
@radar_group.command(name="highres", aliases=["hires"], brief="Country-wide radar view for the US...BUT BIGGER")
async def high_res_us_radar_command(self, ctx):
    """Send the full-resolution country-wide radar animation."""
    async with ctx.typing():
        radar_gif = await self._download(utils.RADAR_FULL_US)
    embed = theme.generic_embed(ctx, title=ctx.command.brief, description=utils.OVERVIEW_BASE)
    embed.set_image(url="attachment://image.gif")
    await ctx.send(embed=embed, file=discord.File(radar_gif, "image.gif"))
@radar_group.command(name="hawaii", aliases=["hi"], brief="Shows the Hawaiian radar.")
async def hawaii_radar_command(self, ctx):
    """Send the Hawaii wide-view radar animation."""
    async with ctx.typing():
        # The matcher returns a (name, url) pair; only the URL is needed here.
        _, radar_url = utils.get_wide_urls_radar_closest_match("hawaii")
        radar_gif = await self._download(radar_url)
    embed = theme.generic_embed(ctx, title=ctx.command.brief, description=utils.OVERVIEW_BASE)
    embed.set_image(url="attachment://image.gif")
    await ctx.send(embed=embed, file=discord.File(radar_gif, "image.gif"))
@radar_group.command(name="alaska", aliases=["ak"], brief="Shows the Alaskan radar.")
async def alaska_radar_command(self, ctx):
    """Send the Alaska wide-view radar animation."""
    async with ctx.typing():
        # The matcher returns a (name, url) pair; only the URL is needed here.
        _, radar_url = utils.get_wide_urls_radar_closest_match("alaska")
        radar_gif = await self._download(radar_url)
    embed = theme.generic_embed(ctx, title=ctx.command.brief, description=utils.OVERVIEW_BASE)
    embed.set_image(url="attachment://image.gif")
    await ctx.send(embed=embed, file=discord.File(radar_gif, "image.gif"))
@noaa_group.command(name="RIDGE", aliases=["ridge", "local"], brief="Shows local radar layers.")
async def local_command(self, ctx, *, area):
    """Search by a NEXRAD radar site, or the location of the radar."""
    async with self.acquire_http_session() as http:
        layers = await utils.generate_ridge_images_closest_match(http, area)
    author = f"Closest match: {layers.radar_site}"
    title = layers.radar_location

    # Closure: builds one embed for this radar site; an optional ``image``
    # kwarg is popped off before the rest is forwarded to the theme helper.
    def embed(**kwargs):
        if "image" in kwargs:
            image = kwargs.pop("image")
        else:
            image = None
        embed = theme.generic_embed(ctx, title=title, url=layers.web_page, **kwargs)
        embed.set_author(name=author)
        if image:
            embed.set_image(url=image)
        return embed

    # Page factory used by the navigator for the text-forecast pages.
    @pagination.embed_generator(max_chars=2048, provides_numbering=False)
    def embed_generator(_, page, __):
        return embed(description=page)

    # One embed per RIDGE imagery layer, in display order.
    embeds = [
        embed(description="Base reflectivity (up to 248nmi)", image=layers.base_reflectivity_248nmi),
        embed(description="Base reflectivity (up to 124nm)", image=layers.base_reflectivity_124nm),
        embed(description="One hour precipitation", image=layers.one_hour_precipitation),
        embed(description="Composite reflectivity", image=layers.composite_reflectivity),
        embed(description="Storm relative motion", image=layers.storm_relative_motion),
        embed(description="Storm total precipitation", image=layers.storm_total_precipitation),
    ]
    p = pagination.EmbedNavigatorFactory(max_lines=20, factory=embed_generator)
    if layers.text_forecast:
        p.add_lines(*layers.text_forecast.split("\n"))
    else:
        p.add_line("No warnings are in place...")
    nav = p.build(ctx)
    # Attach one imagery layer to each existing forecast page (consuming
    # ``embeds`` front-to-back)...
    for page in nav.pages:
        if embeds:
            page.set_image(url=embeds[0].image["url"])
            page.set_footer(text=embeds[0].description)
            embeds.pop(0)
    # ...and append any layers left over as standalone pages.
    nav.pages = nav.pages + embeds
    nav.start()
| 43.56 | 116 | 0.663912 |
87ef235f8847c4918c92f2cd34920503416d4a1f | 2,211 | py | Python | scripts/slave/recipe_modules/perf_dashboard/example.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/perf_dashboard/example.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/perf_dashboard/example.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe modules this example depends on; resolved by the recipe engine.
DEPS = [
    'perf_dashboard',
    'recipe_engine/path',
    'recipe_engine/json',
    'recipe_engine/platform',
    'recipe_engine/properties',
    'recipe_engine/step',
]
# To run, pass these options into properties:
# slavename="multivm-windows-release",
# buildername="multivm-windows-perf-be",
# mastername="client.dart.fyi", buildnumber=75
def RunSteps(api):
  """Exercises the perf_dashboard module: posts two points, a link, and
  bisect results."""
  ref_point = api.perf_dashboard.get_skeleton_point(
      'sunspider/string-unpack-code/ref', 33241, '18.5')
  ref_point['supplemental_columns'] = {'r_webkit_rev': '167808'}
  ref_point['error'] = '0.5'
  ref_point['units'] = 'ms'

  test_point = api.perf_dashboard.get_skeleton_point(
      'sunspider/string-unpack-code', 33241, '18.4')
  test_point['supplemental_columns'] = {'r_webkit_rev': '167808'}
  test_point['error'] = '0.4898'
  test_point['units'] = 'ms'

  api.perf_dashboard.set_default_config()
  api.perf_dashboard.post([ref_point, test_point])
  api.perf_dashboard.add_dashboard_link(
      api.step.active_result.presentation,
      'sunspider/string-unpack-code',
      33241,
      bot='bot_name',
  )
  api.perf_dashboard.post_bisect_results({'try_job_id': 1,
                                          'status': 'completed'})
def GenTests(api):
  """Yields simulation test cases for RunSteps.

  One case per (platform, use_mirror) combination; each mocks the stdout of
  the "Post bisect results" step with a canned JSON response.
  """
  # Canned dashboard response returned by the mocked bisect-results step.
  bisect_response = {
      'post_data': {
          'try_job_id': 1,
          'status': 'completed'
      },
      'text': '',
      'status_code': 200
  }
  for platform in ('linux', 'win', 'mac'):
    for production in (True, False):
      # Test name is e.g. "linux" or "linux_use_mirror".
      yield (api.test('%s%s' %
                      (platform, '_use_mirror'
                       if production else '')) + api.platform.name(platform) +
             api.properties(use_mirror=production,
                            slavename='multivm-windows-release',
                            buildername='multivm-windows-perf-be',
                            buildnumber=75,
                            mastername='client.dart.fyi') +
             api.step_data('Post bisect results',
                           stdout=api.json.output(bisect_response)))
| 31.585714 | 80 | 0.587517 |
f2b7b49c1d98c4f9bae84a6ace3357fe904739f4 | 115 | py | Python | 2019/07/27/Django Example App - YouTube Search With YouTube Data API/django_youtube_search/youtube_search/search/urls.py | kenjitagawa/youtube_video_code | ef3c48b9e136b3745d10395d94be64cb0a1f1c97 | [
"Unlicense"
] | 492 | 2019-06-25T12:54:31.000Z | 2022-03-30T12:38:28.000Z | 2019/07/27/Django Example App - YouTube Search With YouTube Data API/django_youtube_search/youtube_search/search/urls.py | kenjitagawa/youtube_video_code | ef3c48b9e136b3745d10395d94be64cb0a1f1c97 | [
"Unlicense"
] | 23 | 2019-10-01T01:36:08.000Z | 2022-02-10T12:46:16.000Z | 2019/07/27/Django Example App - YouTube Search With YouTube Data API/django_youtube_search/youtube_search/search/urls.py | kenjitagawa/youtube_video_code | ef3c48b9e136b3745d10395d94be64cb0a1f1c97 | [
"Unlicense"
] | 1,734 | 2019-06-03T06:25:13.000Z | 2022-03-31T23:57:53.000Z | from django.urls import path
from . import views
# URL routes for the search app: the app root maps to the index view.
urlpatterns = [
    path('', views.index, name='index'),
]
| 16.428571 | 41 | 0.634783 |
4911b800835f8b5fe59e9e970e29d5a5708dd7ca | 3,116 | py | Python | Constructs/pd1_extracellular.py | amrosado/SequenceTools | f971b092bd3273a131157c4abe1c73d7c505d11e | [
"MIT"
] | 2 | 2020-05-11T02:16:49.000Z | 2020-09-07T08:41:53.000Z | Constructs/pd1_extracellular.py | amrosado/SequenceTools | f971b092bd3273a131157c4abe1c73d7c505d11e | [
"MIT"
] | null | null | null | Constructs/pd1_extracellular.py | amrosado/SequenceTools | f971b092bd3273a131157c4abe1c73d7c505d11e | [
"MIT"
] | null | null | null | from SequenceTools import SequenceTools
# Builds two PD-1 extracellular-domain expression constructs with
# SequenceTools and checks their translations against reference peptides.

# NCBI Entrez requires a contact e-mail for queries.
tools = SequenceTools(email="arosado@gatech.edu")
# Fetch the PD-1 cDNA record and deconstruct it; 288 presumably marks the
# CDS offset within the record -- TODO confirm against the GenBank entry.
tools.import_sequence_by_ncbi_identifier("NM_008798.2")
tools.deconstruct_imported_cdna_sequence(tools.all_sequences["NM_008798.2"], "NM_008798.2", 288)
# Carve out peptide residues 25-169 as the "PD1_Extracellular" fragment.
tools.make_new_deconstructed_sequence_from_deconstructed_sequence_peptide_range(tools.all_deconstructed_sequences["NM_008798.2"], 25, 169, "PD1_Extracellular")
dnaSeq = tools.return_dna_sequence_from_deconstructed_list(tools.all_deconstructed_sequences["PD1_Extracellular"]['deconstructedList'])
# Restriction sites flanking the construct (third arg False = untranslated).
nhe1Seq = tools.create_seq_object_from_string("GCTAGC")
tools.deconstruct_dna_sequence(nhe1Seq, "NheI", False)
ecoR1Seq = tools.create_seq_object_from_string("GAATTC")
tools.deconstruct_dna_sequence(ecoR1Seq, "EcoRI", False)
# Secretion signal and linker/tag elements (third arg True = translated).
seqPeptideSeq = tools.create_seq_object_from_string("ATGGGGATCCTTCCCAGCCCTGGGATGCCTGCGCTGCTCTCCCTCGTGAGCCTTCTCTCCGTGCTGCTGATGGGTTGCGTAGCT")
tools.deconstruct_dna_sequence(seqPeptideSeq, "SecretionSignal", True)
linker1Seq = tools.create_seq_object_from_string("GGTACC")
tools.deconstruct_dna_sequence(linker1Seq, "Linker1", True)
linker2Seq = tools.create_seq_object_from_string("GGTAGTGGTGGTAGTGGT")
tools.deconstruct_dna_sequence(linker2Seq, "Linker2", True)
apTagSeq = tools.create_seq_object_from_string("GGTCTGAATGATATTTTCGAAGCGCAGAAAATTGAATGGCATGAA")
tools.deconstruct_dna_sequence(apTagSeq, "APTag", True)
linker3eq = tools.create_seq_object_from_string("GGTAGCGGA")
tools.deconstruct_dna_sequence(linker3eq, "Linker3", True)
tevSeq = tools.create_seq_object_from_string("GAGAACCTATACTTCCAAGGA")
tools.deconstruct_dna_sequence(tevSeq, "TEV", True)
hisTagSeq = tools.create_seq_object_from_string("CACCACCATCATCACCAC")
tools.deconstruct_dna_sequence(hisTagSeq, "HIS", True)
stopCodonsSeq = tools.create_seq_object_from_string("TAGTAA")
tools.deconstruct_dna_sequence(stopCodonsSeq, "STOPS", True)
# Reference peptide for the extracellular fragment alone.
pd1ExtracellularPeptideSequence = tools.create_seq_object_from_string('LEVPNGPWRSLTFYPAWLTVSEGANATFTCSLSNWSEDLMLNWNRLSPSNQTEKQAAFCNGLSQPVQDARFQIIQLPNRHDFHMNILDTRRNDSGIYLCGAISLHPKAKIEESPGAELVVTERILETSTRYPSPSPKPEGRFQGM')
# Assemble the tagged construct, with and without the flanking sites.
tools.create_construct_from_deconstructed_sequences(['SecretionSignal', 'Linker1', 'PD1_Extracellular', "Linker2", 'APTag', 'Linker3', 'TEV', 'HIS', 'STOPS'], 'PD1_Extracellular-APTag-TEV-HIS')
tools.create_construct_from_deconstructed_sequences(['NheI' , 'SecretionSignal', 'Linker1', 'PD1_Extracellular', "Linker2", 'APTag', 'Linker3', 'TEV', 'HIS', 'STOPS', 'EcoRI'], 'NheI-PD1_Extracellular-APTag-TEV-HIS-EcoRI')
# Reference peptide for the full-length PD-1 translation.
pd1PeptideSequence = tools.create_seq_object_from_string('MWVRQVPWSFTWAVLQLSWQSGWLLEVPNGPWRSLTFYPAWLTVSEGANATFTCSLSNWSEDLMLNWNRLSPSNQTEKQAAFCNGLSQPVQDARFQIIQLPNRHDFHMNILDTRRNDSGIYLCGAISLHPKAKIEESPGAELVVTERILETSTRYPSPSPKPEGRFQGMVIGIMSALVGIPVLLLLAWALAVFCSTSMSEARGAGSKDDTLKEEPSAAPVPSVAYEELDFQGREKTPELPTACVHTEYATIVFTEGLGASAMGRRGSADGLQGPRPPRHEDGHCSWPL')
# Sanity-check the constructs' translations against the references.
compare1 = tools.compare_peptide_construct_to_sequence(tools.all_constructs['PD1_Extracellular'], pd1ExtracellularPeptideSequence)
compare2 = tools.compare_peptide_construct_to_sequence(tools.all_constructs['NM_008798.2'], pd1PeptideSequence)
pass | 62.32 | 348 | 0.861361 |
7ef08ac4996a19e922cba380f6022fc6aea673a7 | 25,573 | py | Python | tfx_bsl/arrow/array_util_test.py | busunkim96/tfx-bsl | 17b5e8b95acf4ac5fada640789409cb356ebebed | [
"Apache-2.0"
] | null | null | null | tfx_bsl/arrow/array_util_test.py | busunkim96/tfx-bsl | 17b5e8b95acf4ac5fada640789409cb356ebebed | [
"Apache-2.0"
] | null | null | null | tfx_bsl/arrow/array_util_test.py | busunkim96/tfx-bsl | 17b5e8b95acf4ac5fada640789409cb356ebebed | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Tests for tfx_bsl.arrow.array_util."""
import itertools
import numpy as np
import pyarrow as pa
from tfx_bsl.arrow import array_util
from absl.testing import absltest
from absl.testing import parameterized
# Named-parameter dicts used to run list-typed tests against both the
# `pa.list_` and `pa.large_list` type factories.
_LIST_TYPE_PARAMETERS = [
    dict(testcase_name="list", list_type_factory=pa.list_),
    dict(testcase_name="large_list", list_type_factory=pa.large_list),
]
class ArrayUtilTest(parameterized.TestCase):
  """Tests for the simple per-array helpers in tfx_bsl.arrow.array_util."""

  def test_invalid_input_type(self):
    # Each helper should reject non-arrays and arrays of the wrong kind
    # instead of crashing.
    functions_expecting_list_array = [
        array_util.GetFlattenedArrayParentIndices,
    ]
    functions_expecting_array = [array_util.GetArrayNullBitmapAsByteArray]
    functions_expecting_binary_array = [array_util.GetBinaryArrayTotalByteSize]
    for f in itertools.chain(functions_expecting_list_array,
                             functions_expecting_array,
                             functions_expecting_binary_array):
      with self.assertRaises((TypeError, RuntimeError)):
        f(1)

    for f in functions_expecting_list_array:
      with self.assertRaisesRegex(RuntimeError, "UNIMPLEMENTED"):
        f(pa.array([1, 2, 3]))

    for f in functions_expecting_binary_array:
      with self.assertRaisesRegex(RuntimeError, "UNIMPLEMENTED"):
        f(pa.array([[1, 2, 3]]))

  @parameterized.named_parameters(*_LIST_TYPE_PARAMETERS)
  def test_list_lengths(self, list_type_factory):
    # Empty input, a list with empties, and a list with nulls; note nulls
    # count as length 0.
    list_lengths = array_util.ListLengthsFromListArray(
        pa.array([], type=list_type_factory(pa.int64())))
    self.assertTrue(list_lengths.equals(pa.array([], type=pa.int64())))
    list_lengths = array_util.ListLengthsFromListArray(
        pa.array([[1., 2.], [], [3.]], type=list_type_factory(pa.float32())))
    self.assertTrue(list_lengths.equals(pa.array([2, 0, 1], type=pa.int64())))
    list_lengths = array_util.ListLengthsFromListArray(
        pa.array([[1., 2.], None, [3.]], type=list_type_factory(pa.float64())))
    self.assertTrue(list_lengths.equals(pa.array([2, 0, 1], type=pa.int64())))

  @parameterized.named_parameters(*_LIST_TYPE_PARAMETERS)
  def test_element_lengths_list_array(self, list_type_factory):
    list_lengths = array_util.GetElementLengths(
        pa.array([], type=list_type_factory(pa.int64())))
    self.assertTrue(list_lengths.equals(pa.array([], type=pa.int64())))
    list_lengths = array_util.GetElementLengths(
        pa.array([[1., 2.], [], [3.]], list_type_factory(pa.float32())))
    self.assertTrue(list_lengths.equals(pa.array([2, 0, 1], type=pa.int64())))
    list_lengths = array_util.GetElementLengths(
        pa.array([[1., 2.], None, [3.]], list_type_factory(pa.float64())))
    self.assertTrue(list_lengths.equals(pa.array([2, 0, 1], type=pa.int64())))

  @parameterized.named_parameters(*[
      dict(testcase_name="binary", binary_like_type=pa.binary()),
      dict(testcase_name="string", binary_like_type=pa.string()),
      dict(testcase_name="large_binary", binary_like_type=pa.large_binary()),
      dict(testcase_name="large_string", binary_like_type=pa.large_string()),
  ])
  def test_element_lengths_binary_like(self, binary_like_type):
    # For binary-like arrays the element length is the byte length; nulls
    # count as 0.
    list_lengths = array_util.GetElementLengths(
        pa.array([b"a", b"bb", None, b"", b"ccc"], type=binary_like_type))
    self.assertTrue(list_lengths.equals(pa.array([1, 2, 0, 0, 3],
                                                 type=pa.int64())))

  def test_element_lengths_unsupported_type(self):
    with self.assertRaisesRegex(RuntimeError, "UNIMPLEMENTED"):
      array_util.GetElementLengths(pa.array([1, 2, 3], type=pa.int32()))

  def test_get_array_null_bitmap_as_byte_array(self):
    # Result is one uint8 per element: 1 where the element is null, else 0.
    array = pa.array([], type=pa.int32())
    null_masks = array_util.GetArrayNullBitmapAsByteArray(array)
    self.assertTrue(null_masks.equals(pa.array([], type=pa.uint8())))

    array = pa.array([1, 2, None, 3, None], type=pa.int32())
    null_masks = array_util.GetArrayNullBitmapAsByteArray(array)
    self.assertTrue(
        null_masks.equals(pa.array([0, 0, 1, 0, 1], type=pa.uint8())))

    array = pa.array([1, 2, 3])
    null_masks = array_util.GetArrayNullBitmapAsByteArray(array)
    self.assertTrue(null_masks.equals(pa.array([0, 0, 0], type=pa.uint8())))

    array = pa.array([None, None, None], type=pa.int32())
    null_masks = array_util.GetArrayNullBitmapAsByteArray(array)
    self.assertTrue(null_masks.equals(pa.array([1, 1, 1], type=pa.uint8())))
    # Demonstrate that the returned array can be converted to a numpy boolean
    # array w/o copying
    np.testing.assert_equal(
        np.array([True, True, True]), null_masks.to_numpy().view(np.bool))

  @parameterized.named_parameters(*[
      dict(
          testcase_name="list",
          list_type_factory=pa.list_,
          parent_indices_type=pa.int32()),
      dict(
          testcase_name="large_list",
          list_type_factory=pa.large_list,
          parent_indices_type=pa.int64()),
  ])
  def test_get_flattened_array_parent_indices(self, list_type_factory,
                                              parent_indices_type):
    # The parent-index type mirrors the list offset width: int32 for list,
    # int64 for large_list.
    indices = array_util.GetFlattenedArrayParentIndices(
        pa.array([], type=list_type_factory(pa.int32())))
    self.assertTrue(indices.equals(pa.array([], type=parent_indices_type)))

    indices = array_util.GetFlattenedArrayParentIndices(
        pa.array([[1.], [2.], [], [3., 4.]],
                 type=list_type_factory(pa.float32())))
    self.assertTrue(
        indices.equals(pa.array([0, 1, 3, 3], type=parent_indices_type)))

    # Sliced arrays must produce indices relative to the slice.
    indices = array_util.GetFlattenedArrayParentIndices(
        pa.array([[1.], [2.], [], [3., 4.]],
                 type=list_type_factory(pa.float32())).slice(1))
    self.assertTrue(
        indices.equals(pa.array([0, 2, 2], type=parent_indices_type)))

    indices = array_util.GetFlattenedArrayParentIndices(
        pa.array([list(range(1024))],
                 type=list_type_factory(pa.int64())))
    self.assertTrue(
        indices.equals(pa.array([0] * 1024, type=parent_indices_type)))

  @parameterized.named_parameters(*[
      dict(testcase_name="binary", binary_like_type=pa.binary()),
      dict(testcase_name="string", binary_like_type=pa.string()),
      dict(testcase_name="large_binary", binary_like_type=pa.large_binary()),
      dict(testcase_name="large_string", binary_like_type=pa.large_string()),
  ])
  def test_get_binary_array_total_byte_size(self, binary_like_type):
    # Only the bytes of non-null elements are counted; slices count only
    # the bytes covered by the slice.
    array = pa.array([b"abc", None, b"def", b"", b"ghi"], type=binary_like_type)
    self.assertEqual(9, array_util.GetBinaryArrayTotalByteSize(array))
    sliced_1_2 = array.slice(1, 2)
    self.assertEqual(3, array_util.GetBinaryArrayTotalByteSize(sliced_1_2))
    sliced_2 = array.slice(2)
    self.assertEqual(6, array_util.GetBinaryArrayTotalByteSize(sliced_2))

    empty_array = pa.array([], type=binary_like_type)
    self.assertEqual(0, array_util.GetBinaryArrayTotalByteSize(empty_array))

  def test_indexin_integer(self):
    values = pa.array([99, 42, 3, None])
    # TODO(b/203116559): Change this back to [3, 3, 99] once arrow >= 5.0
    # is required by TFDV.
    value_set = pa.array([3, 4, 99])
    actual = array_util.IndexIn(values, value_set)
    actual.validate()
    self.assertTrue(
        actual.equals(pa.array([2, None, 0, None], type=pa.int32())))

  @parameterized.parameters(
      *(list(
          itertools.product([pa.binary(), pa.large_binary()],
                            [pa.binary(), pa.large_binary()])) +
        list(
            itertools.product([pa.string(), pa.large_string()],
                              [pa.string(), pa.large_string()]))))
  def test_indexin_binary_alike(self, values_type, value_set_type):
    # Case #1: value_set does not contain null.
    values = pa.array(["aa", "bb", "cc", None], values_type)
    value_set = pa.array(["cc", "cc", "aa"], value_set_type)
    actual = array_util.IndexIn(values, value_set)
    actual.validate()
    self.assertTrue(
        actual.equals(pa.array([1, None, 0, None], type=pa.int32())),
        "actual: {}".format(actual))

    # Case #2: value_set contains nulls.
    values = pa.array(["aa", "bb", "cc", None], values_type)
    value_set = pa.array(["cc", None, None, "bb"], value_set_type)
    actual = array_util.IndexIn(values, value_set)
    actual.validate()
    self.assertTrue(
        actual.equals(pa.array([None, 2, 0, 1], type=pa.int32())),
        "actual: {}".format(actual))
# Inputs MakeListArrayFromParentIndicesAndValues must reject, with the
# expected error type and message pattern.
_MAKE_LIST_ARRAY_INVALID_INPUT_TEST_CASES = [
    dict(
        testcase_name="parent_indices_not_arrow_int64",
        num_parents=1,
        parent_indices=pa.array([0], type=pa.int32()),
        values=pa.array([1]),
        expected_error=RuntimeError,
        expected_error_regexp="must be int64"
    ),
    dict(
        testcase_name="parent_indices_length_not_equal_to_values_length",
        num_parents=1,
        parent_indices=pa.array([0], type=pa.int64()),
        values=pa.array([1, 2]),
        expected_error=RuntimeError,
        expected_error_regexp="values array and parent indices array must be of the same length"
    ),
    dict(
        testcase_name="num_parents_too_small",
        num_parents=1,
        parent_indices=pa.array([1], type=pa.int64()),
        values=pa.array([1]),
        expected_error=RuntimeError,
        expected_error_regexp="Found a parent index 1 while num_parents was 1"
    )
]

# Valid inputs and expected outputs for MakeListArrayFromParentIndicesAndValues,
# covering both empty_list_as_null=True (parents without values become null)
# and =False (they become empty lists).
_MAKE_LIST_ARRAY_TEST_CASES = [
    dict(
        testcase_name="parents_are_all_null",
        num_parents=5,
        parent_indices=pa.array([], type=pa.int64()),
        values=pa.array([], type=pa.int64()),
        empty_list_as_null=True,
        expected=pa.array([None, None, None, None, None],
                          type=pa.large_list(pa.int64()))),
    dict(
        testcase_name="leading_nulls",
        num_parents=3,
        parent_indices=pa.array([2], type=pa.int64()),
        values=pa.array([1], type=pa.int64()),
        empty_list_as_null=True,
        expected=pa.array([None, None, [1]], type=pa.large_list(pa.int64())),
    ),
    dict(
        testcase_name="same_parent_and_some_nulls",
        num_parents=4,
        parent_indices=pa.array([0, 0, 0, 3, 3], type=pa.int64()),
        values=pa.array(["a", "b", "c", "d", "e"], type=pa.binary()),
        empty_list_as_null=True,
        expected=pa.array([["a", "b", "c"], None, None, ["d", "e"]],
                          type=pa.large_list(pa.binary()))),
    dict(
        testcase_name="parents_are_all_empty",
        num_parents=5,
        parent_indices=pa.array([], type=pa.int64()),
        values=pa.array([], type=pa.int64()),
        empty_list_as_null=False,
        expected=pa.array([[], [], [], [], []],
                          type=pa.large_list(pa.int64()))),
    dict(
        testcase_name="leading_empties",
        num_parents=3,
        parent_indices=pa.array([2], type=pa.int64()),
        values=pa.array([1]),
        empty_list_as_null=False,
        expected=pa.array([[], [], [1]], type=pa.large_list(pa.int64())),
    ),
    dict(
        testcase_name="same_parent_and_some_empties",
        num_parents=4,
        parent_indices=pa.array([0, 0, 0, 3, 3], type=pa.int64()),
        values=pa.array(["a", "b", "c", "d", "e"], type=pa.binary()),
        empty_list_as_null=False,
        expected=pa.array([["a", "b", "c"], [], [], ["d", "e"]],
                          type=pa.large_list(pa.binary())),
    ),
]
class MakeListArrayFromParentIndicesAndValuesTest(parameterized.TestCase):
  """Tests for array_util.MakeListArrayFromParentIndicesAndValues."""

  @parameterized.named_parameters(*_MAKE_LIST_ARRAY_INVALID_INPUT_TEST_CASES)
  def testInvalidInput(self, num_parents, parent_indices, values,
                       expected_error, expected_error_regexp):
    with self.assertRaisesRegex(expected_error, expected_error_regexp):
      array_util.MakeListArrayFromParentIndicesAndValues(
          num_parents, parent_indices, values)

  @parameterized.named_parameters(*_MAKE_LIST_ARRAY_TEST_CASES)
  def testMakeListArray(self, num_parents, parent_indices, values,
                        empty_list_as_null, expected):
    actual = array_util.MakeListArrayFromParentIndicesAndValues(
        num_parents, parent_indices, values, empty_list_as_null)
    actual.validate()
    # With empty_list_as_null=False the result must not contain any nulls.
    if not empty_list_as_null:
      self.assertEqual(actual.null_count, 0)
    self.assertTrue(
        actual.equals(expected),
        "actual: {}, expected: {}".format(actual, expected))
# Inputs for CooFromListArray with the expected flattened COO coordinates
# and dense shape; each case runs against all of the listed array types.
_COO_FROM_LIST_ARRAY_TEST_CASES = [
    dict(
        testcase_name="flat_array",
        list_array=[1, 2, 3, 4],
        expected_coo=[0, 1, 2, 3],
        expected_dense_shape=[4],
        array_types=[pa.int32()],
    ),
    dict(
        testcase_name="empty_array",
        list_array=[],
        expected_coo=[],
        expected_dense_shape=[0],
        array_types=[pa.int64()],
    ),
    dict(
        testcase_name="empty_2d_array",
        list_array=[[]],
        expected_coo=[],
        expected_dense_shape=[1, 0],
        array_types=[pa.list_(pa.int64()),
                     pa.large_list(pa.string())]),
    dict(
        testcase_name="2d_ragged",
        list_array=[["a", "b"], ["c"], [], ["d", "e"]],
        expected_coo=[0, 0, 0, 1, 1, 0, 3, 0, 3, 1],
        expected_dense_shape=[4, 2],
        array_types=[pa.list_(pa.string()),
                     pa.large_list(pa.large_binary())]),
    dict(
        testcase_name="3d_ragged",
        list_array=[[["a", "b"], ["c"]], [[], ["d", "e"]]],
        expected_coo=[0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1],
        expected_dense_shape=[2, 2, 2],
        array_types=[
            pa.list_(pa.list_(pa.binary())),
            pa.large_list(pa.large_list(pa.binary())),
            pa.large_list(pa.list_(pa.binary())),
            pa.list_(pa.large_list(pa.binary())),
        ],
    ),
]
class CooFromListArrayTest(parameterized.TestCase):
  """Tests for array_util.CooFromListArray."""

  @parameterized.named_parameters(*_COO_FROM_LIST_ARRAY_TEST_CASES)
  def testCooFromListArray(
      self, list_array, expected_coo, expected_dense_shape, array_types):
    for array_type in array_types:
      for input_array in [
          pa.array(list_array, type=array_type),
          # it should work for sliced arrays.
          pa.array(list_array + list_array,
                   type=array_type).slice(0, len(list_array)),
          pa.array(list_array + list_array,
                   type=array_type).slice(len(list_array)),
      ]:
        coo, dense_shape = array_util.CooFromListArray(input_array)
        # Both outputs are always int64 regardless of the input type.
        self.assertTrue(coo.type.equals(pa.int64()))
        self.assertTrue(dense_shape.type.equals(pa.int64()))

        self.assertEqual(expected_coo, coo.to_pylist())
        self.assertEqual(expected_dense_shape, dense_shape.to_pylist())
# Inputs for FillNullLists: the list array, its value type, the fill values,
# and the expected result after null sub-lists are replaced with fill_with.
_FILL_NULL_LISTS_TEST_CASES = [
    dict(
        testcase_name="empty_array",
        list_array=[],
        value_type=pa.int32(),
        fill_with=[0],
        expected=[],
    ),
    dict(
        testcase_name="only_one_null",
        list_array=[None],
        value_type=pa.int64(),
        fill_with=[0, 1],
        expected=[[0, 1]],
    ),
    dict(
        testcase_name="no_nulls",
        list_array=[[1], [2], [3]],
        value_type=pa.int64(),
        fill_with=[0],
        expected=[[1], [2], [3]],
    ),
    dict(
        testcase_name="all_nulls",
        list_array=[None, None, None],
        value_type=pa.int64(),
        fill_with=[0, 1],
        expected=[[0, 1], [0, 1], [0, 1]],
    ),
    dict(
        testcase_name="nulls_at_end",
        list_array=[[1], [2], None],
        value_type=pa.int64(),
        fill_with=[0, 1],
        expected=[[1], [2], [0, 1]],
    ),
    dict(
        testcase_name="nulls_at_beginning",
        list_array=[None, None, [1]],
        value_type=pa.int64(),
        fill_with=[],
        expected=[[], [], [1]],
    ),
    dict(
        testcase_name="nulls_scattered",
        list_array=[["a"], ["b"], ["c"], None, ["d"], None, ["e"]],
        value_type=pa.large_binary(),
        fill_with=["x", "x"],
        expected=[["a"], ["b"], ["c"], ["x", "x"], ["d"], ["x", "x"], ["e"]],
    )
]
def _cross_named_parameters(*named_parameters_dicts):
result = []
for product in itertools.product(*named_parameters_dicts):
crossed = dict(product[0])
testcase_name = crossed["testcase_name"]
for d in product[1:]:
testcase_name += "_" + d["testcase_name"]
crossed.update(d)
crossed["testcase_name"] = testcase_name
result.append(crossed)
return result
class FillNullListsTest(parameterized.TestCase):
  """Tests for array_util.FillNullLists."""

  @parameterized.named_parameters(*_cross_named_parameters(
      _FILL_NULL_LISTS_TEST_CASES, _LIST_TYPE_PARAMETERS))
  def testFillNullLists(
      self, list_array, value_type, fill_with, expected, list_type_factory):
    actual = array_util.FillNullLists(
        pa.array(list_array, type=list_type_factory(value_type)),
        pa.array(fill_with, type=value_type))
    self.assertTrue(
        actual.equals(pa.array(expected, type=list_type_factory(value_type))),
        "{} vs {}".format(actual, expected))

  def testNonListArray(self):
    with self.assertRaisesRegex(RuntimeError, "UNIMPLEMENTED"):
      array_util.FillNullLists(pa.array([1, 2, 3]), pa.array([4]))

  def testValueTypeDoesNotEqualFillType(self):
    # The fill array's type must match the list array's value type.
    with self.assertRaisesRegex(RuntimeError, "to be of the same type"):
      array_util.FillNullLists(pa.array([[1]]), pa.array(["a"]))
def _all_false_null_bitmap_size(size):
  """Returns the expected byte size of an all-false null bitmap.

  Starting from arrow 0.17, the array factory won't create a null bitmap if
  no element is null, so on newer Arrow the expected size is 0.
  NOTE(review): the version check is a lexicographic string comparison,
  kept as-is from the original -- confirm it holds for the Arrow versions
  tfx_bsl actually supports.
  TODO(zhuo): clean up this shim once tfx_bsl supports arrow 0.17+
  exclusively.
  """
  return 0 if pa.__version__ >= "0.17" else size
def _get_numeric_byte_size_test_cases():
  """Generates GetByteSize test cases for all fixed-width numeric types.

  For each type: a 9-element array (expected size = 9 values plus the null
  bitmap) and a 3-element slice of it (3 values plus the sliced bitmap).
  """
  result = []
  for array_type, sizeof in [
      (pa.int8(), 1),
      (pa.uint8(), 1),
      (pa.int16(), 2),
      (pa.uint16(), 2),
      (pa.int32(), 4),
      (pa.uint32(), 4),
      (pa.int64(), 8),
      (pa.uint64(), 8),
      (pa.float32(), 4),
      (pa.float64(), 8),
  ]:
    result.append(
        dict(
            testcase_name=str(array_type),
            array=pa.array(range(9), type=array_type),
            slice_offset=2,
            slice_length=3,
            expected_size=(_all_false_null_bitmap_size(2) + sizeof * 9),
            expected_sliced_size=(_all_false_null_bitmap_size(1) + sizeof * 3)))
  return result
def _get_binary_like_byte_size_test_cases():
  """Generates GetByteSize test cases for (large) binary and string types.

  The expected sizes account for the value bytes, the offsets buffer (whose
  element width differs between binary and large_binary), and the null
  bitmap.
  """
  result = []
  for array_type, sizeof_offsets in [
      (pa.binary(), 4),
      (pa.string(), 4),
      (pa.large_binary(), 8),
      (pa.large_string(), 8),
  ]:
    result.append(
        dict(
            testcase_name=str(array_type),
            array=pa.array([
                "a", "bb", "ccc", "dddd", "eeeee", "ffffff", "ggggggg",
                "hhhhhhhh", "iiiiiiiii"
            ],
                           type=array_type),
            slice_offset=1,
            slice_length=3,
            # contents: 45
            # offsets: 10 * sizeof_offsets
            # null bitmap: 2
            expected_size=(45 + sizeof_offsets * 10 +
                           _all_false_null_bitmap_size(2)),
            # contents: 9
            # offsets: 4 * sizeof_offsets
            # null bitmap: 1
            expected_sliced_size=(9 + sizeof_offsets * 4 +
                                  _all_false_null_bitmap_size(1))))
  return result
# All GetByteSize test cases: numeric and binary-like cases are generated by
# the helpers above; the hand-written cases below spell out the per-buffer
# accounting (values, offsets, null bitmaps) for the remaining types.
_GET_BYTE_SIZE_TEST_CASES = (
    _get_numeric_byte_size_test_cases() +
    _get_binary_like_byte_size_test_cases() + [
        dict(
            testcase_name="bool",
            array=pa.array([False] * 9, type=pa.bool_()),
            slice_offset=7,
            slice_length=1,
            # contents: 2
            # null bitmap: 2
            expected_size=(_all_false_null_bitmap_size(2) + 2),
            # contents: 1
            # null bitmap: 1
            expected_sliced_size=(_all_false_null_bitmap_size(1) + 1)),
        dict(
            testcase_name="list",
            array=pa.array([[1], [1, 1], [1, 1, 1], [1, 1, 1, 1]],
                           type=pa.list_(pa.int64())),
            slice_offset=1,
            slice_length=2,
            # offsets: 5 * 4
            # null bitmap: 1
            # contents:
            #   null bitmap: 2
            #   contents: 10 * 8
            expected_size=(5 * 4 + _all_false_null_bitmap_size(1 + 2) + 10 * 8),
            # offsets: 3 * 4
            # null bitmap: 1
            # contents:
            #   null bitmap: 1
            #   contents: 5 * 8
            expected_sliced_size=(3 * 4 + _all_false_null_bitmap_size(1 + 1)
                                  + 5 * 8)),
        dict(
            testcase_name="large_list",
            array=pa.array([[1], [1, 1], [1, 1, 1], [1, 1, 1, 1]],
                           type=pa.large_list(pa.int64())),
            slice_offset=1,
            slice_length=2,
            # offsets: 5 * 8
            # null bitmap: 1
            # contents:
            #   null bitmap: 2
            #   contents: 10 * 8
            expected_size=(5 * 8 + _all_false_null_bitmap_size(1 + 2) + 10 * 8),
            # offsets: 3 * 8
            # null bitmap: 1
            # contents:
            #   null bitmap: 1
            #   contents: 5 * 8
            expected_sliced_size=(
                3 * 8 + _all_false_null_bitmap_size(1 + 1) + 5 * 8)),
        dict(
            testcase_name="deeply_nested_list",
            array=pa.array([[["aaa"], ["bb", ""], None],
                            None,
                            [["c"], [], ["def", "g"]],
                            [["h"]]],
                           type=pa.list_(pa.list_(pa.binary()))),
            slice_offset=1,
            slice_length=2,
            # innermost binary array: 1 + 11 + 8 * 4
            # mid list array: 1 + 8 * 4
            # outmost list array: 1 + 5 * 4
            expected_size=(97 +
                           # innermost binary array does not have null
                           _all_false_null_bitmap_size(1)),
            # innermost binary array (["c", "def", "g"]): 1 + 5 + 4 * 4
            # mid list array: ([["c"], [], ["def, "g]]): 1 + 4 * 4
            # outmost list array: 1 + 3 * 4
            expected_sliced_size=(
                51 +
                # innermost binary array does not have null
                _all_false_null_bitmap_size(1))),
        dict(
            testcase_name="null",
            array=pa.array([None] * 1000),
            slice_offset=4,
            slice_length=100,
            expected_size=0,
            expected_sliced_size=0),
        dict(
            testcase_name="struct",
            array=pa.array(
                [{
                    "a": 1,
                    "b": 2
                }] * 10,
                type=pa.struct(
                    [pa.field("a", pa.int64()),
                     pa.field("b", pa.int64())])),
            slice_offset=2,
            slice_length=1,
            expected_size=(_all_false_null_bitmap_size(2) +
                           (_all_false_null_bitmap_size(2) + 10 * 8) * 2),
            expected_sliced_size=(_all_false_null_bitmap_size(1) +
                                  (_all_false_null_bitmap_size(1) + 8) * 2))
    ])
class GetByteSizeTest(parameterized.TestCase):
  """Tests for array_util.GetByteSize."""

  @parameterized.named_parameters(*_GET_BYTE_SIZE_TEST_CASES)
  def testGetByteSize(self, array, slice_offset, slice_length, expected_size,
                      expected_sliced_size):
    # make sure the empty array case does not crash.
    array_util.GetByteSize(pa.array([], array.type))
    self.assertEqual(array_util.GetByteSize(array), expected_size)
    sliced = array.slice(slice_offset, slice_length)
    self.assertEqual(array_util.GetByteSize(sliced), expected_sliced_size)

  def testUnsupported(self):
    with self.assertRaisesRegex(RuntimeError, "UNIMPLEMENTED"):
      array_util.GetByteSize(pa.array([], type=pa.timestamp("s")))
# Inputs for ToSingletonListArray and the expected list array: each non-null
# element becomes a one-element list; nulls stay null.
_TO_SINGLETON_LIST_ARRAY_TEST_CASES = [
    dict(
        testcase_name="empty",
        array=pa.array([], type=pa.int32()),
        expected_result=pa.array([], type=pa.list_(pa.int32())),
    ),
    dict(
        testcase_name="no_null",
        array=pa.array([1, 2, 3]),
        expected_result=pa.array([[1], [2], [3]]),
    ),
    dict(
        testcase_name="all_nulls",
        array=pa.array([None, None, None], type=pa.binary()),
        expected_result=pa.array([None, None, None],
                                 type=pa.list_(pa.binary())),
    ),
    dict(
        testcase_name="some_nulls",
        array=pa.array([None, None, 2, 3, None, 4, None, None]),
        expected_result=pa.array([None, None, [2], [3], None, [4], None, None]),
    ),
]
class ToSingletonListArrayTest(parameterized.TestCase):
  """Tests for array_util.ToSingletonListArray."""

  @parameterized.named_parameters(*_TO_SINGLETON_LIST_ARRAY_TEST_CASES)
  def testToSingletonListArray(self, array, expected_result):
    """Each input element i becomes [i]; null elements remain null."""
    result = array_util.ToSingletonListArray(array)
    result.validate()
    self.assertTrue(
        result.equals(expected_result),
        "expected: {}; got: {}".format(expected_result, result))
# Allow running this test module directly.
if __name__ == "__main__":
  absltest.main()
| 37.224163 | 96 | 0.600594 |
35bf50c60d9148adb0236eba09a416b571711eda | 1,677 | py | Python | aliyun-python-sdk-devops-rdc/aliyunsdkdevops_rdc/request/v20200303/GetPipelineInstanceInfoRequest.py | liuzheng/aliyun-openapi-python-sdk | 1ba6743f3d6f2cef57ec9e3be1754b04293c3150 | [
"Apache-2.0"
] | 1 | 2021-03-08T02:59:17.000Z | 2021-03-08T02:59:17.000Z | aliyun-python-sdk-devops-rdc/aliyunsdkdevops_rdc/request/v20200303/GetPipelineInstanceInfoRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-devops-rdc/aliyunsdkdevops_rdc/request/v20200303/GetPipelineInstanceInfoRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class GetPipelineInstanceInfoRequest(RpcRequest):
    """RPC request for the devops-rdc ``GetPipelineInstanceInfo`` operation (API version 2020-03-03).

    Body parameters: FlowInstanceId, UserPk, OrgId, PipelineId, each exposed
    through a matching get_*/set_* accessor pair.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'devops-rdc', '2020-03-03', 'GetPipelineInstanceInfo')
        self.set_method('POST')

    def get_FlowInstanceId(self):
        body = self.get_body_params()
        return body.get('FlowInstanceId')

    def set_FlowInstanceId(self, FlowInstanceId):
        self.add_body_params('FlowInstanceId', FlowInstanceId)

    def get_UserPk(self):
        body = self.get_body_params()
        return body.get('UserPk')

    def set_UserPk(self, UserPk):
        self.add_body_params('UserPk', UserPk)

    def get_OrgId(self):
        body = self.get_body_params()
        return body.get('OrgId')

    def set_OrgId(self, OrgId):
        self.add_body_params('OrgId', OrgId)

    def get_PipelineId(self):
        body = self.get_body_params()
        return body.get('PipelineId')

    def set_PipelineId(self, PipelineId):
        self.add_body_params('PipelineId', PipelineId)
733193d63d6cd075cb413a9d32cf99d43315d09d | 1,467 | py | Python | utils.py | RafayGhafoor/PTCL-Router-CLI | 6d75ba31e356af9c32a3fdb50df83f7cf31f6719 | [
"MIT"
] | 7 | 2018-11-17T07:18:49.000Z | 2020-01-28T17:19:07.000Z | utils.py | RafayGhafoor/PTCL-Router-CLI | 6d75ba31e356af9c32a3fdb50df83f7cf31f6719 | [
"MIT"
] | null | null | null | utils.py | RafayGhafoor/PTCL-Router-CLI | 6d75ba31e356af9c32a3fdb50df83f7cf31f6719 | [
"MIT"
] | 3 | 2018-12-30T15:35:25.000Z | 2020-10-10T22:58:00.000Z | import re
# Compiled matcher for MAC addresses: six hex octets separated by ':' or '-'.
# NOTE(review): the separator may differ per octet (e.g. "AA:BB-CC:..."
# matches); not referenced in this chunk -- presumably used by callers.
mac_pattern = re.compile(u'^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$')
def validate_gateway(gateway):
    """Return True when *gateway* has no http(s):// scheme and no trailing '/'.

    Accepts a bare gateway address such as "192.168.1.1"; rejects URL-style
    input like "http://192.168.1.1" or "192.168.1.1/".
    """
    has_scheme = re.search("https?://", gateway) is not None
    has_trailing_slash = gateway.endswith('/')
    return not has_scheme and not has_trailing_slash
def convert_time(start_time="1", end_time="23:59"):
    '''
    Converts time to minutes.

    Each argument is a clock string "H" or "H:MM"; it is split by ":", the
    first element being hours and the (optional) second element minutes.

    Parameters:

    - start_time: start time to apply limit from. Eg: "1:00" (am)

    - end_time: end time to apply limit till. Eg: "13:00" (pm)

    Return (tuple of two ints):

        (start_time, end_time), each converted as hours * 60 + minutes.

    Example:

    >>> convert_time("13:00", "18:08")
    (780, 1088)

    NOTE(review): values are not range-checked (minutes may exceed 59 and
    hours may exceed 23), matching the original behaviour; the old TODO
    about validating ":MM" <= 60 still stands.
    '''
    def _to_minutes(clock):
        # "H" -> hours only (minutes default to 0); "H:MM" -> hours + minutes.
        parts = [int(piece) for piece in clock.split(':')]
        minutes = parts[1] if len(parts) > 1 else 0
        return parts[0] * 60 + minutes

    return (_to_minutes(start_time), _to_minutes(end_time))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.