hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4a6961ad31d949e717af2cb053a6215c88efcb5 | 1,365 | py | Python | standard_library/json_ops.py | ariannasg/python3-essential-training | 9b52645f5ccb57d2bda5d5f4a3053681a026450a | [
"MIT"
] | 1 | 2020-06-02T08:37:41.000Z | 2020-06-02T08:37:41.000Z | standard_library/json_ops.py | ariannasg/python3-training | 9b52645f5ccb57d2bda5d5f4a3053681a026450a | [
"MIT"
] | null | null | null | standard_library/json_ops.py | ariannasg/python3-training | 9b52645f5ccb57d2bda5d5f4a3053681a026450a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# working with JSON data
import json
import urllib.request

# fetch some sample JSON data over HTTP
response = urllib.request.urlopen("http://httpbin.org/json")
payload = response.read().decode('utf-8')
print(payload)

# parse the returned text with the json module
slideshow_doc = json.loads(payload)

# once parsed, the document behaves like ordinary dicts and lists
print(slideshow_doc["slideshow"]["author"])
for slide in slideshow_doc["slideshow"]["slides"]:
    print(slide["title"])

# python objects can also be written out as JSON
objdata = {
    "name": "Joe Marini",
    "author": True,
    "titles": [
        "Learning Python", "Advanced Python",
        "Python Standard Library Essential Training"
    ]
}

# serialize the object above to a file as pretty-printed JSON
with open("jsonoutput.json", "w") as fp:
    json.dump(objdata, fp, indent=4)
# CONSOLE OUTPUT:
# {
# "slideshow": {
# "author": "Yours Truly",
# "date": "date of publication",
# "slides": [
# {
# "title": "Wake up to WonderWidgets!",
# "type": "all"
# },
# {
# "items": [
# "Why <em>WonderWidgets</em> are great",
# "Who <em>buys</em> WonderWidgets"
# ],
# "title": "Overview",
# "type": "all"
# }
# ],
# "title": "Sample Slide Show"
# }
# }
#
# Yours Truly
# Wake up to WonderWidgets!
# Overview
| 22.75 | 65 | 0.586813 |
90f4612b34b2d935ffbaabd0c08ce7c6dfb5d703 | 11,952 | py | Python | pea_simulator.py | nbeguier/pea-simulator | ef21cf3574d3a64d642135d7fdb38f2f73a59d60 | [
"MIT"
] | null | null | null | pea_simulator.py | nbeguier/pea-simulator | ef21cf3574d3a64d642135d7fdb38f2f73a59d60 | [
"MIT"
] | null | null | null | pea_simulator.py | nbeguier/pea-simulator | ef21cf3574d3a64d642135d7fdb38f2f73a59d60 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
PEA simulateur
Copyright (c) 2020-2021 Nicolas Beguier
Licensed under the MIT License
Written by Nicolas BEGUIER (nicolas_beguier@hotmail.com)
"""
# Standard library imports
from datetime import datetime
from os.path import exists
import pickle
import sys
# Third party library imports
from dateutil.relativedelta import relativedelta
from tabulate import tabulate
# Debug
# from pdb import set_trace as st
VERSION = '1.3.0'

## VARS
# Starting date and cash balance of a new game.
START_DATE = '2019/01/01'
START_MONEY = 1000
# Brokerage fee per order, keyed by the (exclusive) upper bound of the
# order amount in euros.  Values are either a flat fee in euros or a
# percentage string such as '0.2%' applied to the order amount.
BANK_TAX = {
    500: 1.95,
    2000: 3.9,
    3250: '0.2%',
    10000: '0.2%',
    100000: '0.2%',
    150000: '0.2%',
}
# Social contributions rate (percent) applied to positive capital gains.
SOCIAL_CONTRIBUTIONS = 17.2
# Income-tax rate (percent) keyed by PEA age in years (upper bound).
# NOTE(review): not referenced anywhere else in this module.
GLOBAL_TAX = {
    2: 22.5,
    5: 19,
    99: 0,
}

# Market directory under cotations/ to read prices from.
# MARKET = 'generated'
MARKET = 'cac40'
def compute_tax(price):
    """
    Fonction retournant la taxe associée au price d'achat d'actions

    The first bracket of ``BANK_TAX`` whose upper bound exceeds ``price``
    applies.  A percentage entry (e.g. '0.2%') is applied to ``price``;
    a numeric entry is a flat fee.  Orders above every bracket are free.
    """
    # Iterate brackets in ascending order explicitly so the lookup does
    # not silently depend on the insertion order of the BANK_TAX dict.
    for limit in sorted(BANK_TAX):
        if price < limit:
            fee = BANK_TAX[limit]
            if isinstance(fee, str):
                # Percentage fee, e.g. '0.2%' -> price * 0.2 / 100
                return price * float(fee.split('%')[0]) / 100
            return float(fee)
    return 0
def get_ref_data(ref):
    """
    Fonction retournant les données d'une référence d'action
    Nom, Secteur, Industrie
    """
    unknown = ('Unknown', 'Unknown', 'Unknown')
    cac40_filename = 'references/cac40.txt'
    if not exists(cac40_filename):
        print('Fichier manquant: {}'.format(cac40_filename))
        return unknown
    with open(cac40_filename, 'r') as ref_file:
        for raw_line in ref_file.readlines():
            fields = raw_line.split(';')
            if fields[0] != ref:
                continue
            # Strip the trailing newline from each returned field.
            name = fields[1].split('\n')[0]
            sector = fields[2].split('\n')[0]
            industry = fields[3].split('\n')[0]
            return name, sector, industry
    return unknown
def get_var(ref, price, context, market, var, var_type='percent'):
    """
    Fonction retournant la variance des mois précédants

    ref: share reference (first field of the cotation files).
    price: current share price the variation is computed against.
    context: game state; only ``context['date']`` is read.
    market: market directory name under ``cotations/``.
    var: month offset (typically negative, e.g. -1 for last month).
    var_type: 'percent' (default) for a relative variation,
        'euro' for an absolute price difference.

    Returns the variation rounded to 2 decimals, or 0 when the cotation
    file or the reference cannot be found.
    """
    # Month the comparison price is taken from.
    dernier_mois = context['date'] + relativedelta(months=var)
    cotations_filename = 'cotations/{}/Cotations{}{:02d}.txt'.format(
        market,
        dernier_mois.year,
        dernier_mois.month)
    if not exists(cotations_filename):
        print('Fichier manquant: {}'.format(cotations_filename))
        return 0
    with open(cotations_filename, 'r') as cotations_file:
        for line in cotations_file.readlines():
            # Field 0 is the reference, field 5 the closing price.
            if ref == line.split(';')[0]:
                if var_type == 'euro':
                    return round(float(price) - float(line.split(';')[5]), 2)
                # NOTE(review): percent variation is relative to the
                # *current* price, not the historical one — confirm intent.
                return round(100 * (float(price) - float(line.split(';')[5])) / float(price), 2)
    return 0
def display_help():
    """
    Fonction affichant l'aide
    """
    # One entry per available shortcut of the main loop.
    commands = (
        "[a]chat <ref> <nombre>",
        "[v]ente <ref> <nombre> <id>",
        "[l]iste [<filtre>]",
        "[d]ashboard",
        "[s]uivant: passe au prochain mois",
        "[c]lôture <années ancienneté>",
        "[e]xit",
        "[sauvegarder]",
        "[*]: affiche l'aide",
    )
    for entry in commands:
        print(entry)
def list_shares(context, market, filter_str):
    """
    Fonction listant les actions disponibles
    https://www.abcbourse.com/download/historiques.aspx

    Prints a table of every share quoted for the current month, with its
    1/6/12-month variation; rows are kept only if ``filter_str`` matches
    (case-insensitively) any of their values.  Always returns None.
    """
    listing = list()
    cotations_filename = 'cotations/{}/Cotations{}{:02d}.txt'.format(
        market,
        context['date'].year,
        context['date'].month)
    if not exists(cotations_filename):
        print('Fichier manquant: {}'.format(cotations_filename))
        return None
    with open(cotations_filename, 'r') as cotations_file:
        for line in cotations_file.readlines():
            # Field 0: reference, field 5: closing price.
            ref = line.split(';')[0]
            price = line.split(';')[5]
            name, area, industry = get_ref_data(ref)
            result = [
                name,
                ref,
                price,
                get_var(ref, price, context, market, -1),
                get_var(ref, price, context, market, -6),
                get_var(ref, price, context, market, -12),
                area,
                industry,
            ]
            # Substring filter over every column of the row.
            if True in [filter_str.lower() in str(value).lower() for value in result]:
                listing.append(result)
    print(tabulate(listing, [
        'Nom',
        'Reference',
        'Prix (€)',
        'Var 1 mois (%)',
        'Var 6 mois (%)',
        'Var 1 an (%)',
        'Secteur',
        'Industrie']))
    return None
def list_my_shares(context):
    """
    Fonction listant les actions détenues

    Prints a table of the owned share lots with their current value and
    capital gains, and returns the total balance (cash + share value).
    """
    listing = list()
    total_balance = context['balance']
    for wallet, share in enumerate(context['shares']):
        share_price = get_share_price(share['ref'], context)
        share_value = share['num'] * share_price
        total_balance += share_value
        # Approximate holding duration in months (negative for the past).
        month_passed = int(round((share['date'] - context['date']).days/30, 0))
        var_1_month = 'N.A'
        var_6_month = 'N.A'
        # Only display a variation once the lot is old enough.
        if month_passed <= -1:
            var_1_month = share['num'] * get_var(
                share['ref'], share_price, context, MARKET, -1, var_type='euro')
        if month_passed <= -6:
            var_6_month = share['num'] * get_var(
                share['ref'], share_price, context, MARKET, -6, var_type='euro')
        listing.append([
            wallet,
            share['date'],
            get_ref_data(share['ref'])[0],
            share['ref'],
            share['num'],
            round(share_value, 2),
            var_1_month,
            var_6_month,
            # Total capital gain in euros since the purchase month.
            share['num'] * get_var(
                share['ref'], share_price, context, MARKET, month_passed, var_type='euro')
        ])
    print(tabulate(listing, [
        'Id',
        "Date d'achat",
        'Nom',
        'Reference',
        'Nombre',
        'Valeur (€)',
        'Plus-value 1 mois (€)',
        'Plus-value 6 mois (€)',
        'Plus-value (€)']))
    return total_balance
def get_share_price(ref, context):
    """
    Fonction retournant le price courant d'une référence d'action
    """
    # A single market is searched today, kept as a loop for future markets.
    for market in [MARKET]:
        cotations_filename = 'cotations/{}/Cotations{}{:02d}.txt'.format(
            market,
            context['date'].year,
            context['date'].month)
        if not exists(cotations_filename):
            print('Fichier manquant: {}'.format(cotations_filename))
            continue
        with open(cotations_filename, 'r') as cotations_file:
            for line in cotations_file.readlines():
                fields = line.split(';')
                # Field 0: reference, field 5: closing price.
                if fields[0] == ref:
                    return float(fields[5])
    return 0
def buy_share(commande, context):
    """
    Fonction d'achat d'action

    commande: user command of the form 'a <ref> <nombre>'.
    context: game state (balance, shares, date); mutated in place.
    Returns the (possibly unchanged) context.
    """
    try:
        ref = commande.split(' ')[1]
        num = int(commande.split(' ')[2])
    except (IndexError, ValueError):
        # ValueError covers a non-numeric <nombre>, which previously
        # crashed the main loop with an uncaught exception.
        print('Erreur saisie')
        display_help()
        return context
    price = num * get_share_price(ref, context)
    # Brokerage fee is added on top of the share price.
    price += compute_tax(price)
    # NOTE(review): no solvency check — the balance may go negative.
    context['balance'] -= price
    context['shares'].append({'ref': ref, 'date': context['date'], 'num': num})
    return context
def sell_share(commande, context):
    """
    Fonction de vente d'action

    commande: user command of the form 'v <ref> <nombre> <id>'.
    context: game state; mutated in place when the sale is valid.
    Returns the (possibly unchanged) context.
    """
    try:
        ref = commande.split(' ')[1]
        num = int(commande.split(' ')[2])
        wallet_id = int(commande.split(' ')[3])
    except (IndexError, ValueError):
        # ValueError covers non-numeric <nombre>/<id>, which previously
        # crashed the main loop with an uncaught exception.
        print('Erreur saisie')
        display_help()
        return context
    if not 0 <= wallet_id < len(context['shares']):
        # An out-of-range id previously raised an uncaught IndexError.
        print('Erreur saisie')
        display_help()
        return context
    price = num * get_share_price(ref, context)
    # The brokerage fee is deducted from the sale proceeds.
    price -= compute_tax(price)
    share = context['shares'][wallet_id]
    # Only sell if the lot matches the reference and holds enough shares.
    if share['ref'] == ref and share['num'] >= num:
        context['balance'] += price
        share['num'] -= num
    return context
def dashboard(context):
    """
    Fonction affichant le dashboard d'actions
    """
    cash = round(context['balance'], 2)
    print('Solde: {}€'.format(cash))
    print('Actions')
    print('=======')
    # list_my_shares prints the table and returns cash + share value.
    grand_total = list_my_shares(context)
    print('Solde total: {}€'.format(round(grand_total, 2)))
def next_month(context):
    """
    Fonction permettant de passer au mois suivant

    Pays out any dividends listed for the current month into the
    balance, then advances ``context['date']`` by one month.
    Returns the new current date.
    """
    dividendes_filename = 'dividendes/Dividendes{}{:02d}.txt'.format(
        context['date'].year,
        context['date'].month)
    if exists(dividendes_filename):
        # Context manager guarantees the file is closed even on error
        # (the previous open()/close() pair leaked on exceptions).
        with open(dividendes_filename, 'r') as dividendes_file:
            for line in dividendes_file.readlines():
                # Field 0: reference, field 2: dividend per share.
                ref = line.split(';')[0]
                dividende = line.split(';')[2]
                for share in context['shares']:
                    if share['ref'] == ref:
                        amount = float(dividende) * float(share['num'])
                        percent = 100 * float(dividende) / get_share_price(share['ref'], context)
                        print('Versement de dividendes de {}: {}€, {}%'.format(
                            get_ref_data(ref)[0],
                            round(amount, 2),
                            round(percent, 2)))
                        context['balance'] += amount
    context['date'] += relativedelta(months=+1)
    print('nouvelle date: {}'.format(context['date']))
    return context['date']
def closing(context):
    """
    Fonction de clôture du PEA

    Sells every share lot, deducts social contributions on positive
    capital gains, prints the final balance and exits the program.
    """
    for wallet_id, share in enumerate(context['shares']):
        print('Vente de {} x {}'.format(
            get_ref_data(share['ref'])[0],
            share['num']))
        # Approximate holding duration in months (negative for the past).
        month_passed = int(round((share['date'] - context['date']).days/30, 0))
        share_price = get_share_price(share['ref'], context)
        # Capital gain must be in euros (var_type='euro'); the previous
        # default ('percent') taxed a percentage as if it were an amount
        # in euros.  This matches list_my_shares, which also uses 'euro'.
        capital_gain = share['num'] * get_var(
            share['ref'], share_price, context, MARKET, month_passed,
            var_type='euro')
        print('-> Plus-value de {}€'.format(capital_gain))
        tax = capital_gain * SOCIAL_CONTRIBUTIONS / 100
        if capital_gain > 0:
            context['balance'] -= tax
            print('-> Prélèvement sociaux -{}€'.format(round(tax, 2)))
        context = sell_share('v {} {} {}'.format(
            share['ref'], share['num'], wallet_id), context)
    print('Vous avez {}€'.format(round(context['balance'], 2)))
    sys.exit(0)
def save(context):
    """
    Fonction sauvegardant la partie

    Prompts for a file name (default 'save.txt') and pickles the whole
    game context into it.
    """
    filename = input('Comment nommer la sauvegarde ? [save.txt] ')
    if not filename:
        filename = 'save.txt'
    # Context manager guarantees the file is flushed and closed even if
    # pickling fails (the previous open()/close() pair leaked on error).
    with open(filename, 'wb') as afile:
        pickle.dump(context, afile)
    print('Partie sauvegardée !')
def load(filename):
    """
    Fonction chargeant la partie

    filename: path to a save file produced by :func:`save`.
    Returns the restored game context.
    """
    # SECURITY: pickle.load executes arbitrary code embedded in the
    # file — only load save files you created yourself.
    with open(filename, 'rb') as afile:
        context = pickle.load(afile)
    print('Partie chargée !')
    return context
def shortcut_options(context, text):
    """
    Redirection vers les fonctions associées au raccourci

    Dispatches a user command (matched by prefix) to the corresponding
    action.  NOTE: the 'sa' (sauvegarder) branch must be tested before
    the 's' (suivant) branch, otherwise it would never be reached.
    """
    if text.startswith('a'):
        context = buy_share(text, context)
    elif text.startswith('v'):
        context = sell_share(text, context)
    elif text.startswith('l'):
        # Optional second word is a case-insensitive listing filter.
        filter_str = ''
        if len(text.split(' ')) > 1:
            filter_str = text.split(' ')[1]
        list_shares(context, MARKET, filter_str)
    elif text.startswith('d'):
        dashboard(context)
    elif text.startswith('sa'):
        text = input('Êtes-vous sûr de vouloir sauvegarder ? [y/N] ')
        if text.lower() == 'y':
            save(context)
    elif text.startswith('s'):
        context['date'] = next_month(context)
    elif text.startswith('c'):
        # closing() sells everything and terminates the process.
        closing(context)
    elif text.startswith('e'):
        text = input('Êtes-vous sûr de vouloir quitter ? [y/N] ')
        if text.lower() == 'y':
            sys.exit(0)
    else:
        # Anything unrecognised shows the help screen.
        display_help()
def main():
    """
    Fonction principale

    Loads a saved game if a valid path is given as first CLI argument,
    otherwise starts a fresh game, then runs the interactive loop until
    the user exits (via the 'e' or 'c' commands, which call sys.exit).
    """
    if len(sys.argv) > 1 and exists(sys.argv[1]):
        context = load(sys.argv[1])
    else:
        # Fresh game state: current date, cash balance, owned lots.
        context = {
            'date': datetime.strptime(START_DATE, '%Y/%m/%d'),
            'balance': START_MONEY,
            'shares': list()
        }
    display_help()
    while True:
        # Prompt shows the in-game date and the cash balance.
        text = input('[{date}][{balance}€] > '.format(
            date=context['date'],
            balance=round(context['balance'], 2)))
        shortcut_options(context, text)
    return True


if __name__ == '__main__':
    main()
| 31.206266 | 96 | 0.562667 |
7e6b5b813fedeb64636238338c9edb76b494ce99 | 107,728 | py | Python | mne/viz/evoked.py | Macquarie-MEG-Research/mne-python | 469c56a8d1c4edb84852816301ecd43e8ff78ebf | [
"BSD-3-Clause"
] | null | null | null | mne/viz/evoked.py | Macquarie-MEG-Research/mne-python | 469c56a8d1c4edb84852816301ecd43e8ff78ebf | [
"BSD-3-Clause"
] | null | null | null | mne/viz/evoked.py | Macquarie-MEG-Research/mne-python | 469c56a8d1c4edb84852816301ecd43e8ff78ebf | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Functions to plot evoked M/EEG data (besides topographies)."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Daniel McCloy <dan.mccloy@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
from numbers import Integral
import numpy as np
from ..io.pick import (channel_type,
_VALID_CHANNEL_TYPES, channel_indices_by_type,
_DATA_CH_TYPES_SPLIT, _pick_inst, _get_channel_types,
_PICK_TYPES_DATA_DICT, _picks_to_idx, pick_info)
from ..defaults import _handle_default
from .utils import (_draw_proj_checkbox, tight_layout, _check_delayed_ssp,
plt_show, _process_times, DraggableColorbar, _setup_cmap,
_setup_vmin_vmax, _check_cov, _make_combine_callable,
_validate_if_list_of_axes, _triage_rank_sss,
_connection_line, _get_color_list, _setup_ax_spines,
_setup_plot_projector, _prepare_joint_axes, _check_option,
_set_title_multiple_electrodes, _check_time_unit,
_plot_masked_image, _trim_ticks, _set_window_title)
from ..utils import (logger, _clean_names, warn, _pl, verbose, _validate_type,
_check_if_nan, _check_ch_locs, fill_doc, _is_numeric)
from .topo import _plot_evoked_topo
from .topomap import (_prepare_topomap_plot, plot_topomap, _get_pos_outlines,
_draw_outlines, _prepare_topomap, _set_contour_locator,
_check_sphere, _make_head_outlines)
from ..channels.layout import _pair_grad_sensors, find_layout
def _butterfly_onpick(event, params):
    """Add a channel name on click."""
    # Flag so _butterfly_on_button_press redraws exactly once per click.
    params['need_draw'] = True
    ax = event.artist.axes
    # Identify which of our axes the picked line belongs to.
    ax_idx = np.where([ax is a for a in params['axes']])[0]
    if len(ax_idx) == 0:  # this can happen if ax param is used
        return  # let the other axes handle it
    else:
        ax_idx = ax_idx[0]
    # Index of the picked line within that axes' line list.
    lidx = np.where([
        line is event.artist for line in params['lines'][ax_idx]])[0][0]
    ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
    text = params['texts'][ax_idx]
    # Move the annotation to the picked data point and style it like
    # the picked line.
    x = event.artist.get_xdata()[event.ind[0]]
    y = event.artist.get_ydata()[event.ind[0]]
    text.set_x(x)
    text.set_y(y)
    text.set_text(ch_name)
    text.set_color(event.artist.get_color())
    text.set_alpha(1.)
    text.set_zorder(len(ax.lines))  # to make sure it goes on top of the lines
    text.set_path_effects(params['path_effects'])
    # do NOT redraw here, since for butterfly plots hundreds of lines could
    # potentially be picked -- use on_button_press (happens once per click)
    # to do the drawing
def _butterfly_on_button_press(event, params):
"""Only draw once for picking."""
if params['need_draw']:
event.canvas.draw()
else:
idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
if len(idx) == 1:
text = params['texts'][idx[0]]
text.set_alpha(0.)
text.set_path_effects([])
event.canvas.draw()
params['need_draw'] = False
def _line_plot_onselect(xmin, xmax, ch_types, info, data, times, text=None,
                        psd=False, time_unit='s', sphere=None):
    """Draw topomaps from the selected area."""
    import matplotlib.pyplot as plt
    # Topomaps are only meaningful for these channel types.
    ch_types = [type_ for type_ in ch_types if type_ in ('eeg', 'grad', 'mag')]
    if len(ch_types) == 0:
        raise ValueError('Interactive topomaps only allowed for EEG '
                         'and MEG channels.')
    # Gradiometers need at least one full sensor pair to be plottable.
    if ('grad' in ch_types and
            len(_pair_grad_sensors(info, topomap_coords=False,
                                   raise_error=False)) < 2):
        ch_types.remove('grad')
        if len(ch_types) == 0:
            return

    # Mark the selected time span on the evoked figure itself.
    vert_lines = list()
    if text is not None:
        text.set_visible(True)
        ax = text.axes
        vert_lines.append(ax.axvline(xmin, zorder=0, color='red'))
        vert_lines.append(ax.axvline(xmax, zorder=0, color='red'))
        fill = ax.axvspan(xmin, xmax, alpha=0.2, color='green')
        evoked_fig = plt.gcf()
        evoked_fig.canvas.draw()
        evoked_fig.canvas.flush_events()

    # Sample indices delimiting the selection.
    minidx = np.abs(times - xmin).argmin()
    maxidx = np.abs(times - xmax).argmin()
    fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
                              figsize=(3 * len(ch_types), 3))

    for idx, ch_type in enumerate(ch_types):
        if ch_type not in ('eeg', 'grad', 'mag'):
            continue
        picks, pos, merge_channels, _, ch_type, this_sphere, clip_origin = \
            _prepare_topomap_plot(info, ch_type, sphere=sphere)
        outlines = _make_head_outlines(this_sphere, pos, 'head', clip_origin)
        if len(pos) < 2:
            # Not enough sensors of this type to draw a map.
            fig.delaxes(axarr[0][idx])
            continue
        this_data = data[picks, minidx:maxidx]
        if merge_channels:
            from ..channels.layout import _merge_ch_data
            # Gradiometer pairs are combined (RMS, or mean for PSD data).
            method = 'mean' if psd else 'rms'
            this_data, _ = _merge_ch_data(this_data, ch_type, [],
                                          method=method)
            title = '%s %s' % (ch_type, method.upper())
        else:
            title = ch_type
        # Average over the selected time span.
        this_data = np.average(this_data, axis=1)
        axarr[0][idx].set_title(title)
        vmin = min(this_data) if psd else None
        vmax = max(this_data) if psd else None  # All negative for dB psd.
        cmap = 'Reds' if psd else None
        plot_topomap(this_data, pos, cmap=cmap, vmin=vmin, vmax=vmax,
                     axes=axarr[0][idx], show=False, sphere=this_sphere,
                     outlines=outlines)

    unit = 'Hz' if psd else time_unit
    fig.suptitle('Average over %.2f%s - %.2f%s' % (xmin, unit, xmax, unit),
                 y=0.1)
    tight_layout(pad=2.0, fig=fig)
    plt_show()
    if text is not None:
        text.set_visible(False)
        # Remove the span markers when the topomap window is closed.
        close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
                                 fill=fill)
        fig.canvas.mpl_connect('close_event', close_callback)
        evoked_fig.canvas.draw()
        evoked_fig.canvas.flush_events()
def _topo_closed(events, ax, lines, fill):
"""Remove lines from evoked plot as topomap is closed."""
for line in lines:
ax.lines.remove(line)
ax.patches.remove(fill)
ax.get_figure().canvas.draw()
def _rgb(x, y, z):
"""Transform x, y, z values into RGB colors."""
rgb = np.array([x, y, z]).T
rgb -= rgb.min(0)
rgb /= np.maximum(rgb.max(0), 1e-16) # avoid div by zero
return rgb
def _plot_legend(pos, colors, axis, bads, outlines, loc, size=30):
    """Plot (possibly colorized) channel legends for evoked plots."""
    from mpl_toolkits.axes_grid1.inset_locator import inset_axes
    # A draw is needed so the window extent below is up to date.
    axis.get_figure().canvas.draw()
    bbox = axis.get_window_extent()  # Determine the correct size.
    ratio = bbox.width / bbox.height
    # Square inset axes (width corrected by the parent aspect ratio).
    ax = inset_axes(axis, width=str(size / ratio) + '%',
                    height=str(size) + '%', loc=loc)
    ax.set_adjustable('box')
    ax.set_aspect('equal')
    _prepare_topomap(pos, ax, check_nonzero=False)
    pos_x, pos_y = pos.T
    ax.scatter(pos_x, pos_y, color=colors, s=size * .8, marker='.', zorder=1)
    if bads:
        # Overlay a smaller white dot on bad channels.
        bads = np.array(bads)
        ax.scatter(pos_x[bads], pos_y[bads], s=size / 6, marker='.',
                   color='w', zorder=1)
    _draw_outlines(ax, outlines)
def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,
                 units, scalings, titles, axes, plot_type, cmap=None,
                 gfp=False, window_title=None, spatial_colors=False,
                 selectable=True, zorder='unsorted',
                 noise_cov=None, colorbar=True, mask=None, mask_style=None,
                 mask_cmap=None, mask_alpha=.25, time_unit='s',
                 show_names=False, group_by=None, sphere=None):
    """Aux function for plot_evoked and plot_evoked_image (cf. docstrings).

    Extra param is:
    plot_type : str, value ('butterfly' | 'image')
        The type of graph to plot: 'butterfly' plots each channel as a line
        (x axis: time, y axis: amplitude). 'image' plots a 2D image where
        color depicts the amplitude of each channel at a given time point
        (x axis: time, y axis: channel). In 'image' mode, the plot is not
        interactive.
    """
    import matplotlib.pyplot as plt

    # For evoked.plot_image ...
    # First input checks for group_by and axes if any of them is not None.
    # Either both must be dicts, or neither.
    # If the former, the two dicts provide picks and axes to plot them to.
    # Then, we call this function recursively for each entry in `group_by`.
    if plot_type == "image" and isinstance(group_by, dict):
        if axes is None:
            axes = dict()
            for sel in group_by:
                plt.figure()
                axes[sel] = plt.axes()
        if not isinstance(axes, dict):
            raise ValueError("If `group_by` is a dict, `axes` must be "
                             "a dict of axes or None.")
        _validate_if_list_of_axes(list(axes.values()))
        # Only the bottom row keeps its x labels (if any axes is bottom).
        remove_xlabels = any([_is_last_row(ax) for ax in axes.values()])
        for sel in group_by:  # ... we loop over selections
            if sel not in axes:
                raise ValueError(sel + " present in `group_by`, but not "
                                 "found in `axes`")
            ax = axes[sel]
            # the unwieldy dict comp below defaults the title to the sel
            titles = ({channel_type(evoked.info, idx): sel
                       for idx in group_by[sel]} if titles is None else titles)
            # Recurse: one non-grouped call per selection.
            _plot_evoked(evoked, group_by[sel], exclude, unit, show, ylim,
                         proj, xlim, hline, units, scalings, titles,
                         ax, plot_type, cmap=cmap, gfp=gfp,
                         window_title=window_title,
                         selectable=selectable, noise_cov=noise_cov,
                         colorbar=colorbar, mask=mask,
                         mask_style=mask_style, mask_cmap=mask_cmap,
                         mask_alpha=mask_alpha, time_unit=time_unit,
                         show_names=show_names,
                         sphere=sphere)
            if remove_xlabels and not _is_last_row(ax):
                ax.set_xticklabels([])
                ax.set_xlabel("")
        # Share one color scale across all selection images.
        ims = [ax.images[0] for ax in axes.values()]
        clims = np.array([im.get_clim() for im in ims])
        # NOTE(review): `min`/`max` shadow the builtins here.
        min, max = clims.min(), clims.max()
        for im in ims:
            im.set_clim(min, max)
        figs = [ax.get_figure() for ax in axes.values()]
        if len(set(figs)) == 1:
            return figs[0]
        else:
            return figs
    elif isinstance(axes, dict):
        raise ValueError("If `group_by` is not a dict, "
                         "`axes` must not be a dict either.")

    time_unit, times = _check_time_unit(time_unit, evoked.times)
    evoked = evoked.copy()  # we modify info
    info = evoked.info
    if axes is not None and proj == 'interactive':
        raise RuntimeError('Currently only single axis figures are supported'
                           ' for interactive SSP selection.')
    _check_option('gfp', gfp, [True, False, 'only'])

    # Normalize scaling/title/unit dicts and the channel selection.
    scalings = _handle_default('scalings', scalings)
    titles = _handle_default('titles', titles)
    units = _handle_default('units', units)

    picks = _picks_to_idx(info, picks, none='all', exclude=())
    if len(picks) != len(set(picks)):
        raise ValueError("`picks` are not unique. Please remove duplicates.")

    bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']
                  if ch in info['ch_names']]
    if len(exclude) > 0:
        if isinstance(exclude, str) and exclude == 'bads':
            exclude = bad_ch_idx
        elif (isinstance(exclude, list) and
              all(isinstance(ch, str) for ch in exclude)):
            exclude = [info['ch_names'].index(ch) for ch in exclude]
        else:
            raise ValueError(
                'exclude has to be a list of channel names or "bads"')

        picks = np.array([pick for pick in picks if pick not in exclude])

    types = np.array(_get_channel_types(info, picks), str)
    # One subplot per channel type actually present in the picks.
    ch_types_used = list()
    for this_type in _VALID_CHANNEL_TYPES:
        if this_type in types:
            ch_types_used.append(this_type)

    fig = None
    if axes is None:
        fig, axes = plt.subplots(len(ch_types_used), 1)
        fig.subplots_adjust(left=0.125, bottom=0.1, right=0.975, top=0.92,
                            hspace=0.63)
        if isinstance(axes, plt.Axes):
            axes = [axes]
        fig.set_size_inches(6.4, 2 + len(axes))

    if isinstance(axes, plt.Axes):
        axes = [axes]
    elif isinstance(axes, np.ndarray):
        axes = list(axes)

    if fig is None:
        fig = axes[0].get_figure()

    if window_title is not None:
        _set_window_title(fig, window_title)

    if len(axes) != len(ch_types_used):
        raise ValueError('Number of axes (%g) must match number of channel '
                         'types (%d: %s)' % (len(axes), len(ch_types_used),
                                             sorted(ch_types_used)))
    _check_option('proj', proj, (True, False, 'interactive', 'reconstruct'))
    noise_cov = _check_cov(noise_cov, info)
    if proj == 'reconstruct' and noise_cov is not None:
        raise ValueError('Cannot use proj="reconstruct" when noise_cov is not '
                         'None')
    # Apply SSP / whitening before the data is drawn.
    projector, whitened_ch_names = _setup_plot_projector(
        info, noise_cov, proj=proj is True, nave=evoked.nave)
    if len(whitened_ch_names) > 0:
        unit = False
    if projector is not None:
        evoked.data[:] = np.dot(projector, evoked.data)
    if proj == 'reconstruct':
        evoked = evoked._reconstruct_proj()

    if plot_type == 'butterfly':
        _plot_lines(evoked.data, info, picks, fig, axes, spatial_colors, unit,
                    units, scalings, hline, gfp, types, zorder, xlim, ylim,
                    times, bad_ch_idx, titles, ch_types_used, selectable,
                    False, line_alpha=1., nave=evoked.nave,
                    time_unit=time_unit, sphere=sphere)
        plt.setp(axes, xlabel='Time (%s)' % time_unit)

    elif plot_type == 'image':
        for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
            # Only annotate nave on the first subplot.
            use_nave = evoked.nave if ai == 0 else None
            this_picks = list(picks[types == this_type])
            _plot_image(evoked.data, ax, this_type, this_picks, cmap, unit,
                        units, scalings, times, xlim, ylim, titles,
                        colorbar=colorbar, mask=mask, mask_style=mask_style,
                        mask_cmap=mask_cmap, mask_alpha=mask_alpha,
                        nave=use_nave, time_unit=time_unit,
                        show_names=show_names, ch_names=evoked.ch_names)

    if proj == 'interactive':
        _check_delayed_ssp(evoked)
        params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,
                      types=types, units=units, scalings=scalings, unit=unit,
                      ch_types_used=ch_types_used, picks=picks,
                      plot_update_proj_callback=_plot_update_evoked,
                      plot_type=plot_type)
        _draw_proj_checkbox(None, params)

    # Hide x labels on all but the bottom subplot.
    plt.setp(fig.axes[:len(ch_types_used) - 1], xlabel='')
    fig.canvas.draw()  # for axes plots update axes.
    plt_show(show)
    return fig
def _is_last_row(ax):
try:
return ax.get_subplotspec().is_last_row()
except AttributeError: # XXX old mpl
return ax.is_last_row()
def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units,
                scalings, hline, gfp, types, zorder, xlim, ylim, times,
                bad_ch_idx, titles, ch_types_used, selectable, psd,
                line_alpha, nave, time_unit, sphere):
    """Plot data as butterfly plot."""
    from matplotlib import patheffects, pyplot as plt
    from matplotlib.widgets import SpanSelector
    assert len(axes) == len(ch_types_used)
    # Per-axes annotation texts, channel index lists and plotted lines,
    # shared with the pick/click callbacks through `params` below.
    texts = list()
    idxs = list()
    lines = list()
    sphere = _check_sphere(sphere, info)
    path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
                                           alpha=0.75)]
    gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w",
                                               alpha=0.75)]
    if selectable:
        # Disable the span selector on axes without enough channels to
        # draw a topomap (grad needs at least 2 sensor pairs).
        selectables = np.ones(len(ch_types_used), dtype=bool)
        for type_idx, this_type in enumerate(ch_types_used):
            idx = picks[types == this_type]
            if len(idx) < 2 or (this_type == 'grad' and len(idx) < 4):
                # prevent unnecessary warnings for e.g. EOG
                if this_type in _DATA_CH_TYPES_SPLIT:
                    logger.info('Need more than one channel to make '
                                'topography for %s. Disabling interactivity.'
                                % (this_type,))
                selectables[type_idx] = False

    if selectable:
        # Parameters for butterfly interactive plots
        params = dict(axes=axes, texts=texts, lines=lines,
                      ch_names=info['ch_names'], idxs=idxs, need_draw=False,
                      path_effects=path_effects)
        fig.canvas.mpl_connect('pick_event',
                               partial(_butterfly_onpick, params=params))
        fig.canvas.mpl_connect('button_press_event',
                               partial(_butterfly_on_button_press,
                                       params=params))

    for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
        line_list = list()  # 'line_list' contains the lines for this axes
        if unit is False:
            this_scaling = 1.0
            ch_unit = 'NA'  # no unit
        else:
            this_scaling = 1. if scalings is None else scalings[this_type]
            ch_unit = units[this_type]
        idx = list(picks[types == this_type])
        idxs.append(idx)

        if len(idx) > 0:
            # Set amplitude scaling
            D = this_scaling * data[idx, :]
            _check_if_nan(D)
            gfp_only = gfp == 'only'
            if not gfp_only:
                chs = [info['chs'][i] for i in idx]
                locs3d = np.array([ch['loc'][:3] for ch in chs])
                if spatial_colors is True and not _check_ch_locs(chs):
                    warn('Channel locations not available. Disabling spatial '
                         'colors.')
                    spatial_colors = selectable = False
                if spatial_colors is True and len(idx) != 1:
                    # Color each channel by its 3D sensor position.
                    x, y, z = locs3d.T
                    colors = _rgb(x, y, z)
                    _handle_spatial_colors(colors, info, idx, this_type, psd,
                                           ax, sphere)
                else:
                    # Single color (user-given or black), bads in red.
                    if isinstance(spatial_colors, (tuple, str)):
                        col = [spatial_colors]
                    else:
                        col = ['k']
                    colors = col * len(idx)
                    for i in bad_ch_idx:
                        if i in idx:
                            colors[idx.index(i)] = 'r'

                if zorder == 'std':
                    # find the channels with the least activity
                    # to map them in front of the more active ones
                    z_ord = D.std(axis=1).argsort()
                elif zorder == 'unsorted':
                    z_ord = list(range(D.shape[0]))
                elif not callable(zorder):
                    error = ('`zorder` must be a function, "std" '
                             'or "unsorted", not {0}.')
                    raise TypeError(error.format(type(zorder)))
                else:
                    z_ord = zorder(D)

                # plot channels
                for ch_idx, z in enumerate(z_ord):
                    line_list.append(
                        ax.plot(times, D[ch_idx], picker=True,
                                zorder=z + 1 if spatial_colors is True else 1,
                                color=colors[ch_idx], alpha=line_alpha,
                                linewidth=0.5)[0])
                    line_list[-1].set_pickradius(3.)

            if gfp:
                if gfp in [True, 'only']:
                    # GFP (std) for EEG, RMS for the other types.
                    if this_type == 'eeg':
                        this_gfp = D.std(axis=0, ddof=0)
                        label = 'GFP'
                    else:
                        this_gfp = np.linalg.norm(D, axis=0) / np.sqrt(len(D))
                        label = 'RMS'
                gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1.,
                                                                      0.)
                this_ylim = ax.get_ylim() if (ylim is None or this_type not in
                                              ylim.keys()) else ylim[this_type]
                # Anchor the GFP trace at the bottom of the axes unless
                # it is the only thing plotted.
                if gfp_only:
                    y_offset = 0.
                else:
                    y_offset = this_ylim[0]
                this_gfp += y_offset
                ax.fill_between(times, y_offset, this_gfp, color='none',
                                facecolor=gfp_color, zorder=1, alpha=0.2)
                line_list.append(ax.plot(times, this_gfp, color=gfp_color,
                                         zorder=3, alpha=line_alpha)[0])
                ax.text(times[0] + 0.01 * (times[-1] - times[0]),
                        this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
                        label, zorder=4, color=gfp_color,
                        path_effects=gfp_path_effects)
            # Bad channels: push above the rest and draw dashed when
            # spatial colors are used.
            for ii, line in zip(idx, line_list):
                if ii in bad_ch_idx:
                    line.set_zorder(2)
                    if spatial_colors is True:
                        line.set_linestyle("--")
            ax.set_ylabel(ch_unit)
            # Hidden annotation used by the pick callback to show the
            # clicked channel's name.
            texts.append(ax.text(0, 0, '', zorder=3,
                                 verticalalignment='baseline',
                                 horizontalalignment='left',
                                 fontweight='bold', alpha=0,
                                 clip_on=True))

            if xlim is not None:
                if xlim == 'tight':
                    xlim = (times[0], times[-1])
                ax.set_xlim(xlim)
            if ylim is not None and this_type in ylim:
                ax.set_ylim(ylim[this_type])
            ax.set(title=r'%s (%d channel%s)'
                   % (titles[this_type], len(D), _pl(len(D))))
            if ai == 0:
                _add_nave(ax, nave)
            if hline is not None:
                for h in hline:
                    c = ('grey' if spatial_colors is True else 'r')
                    ax.axhline(h, linestyle='--', linewidth=2, color=c)
            lines.append(line_list)

    if selectable:
        # Attach a horizontal span selector that pops up topomaps for
        # the selected time window.
        for ax in np.array(axes)[selectables]:
            if len(ax.lines) == 1:
                continue
            text = ax.annotate('Loading...', xy=(0.01, 0.1),
                               xycoords='axes fraction', fontsize=20,
                               color='green', zorder=3)
            text.set_visible(False)
            callback_onselect = partial(_line_plot_onselect,
                                        ch_types=ch_types_used, info=info,
                                        data=data, times=times, text=text,
                                        psd=psd, time_unit=time_unit,
                                        sphere=sphere)
            blit = False if plt.get_backend() == 'MacOSX' else True
            minspan = 0 if len(times) < 2 else times[1] - times[0]
            ax._span_selector = SpanSelector(
                ax, callback_onselect, 'horizontal', minspan=minspan,
                useblit=blit, rectprops=dict(alpha=0.5, facecolor='red'))
def _add_nave(ax, nave):
"""Add nave to axes."""
if nave is not None:
ax.annotate(
r'N$_{\mathrm{ave}}$=%d' % nave, ha='left', va='bottom',
xy=(0, 1), xycoords='axes fraction',
xytext=(0, 5), textcoords='offset pixels')
def _handle_spatial_colors(colors, info, idx, ch_type, psd, ax, sphere):
    """Set up spatial colors and draw the sensor-position legend."""
    names = np.array(_clean_names(info['ch_names']))[idx]
    # Positions (within the picked channels) of channels marked bad.
    bad_idx = [np.where(names == bad)[0][0]
               for bad in info['bads'] if bad in names]
    pos, outlines = _get_pos_outlines(info, idx, sphere=sphere)
    legend_loc = 1 if psd else 2  # Legend in top right for psd plot.
    _plot_legend(pos, colors, ax, bad_idx, outlines, legend_loc)
def _plot_image(data, ax, this_type, picks, cmap, unit, units, scalings, times,
                xlim, ylim, titles, colorbar=True, mask=None, mask_cmap=None,
                mask_style=None, mask_alpha=.25, nave=None,
                time_unit='s', show_names=False, ch_names=None):
    """Plot evoked data as a channels-by-times image on ``ax``.

    ``picks`` (if given) both selects and orders the rows shown.  Note that
    ``ylim`` supplies the *color* limits (vmin/vmax) here, not axis limits.
    """
    import matplotlib.pyplot as plt
    assert time_unit is not None
    # Resolve show_names='auto': show every channel name for small pick
    # selections, otherwise let matplotlib place the ticks (True).
    if show_names == "auto":
        if picks is not None:
            show_names = "all" if len(picks) < 25 else True
        else:
            show_names = False
    cmap = _setup_cmap(cmap)
    ch_unit = units[this_type]
    this_scaling = scalings[this_type]
    if unit is False:
        # Plot raw values without unit conversion.
        this_scaling = 1.0
        ch_unit = 'NA'  # no unit
    if picks is not None:
        data = data[picks]
        if mask is not None:
            mask = mask[picks]
    # Show the image
    # Set amplitude scaling
    data = this_scaling * data
    # Symmetric color limits around zero unless explicit limits were given.
    if ylim is None or this_type not in ylim:
        vmax = np.abs(data).max()
        vmin = -vmax
    else:
        vmin, vmax = ylim[this_type]
    _check_if_nan(data)
    im, t_end = _plot_masked_image(
        ax, data, times, mask, yvals=None, cmap=cmap[0],
        vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
        mask_cmap=mask_cmap)
    # ignore xlim='tight'; happens automatically with `extent` in imshow
    xlim = None if xlim == 'tight' else xlim
    if xlim is not None:
        ax.set_xlim(xlim)
    if colorbar:
        cbar = plt.colorbar(im, ax=ax)
        cbar.ax.set_title(ch_unit)
        # cmap[1] is the interactivity flag produced by _setup_cmap.
        if cmap[1]:
            ax.CB = DraggableColorbar(cbar, im)
    ylabel = 'Channels' if show_names else 'Channel (index)'
    t = titles[this_type] + ' (%d channel%s' % (len(data), _pl(data)) + t_end
    ax.set(ylabel=ylabel, xlabel='Time (%s)' % (time_unit,), title=t)
    _add_nave(ax, nave)
    # NOTE(review): `picks` is indexed unconditionally below; callers appear
    # to always pass it for image plots -- confirm, since None would raise.
    yticks = np.arange(len(picks))
    if show_names != 'all':
        # Keep only automatic tick positions that are valid row indices.
        yticks = np.intersect1d(np.round(ax.get_yticks()).astype(int), yticks)
    yticklabels = np.array(ch_names)[picks] if show_names else np.array(picks)
    ax.set(yticks=yticks, yticklabels=yticklabels[yticks])
@verbose
def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
                ylim=None, xlim='tight', proj=False, hline=None, units=None,
                scalings=None, titles=None, axes=None, gfp=False,
                window_title=None, spatial_colors=False, zorder='unsorted',
                selectable=True, noise_cov=None, time_unit='s', sphere=None,
                verbose=None):
    """Plot evoked data using butterfly plots.
    Left click to a line shows the channel name. Selecting an area by clicking
    and holding left mouse button plots a topographic map of the painted area.
    .. note:: If bad channels are not excluded they are shown in red.
    Parameters
    ----------
    evoked : instance of Evoked
        The evoked data.
    %(picks_all)s
    exclude : list of str | 'bads'
        Channels names to exclude from being shown. If 'bads', the
        bad channels are excluded.
    unit : bool
        Scale plot with channel (SI) unit.
    show : bool
        Show figure if True.
    ylim : dict | None
        Y limits for plots (after scaling has been applied). e.g.
        ylim = dict(eeg=[-20, 20])
        Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
        for each channel equals the pyplot default.
    xlim : 'tight' | tuple | None
        X limits for plots.
    %(plot_proj)s
    hline : list of float | None
        The values at which to show an horizontal line.
    units : dict | None
        The units of the channel types used for axes labels. If None,
        defaults to ``dict(eeg='µV', grad='fT/cm', mag='fT')``.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
        defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
    titles : dict | None
        The titles associated with the channels. If None, defaults to
        ``dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')``.
    axes : instance of Axes | list | None
        The axes to plot to. If list, the list must be a list of Axes of
        the same length as the number of channel types. If instance of
        Axes, there must be only one channel type plotted.
    gfp : bool | 'only'
        Plot the global field power (GFP) or the root mean square (RMS) of the
        data. For MEG data, this will plot the RMS. For EEG, it plots GFP,
        i.e. the standard deviation of the signal across channels. The GFP is
        equivalent to the RMS of an average-referenced signal.
        - ``True``
            Plot GFP or RMS (for EEG and MEG, respectively) and traces for all
            channels.
        - ``'only'``
            Plot GFP or RMS (for EEG and MEG, respectively), and omit the
            traces for individual channels.
        The color of the GFP/RMS trace will be green if
        ``spatial_colors=False``, and black otherwise.
        .. versionchanged:: 0.23
           Plot GFP for EEG instead of RMS. Label RMS traces correctly as such.
    window_title : str | None
        The title to put at the top of the figure.
    spatial_colors : bool
        If True, the lines are color coded by mapping physical sensor
        coordinates into color values. Spatially similar channels will have
        similar colors. Bad channels will be dotted. If False, the good
        channels are plotted black and bad channels red. Defaults to False.
    zorder : str | callable
        Which channels to put in the front or back. Only matters if
        ``spatial_colors`` is used.
        If str, must be ``std`` or ``unsorted`` (defaults to ``unsorted``). If
        ``std``, data with the lowest standard deviation (weakest effects) will
        be put in front so that they are not obscured by those with stronger
        effects. If ``unsorted``, channels are z-sorted as in the evoked
        instance.
        If callable, must take one argument: a numpy array of the same
        dimensionality as the evoked raw data; and return a list of
        unique integers corresponding to the number of channels.
        .. versionadded:: 0.13.0
    selectable : bool
        Whether to use interactive features. If True (default), it is possible
        to paint an area to draw topomaps. When False, the interactive features
        are disabled. Disabling interactive features reduces memory consumption
        and is useful when using ``axes`` parameter to draw multiaxes figures.
        .. versionadded:: 0.13.0
    noise_cov : instance of Covariance | str | None
        Noise covariance used to whiten the data while plotting.
        Whitened data channel names are shown in italic.
        Can be a string to load a covariance from disk.
        See also :meth:`mne.Evoked.plot_white` for additional inspection
        of noise covariance properties when whitening evoked data.
        For data processed with SSS, the effective dependence between
        magnetometers and gradiometers may introduce differences in scaling,
        consider using :meth:`mne.Evoked.plot_white`.
        .. versionadded:: 0.16.0
    time_unit : str
        The units for the time axis, can be "ms" or "s" (default).
        .. versionadded:: 0.16
    %(topomap_sphere_auto)s
    %(verbose)s
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        Figure containing the butterfly plots.
    See Also
    --------
    mne.viz.plot_evoked_white
    """
    # Thin wrapper: delegate to the shared evoked-plotting routine in
    # butterfly mode.
    return _plot_evoked(
        evoked=evoked, picks=picks, exclude=exclude, unit=unit, show=show,
        ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units,
        scalings=scalings, titles=titles, axes=axes, plot_type="butterfly",
        gfp=gfp, window_title=window_title, spatial_colors=spatial_colors,
        selectable=selectable, zorder=zorder, noise_cov=noise_cov,
        time_unit=time_unit, sphere=sphere)
def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
                     border='none', ylim=None, scalings=None, title=None,
                     proj=False, vline=[0.0], fig_background=None,
                     merge_grads=False, legend=True, axes=None,
                     background_color='w', noise_cov=None, show=True):
    """Plot 2D topography of evoked responses.
    Clicking on the plot of an individual sensor opens a new figure showing
    the evoked response for the selected sensor.
    Parameters
    ----------
    evoked : list of Evoked | Evoked
        The evoked response to plot.
    layout : instance of Layout | None
        Layout instance specifying sensor positions (does not need to
        be specified for Neuromag data). If possible, the correct layout is
        inferred from the data.
    layout_scale : float
        Scaling factor for adjusting the relative size of the layout
        on the canvas.
    color : list of color | color | None
        Everything matplotlib accepts to specify colors. If not list-like,
        the color specified will be repeated. If None, colors are
        automatically drawn.
    border : str
        Matplotlib borders style to be used for each sensor plot.
    ylim : dict | None
        Y limits for plots (after scaling has been applied). The value
        determines the upper and lower subplot limits. e.g.
        ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad, misc.
        If None, the ylim parameter for each channel is determined by
        the maximum absolute peak.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
        defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
    title : str
        Title of the figure.
    proj : bool | 'interactive'
        If true SSP projections are applied before display. If 'interactive',
        a check box for reversible selection of SSP projection vectors will
        be shown.
    vline : list of float | None
        The values at which to show a vertical line.
    fig_background : None | ndarray
        A background image for the figure. This must work with a call to
        plt.imshow. Defaults to None.
    merge_grads : bool
        Whether to use RMS value of gradiometer pairs. Only works for Neuromag
        data. Defaults to False.
    legend : bool | int | str | tuple
        If True, create a legend based on evoked.comment. If False, disable the
        legend. Otherwise, the legend is created and the parameter value is
        passed as the location parameter to the matplotlib legend call. It can
        be an integer (e.g. 0 corresponds to upper right corner of the plot),
        a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
        lower left corner of the legend in the axes coordinate system).
        See matplotlib documentation for more details.
    axes : instance of matplotlib Axes | None
        Axes to plot into. If None, axes will be created.
    background_color : color
        Background color. Typically 'k' (black) or 'w' (white; default).
        .. versionadded:: 0.15.0
    noise_cov : instance of Covariance | str | None
        Noise covariance used to whiten the data while plotting.
        Whitened data channel names are shown in italic.
        Can be a string to load a covariance from disk.
        .. versionadded:: 0.16.0
    show : bool
        Show figure if True.
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        Images of evoked responses at sensor locations.
    """
    from matplotlib.colors import colorConverter
    if not isinstance(evoked, (tuple, list)):
        evoked = [evoked]
    # Decide font/face colors based on the perceived background brightness.
    dark_background = \
        np.mean(colorConverter.to_rgb(background_color)) < 0.5
    if dark_background:
        fig_facecolor = background_color
        axis_facecolor = background_color
        font_color = 'w'
    else:
        fig_facecolor = background_color
        axis_facecolor = background_color
        font_color = 'k'
    if color is None:
        if dark_background:
            color = ['w'] + _get_color_list()
        else:
            color = _get_color_list()
    # Tile the color cycle so it is at least as long as `evoked`, then trim.
    # Ceiling division is required here: the previous modulo-based count
    # produced too few colors whenever len(evoked) was an exact multiple
    # of len(color) greater than len(color).
    color = color * ((len(evoked) - 1) // len(color) + 1)
    color = color[:len(evoked)]
    return _plot_evoked_topo(evoked=evoked, layout=layout,
                             layout_scale=layout_scale, color=color,
                             border=border, ylim=ylim, scalings=scalings,
                             title=title, proj=proj, vline=vline,
                             fig_facecolor=fig_facecolor,
                             fig_background=fig_background,
                             axis_facecolor=axis_facecolor,
                             font_color=font_color,
                             merge_channels=merge_grads,
                             legend=legend, axes=axes, show=show,
                             noise_cov=noise_cov)
@fill_doc
def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True,
                      show=True, clim=None, xlim='tight', proj=False,
                      units=None, scalings=None, titles=None, axes=None,
                      cmap='RdBu_r', colorbar=True, mask=None,
                      mask_style=None, mask_cmap="Greys", mask_alpha=.25,
                      time_unit='s', show_names="auto", group_by=None,
                      sphere=None):
    """Plot evoked data as images.
    Parameters
    ----------
    evoked : instance of Evoked
        The evoked data.
    %(picks_all)s
        This parameter can also be used to set the order the channels
        are shown in, as the channel image is sorted by the order of picks.
    exclude : list of str | 'bads'
        Channels names to exclude from being shown. If 'bads', the
        bad channels are excluded.
    unit : bool
        Scale plot with channel (SI) unit.
    show : bool
        Show figure if True.
    clim : dict | None
        Color limits for plots (after scaling has been applied). e.g.
        ``clim = dict(eeg=[-20, 20])``.
        Valid keys are eeg, mag, grad, misc. If None, the clim parameter
        for each channel equals the pyplot default.
    xlim : 'tight' | tuple | None
        X limits for plots.
    proj : bool | 'interactive'
        If true SSP projections are applied before display. If 'interactive',
        a check box for reversible selection of SSP projection vectors will
        be shown.
    units : dict | None
        The units of the channel types used for axes labels. If None,
        defaults to ``dict(eeg='µV', grad='fT/cm', mag='fT')``.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
        defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
    titles : dict | None
        The titles associated with the channels. If None, defaults to
        ``dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')``.
    axes : instance of Axes | list | dict | None
        The axes to plot to. If list, the list must be a list of Axes of
        the same length as the number of channel types. If instance of
        Axes, there must be only one channel type plotted.
        If ``group_by`` is a dict, this cannot be a list, but it can be a dict
        of lists of axes, with the keys matching those of ``group_by``. In that
        case, the provided axes will be used for the corresponding groups.
        Defaults to ``None``.
    cmap : matplotlib colormap | (colormap, bool) | 'interactive'
        Colormap. If tuple, the first value indicates the colormap to use and
        the second value is a boolean defining interactivity. In interactive
        mode the colors are adjustable by clicking and dragging the colorbar
        with left and right mouse button. Left mouse button moves the scale up
        and down and right mouse button adjusts the range. Hitting space bar
        resets the scale. Up and down arrows can be used to change the
        colormap. If 'interactive', translates to ``('RdBu_r', True)``.
        Defaults to ``'RdBu_r'``.
    colorbar : bool
        If True, plot a colorbar. Defaults to True.
        .. versionadded:: 0.16
    mask : ndarray | None
        An array of booleans of the same shape as the data. Entries of the
        data that correspond to ``False`` in the mask are masked (see
        ``do_mask`` below). Useful for, e.g., masking for statistical
        significance.
        .. versionadded:: 0.16
    mask_style : None | 'both' | 'contour' | 'mask'
        If ``mask`` is not None: if 'contour', a contour line is drawn around
        the masked areas (``True`` in ``mask``). If 'mask', entries not
        ``True`` in ``mask`` are shown transparently. If 'both', both a contour
        and transparency are used.
        If ``None``, defaults to 'both' if ``mask`` is not None, and is ignored
        otherwise.
        .. versionadded:: 0.16
    mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
        The colormap chosen for masked parts of the image (see below), if
        ``mask`` is not ``None``. If None, ``cmap`` is reused. Defaults to
        ``Greys``. Not interactive. Otherwise, as ``cmap``.
    mask_alpha : float
        A float between 0 and 1. If ``mask`` is not None, this sets the
        alpha level (degree of transparency) for the masked-out segments.
        I.e., if 0, masked-out segments are not visible at all.
        Defaults to .25.
        .. versionadded:: 0.16
    time_unit : str
        The units for the time axis, can be "ms" or "s" (default).
        .. versionadded:: 0.16
    show_names : bool | 'auto' | 'all'
        Determines if channel names should be plotted on the y axis. If False,
        no names are shown. If True, ticks are set automatically by matplotlib
        and the corresponding channel names are shown. If "all", all channel
        names are shown. If "auto", is set to False if ``picks`` is ``None``,
        to ``True`` if ``picks`` contains 25 or more entries, or to "all"
        if ``picks`` contains fewer than 25 entries.
    group_by : None | dict
        If a dict, the values must be picks, and ``axes`` must also be a dict
        with matching keys, or None. If ``axes`` is None, one figure and one
        axis will be created for each entry in ``group_by``.Then, for each
        entry, the picked channels will be plotted to the corresponding axis.
        If ``titles`` are None, keys will become plot titles. This is useful
        for e.g. ROIs. Each entry must contain only one channel type.
        For example::
            group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
        If None, all picked channels are plotted to the same axis.
    %(topomap_sphere_auto)s
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        Figure containing the images.
    """
    # Thin wrapper: delegate to the shared evoked-plotting routine in image
    # mode; ``clim`` maps onto its ``ylim`` (color limits) parameter.
    return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
                        show=show, ylim=clim, proj=proj, xlim=xlim, hline=None,
                        units=units, scalings=scalings, titles=titles,
                        axes=axes, plot_type="image", cmap=cmap,
                        colorbar=colorbar, mask=mask, mask_style=mask_style,
                        mask_cmap=mask_cmap, mask_alpha=mask_alpha,
                        time_unit=time_unit, show_names=show_names,
                        group_by=group_by, sphere=sphere)
def _plot_update_evoked(params, bools):
    """Update the plot evoked lines."""
    picks = params['picks']
    evoked = params['evoked']
    # Keep only the projectors whose checkbox is currently active.
    active = np.where(bools)[0]
    projs = [p for ii, p in enumerate(params['projs']) if ii in active]
    params['proj_bools'] = bools
    new_evoked = evoked.copy()
    new_evoked.info['projs'] = []
    new_evoked.add_proj(projs)
    new_evoked.apply_proj()
    for ax, ch_type in zip(params['axes'], params['ch_types_used']):
        scaling = params['scalings'][ch_type]
        idx = [picks[i] for i in range(len(picks))
               if params['types'][i] == ch_type]
        D = scaling * new_evoked.data[idx, :]
        if params['plot_type'] == 'butterfly':
            # Butterfly plots: update each trace in place.
            for line, di in zip(ax.lines, D):
                line.set_ydata(di)
        else:
            # Image plots: refresh the underlying image data.
            ax.images[0].set_data(D)
    params['fig'].canvas.draw()
@verbose
def plot_evoked_white(evoked, noise_cov, show=True, rank=None, time_unit='s',
                      sphere=None, axes=None, verbose=None):
    """Plot whitened evoked response.
    Plots the whitened evoked response and the whitened GFP as described in
    [1]_. This function is especially useful for investigating noise
    covariance properties to determine if data are properly whitened (e.g.,
    achieving expected values in line with model assumptions, see Notes below).
    Parameters
    ----------
    evoked : instance of mne.Evoked
        The evoked response.
    noise_cov : list | instance of Covariance | str
        The noise covariance. Can be a string to load a covariance from disk.
    show : bool
        Show figure if True.
    %(rank_None)s
    time_unit : str
        The units for the time axis, can be "ms" or "s" (default).
        .. versionadded:: 0.16
    %(topomap_sphere_auto)s
    axes : list | None
        List of axes to plot into.
        .. versionadded:: 0.21.0
    %(verbose)s
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure object containing the plot.
    See Also
    --------
    mne.Evoked.plot
    Notes
    -----
    If baseline signals match the assumption of Gaussian white noise,
    values should be centered at 0, and be within 2 standard deviations
    (±1.96) for 95%% of the time points. For the global field power (GFP),
    we expect it to fluctuate around a value of 1.
    If one single covariance object is passed, the GFP panel (bottom)
    will depict different sensor types. If multiple covariance objects are
    passed as a list, the left column will display the whitened evoked
    responses for each channel based on the whitener from the noise covariance
    that has the highest log-likelihood. The left column will depict the
    whitened GFPs based on each estimator separately for each sensor type.
    Instead of numbers of channels the GFP display shows the estimated rank.
    Note. The rank estimation will be printed by the logger
    (if ``verbose=True``) for each noise covariance estimator that is passed.
    References
    ----------
    .. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
           covariance estimation and spatial whitening of MEG and EEG
           signals, vol. 108, 328-342, NeuroImage.
    """
    from ..cov import whiten_evoked, read_cov  # recursive import
    import matplotlib.pyplot as plt
    time_unit, times = _check_time_unit(time_unit, evoked.times)
    if isinstance(noise_cov, str):
        noise_cov = read_cov(noise_cov)
    if not isinstance(noise_cov, (list, tuple)):
        noise_cov = [noise_cov]
    evoked = evoked.copy()  # handle ref meg
    # Indices of projectors that have not been applied yet.
    passive_idx = [idx for idx, proj in enumerate(evoked.info['projs'])
                   if not proj['active']]
    # either applied already or not-- else issue
    for idx in passive_idx[::-1]:  # reverse order so idx does not change
        evoked.del_proj(idx)
    evoked.pick_types(ref_meg=False, exclude='bads', **_PICK_TYPES_DATA_DICT)
    n_ch_used, rank_list, picks_list, has_sss = _triage_rank_sss(
        evoked.info, noise_cov, rank, scalings=None)
    if has_sss:
        logger.info('SSS has been applied to data. Showing mag and grad '
                    'whitening jointly.')
    # get one whitened evoked per cov
    evokeds_white = [whiten_evoked(evoked, cov, picks=None, rank=r)
                     for cov, r in zip(noise_cov, rank_list)]
    def whitened_gfp(x, rank=None):
        """Whitened Global Field Power.
        The MNE inverse solver assumes zero mean whitened data as input.
        Therefore, a chi^2 statistic will be best to detect model violations.
        """
        return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
    # prepare plot
    # Layout: two columns (evoked | GFP) when comparing several covariances,
    # otherwise a single column with one extra row for the GFP panel.
    if len(noise_cov) > 1:
        n_columns = 2
        n_extra_row = 0
    else:
        n_columns = 1
        n_extra_row = 1
    n_rows = n_ch_used + n_extra_row
    want_shape = (n_rows, n_columns) if len(noise_cov) > 1 else (n_rows,)
    _validate_type(axes, (list, tuple, np.ndarray, None), 'axes')
    if axes is None:
        _, axes = plt.subplots(n_rows,
                               n_columns, sharex=True, sharey=False,
                               figsize=(8.8, 2.2 * n_rows))
    else:
        # User-supplied axes must match the layout computed above.
        axes = np.array(axes)
        for ai, ax in enumerate(axes.flat):
            _validate_type(ax, plt.Axes, 'axes.flat[%d]' % (ai,))
        if axes.shape != want_shape:
            raise ValueError(f'axes must have shape {want_shape}, got '
                             f'{axes.shape}')
    fig = axes.flat[0].figure
    if n_columns > 1:
        suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
                    'and global field power '
                    '(right, comparison of estimators)' %
                    noise_cov[0].get('method', 'empirical'))
        fig.suptitle(suptitle)
    if any(((n_columns == 1 and n_ch_used >= 1),
            (n_columns == 2 and n_ch_used == 1))):
        axes_evoked = axes[:n_ch_used]
        ax_gfp = axes[-1:]
    elif n_columns == 2 and n_ch_used > 1:
        axes_evoked = axes[:n_ch_used, 0]
        ax_gfp = axes[:, 1]
    else:
        raise RuntimeError('Wrong axes inputs')
    titles_ = _handle_default('titles')
    if has_sss:
        titles_['meg'] = 'MEG (combined)'
    colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
    ch_colors = _handle_default('color', None)
    iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
    # the first is by law the best noise cov, on the left we plot that one.
    if not has_sss:
        evokeds_white[0].plot(unit=False, axes=axes_evoked,
                              hline=[-1.96, 1.96], show=False,
                              time_unit=time_unit)
    else:
        for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
            ax.plot(times, evokeds_white[0].data[picks].T, color='k',
                    lw=0.5)
            # ±1.96 marks the 95% interval expected for white noise.
            for hline in [-1.96, 1.96]:
                ax.axhline(hline, color='red', linestyle='--', lw=2)
            ax.set(title='%s (%d channel%s)'
                   % (titles_[ch_type], len(picks), _pl(len(picks))))
    # Now plot the GFP for all covs if indicated.
    # NOTE: the loop variable deliberately rebinds ``noise_cov`` to the
    # current covariance; the original list is no longer needed here.
    for evoked_white, noise_cov, rank_, color in iter_gfp:
        i = 0
        for ch, sub_picks in picks_list:
            this_rank = rank_[ch]
            title = '{0} ({2}{1})'.format(
                titles_[ch] if n_columns > 1 else ch,
                this_rank, 'rank ' if n_columns > 1 else '')
            label = noise_cov.get('method', 'empirical')
            ax = ax_gfp[i]
            ax.set_title(title if n_columns > 1 else
                         'Whitened GFP, method = "%s"' % label)
            data = evoked_white.data[sub_picks]
            gfp = whitened_gfp(data, rank=this_rank)
            # Wrap SSS-processed data (MEG) to the mag color
            color_ch = 'mag' if ch == 'meg' else ch
            ax.plot(times, gfp,
                    label=label if n_columns > 1 else title,
                    color=color if n_columns > 1 else ch_colors[color_ch],
                    lw=0.5)
            ax.set(xlabel='Time (%s)' % (time_unit,), ylabel=r'GFP ($\chi^2$)',
                   xlim=[times[0], times[-1]], ylim=(0, 10))
            ax.axhline(1, color='red', linestyle='--', lw=2.)
            if n_columns > 1:
                i += 1
    ax = ax_gfp[0]
    if n_columns == 1:
        ax.legend(  # mpl < 1.2.1 compatibility: use prop instead of fontsize
            loc='upper right', bbox_to_anchor=(0.98, 0.9), prop=dict(size=12))
    else:
        ax.legend(loc='upper right', prop=dict(size=10))
    params = dict(top=[0.69, 0.82, 0.87][n_rows - 1],
                  bottom=[0.22, 0.13, 0.09][n_rows - 1])
    if has_sss:
        params['hspace'] = 0.49
    fig.subplots_adjust(**params)
    fig.canvas.draw()
    plt_show(show)
    return fig
@verbose
def plot_snr_estimate(evoked, inv, show=True, axes=None, verbose=None):
    """Plot a data SNR estimate.
    Parameters
    ----------
    evoked : instance of Evoked
        The evoked instance. This should probably be baseline-corrected.
    inv : instance of InverseOperator
        The minimum-norm inverse operator.
    show : bool
        Show figure if True.
    axes : instance of Axes | None
        The axes to plot into.
        .. versionadded:: 0.21.0
    %(verbose)s
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure object containing the plot.
    Notes
    -----
    The bluish green line is the SNR determined by the GFP of the whitened
    evoked data. The orange line is the SNR estimated based on the mismatch
    between the data and the data re-estimated from the regularized inverse.
    .. versionadded:: 0.9.0
    """
    import matplotlib.pyplot as plt
    from ..minimum_norm import estimate_snr
    snr, snr_est = estimate_snr(evoked, inv)
    _validate_type(axes, (None, plt.Axes))
    ax = plt.subplots(1, 1)[1] if axes is None else axes
    del axes
    fig = ax.figure
    lims = np.concatenate([evoked.times[[0, -1]], [-1, snr_est.max()]])
    # Reference lines at t=0 and SNR=0.
    ax.axvline(0, color='k', ls=':', lw=1)
    ax.axhline(0, color='k', ls=':', lw=1)
    # Colors are "bluish green" and "vermilion" taken from:
    # http://bconnelly.net/2013/10/creating-colorblind-friendly-figures/
    handles = list()
    labels = ('Inverse', 'Whitened GFP')
    handles.append(
        ax.plot(evoked.times, snr_est, color=[0.0, 0.6, 0.5])[0])
    handles.append(
        ax.plot(evoked.times, snr - 1, color=[0.8, 0.4, 0.0])[0])
    ax.set(xlim=lims[:2], ylim=lims[2:], ylabel='SNR',
           xlabel='Time (s)')
    if evoked.comment is not None:
        ax.set_title(evoked.comment)
    ax.legend(handles, labels, title='Estimation method')
    plt_show(show)
    return fig
@fill_doc
def plot_evoked_joint(evoked, times="peaks", title='', picks=None,
                      exclude=None, show=True, ts_args=None,
                      topomap_args=None):
    """Plot evoked data as butterfly plot and add topomaps for time points.
    .. note:: Axes to plot in can be passed by the user through ``ts_args`` or
              ``topomap_args``. In that case both ``ts_args`` and
              ``topomap_args`` axes have to be used. Be aware that when the
              axes are provided, their position may be slightly modified.
    Parameters
    ----------
    evoked : instance of Evoked
        The evoked instance.
    times : float | array of float | "auto" | "peaks"
        The time point(s) to plot. If ``"auto"``, 5 evenly spaced topographies
        between the first and last time instant will be shown. If ``"peaks"``,
        finds time points automatically by checking for 3 local maxima in
        Global Field Power. Defaults to ``"peaks"``.
    title : str | None
        The title. If ``None``, suppress printing channel type title. If an
        empty string, a default title is created. Defaults to ''. If custom
        axes are passed make sure to set ``title=None``, otherwise some of your
        axes may be removed during placement of the title axis.
    %(picks_all)s
    exclude : None | list of str | 'bads'
        Channels names to exclude from being shown. If ``'bads'``, the
        bad channels are excluded. Defaults to ``None``.
    show : bool
        Show figure if ``True``. Defaults to ``True``.
    ts_args : None | dict
        A dict of ``kwargs`` that are forwarded to :meth:`mne.Evoked.plot` to
        style the butterfly plot. If they are not in this dict, the following
        defaults are passed: ``spatial_colors=True``, ``zorder='std'``.
        ``show`` and ``exclude`` are illegal.
        If ``None``, no customizable arguments will be passed.
        Defaults to ``None``.
    topomap_args : None | dict
        A dict of ``kwargs`` that are forwarded to
        :meth:`mne.Evoked.plot_topomap` to style the topomaps.
        If it is not in this dict, ``outlines='skirt'`` will be passed.
        ``show``, ``times``, ``colorbar`` are illegal.
        If ``None``, no customizable arguments will be passed.
        Defaults to ``None``.
    Returns
    -------
    fig : instance of matplotlib.figure.Figure | list
        The figure object containing the plot. If ``evoked`` has multiple
        channel types, a list of figures, one for each channel type, is
        returned.
    Notes
    -----
    .. versionadded:: 0.12.0
    """
    import matplotlib.pyplot as plt
    # Validate and normalize the pass-through argument dicts.
    if ts_args is not None and not isinstance(ts_args, dict):
        raise TypeError('ts_args must be dict or None, got type %s'
                        % (type(ts_args),))
    ts_args = dict() if ts_args is None else ts_args.copy()
    ts_args['time_unit'], _ = _check_time_unit(
        ts_args.get('time_unit', 's'), evoked.times)
    topomap_args = dict() if topomap_args is None else topomap_args.copy()
    got_axes = False
    illegal_args = {"show", 'times', 'exclude'}
    for args in (ts_args, topomap_args):
        if any((x in args for x in illegal_args)):
            raise ValueError("Don't pass any of {} as *_args.".format(
                ", ".join(list(illegal_args))))
    # Custom axes must be supplied for both sub-plots or neither.
    if ("axes" in ts_args) or ("axes" in topomap_args):
        if not (("axes" in ts_args) and ("axes" in topomap_args)):
            raise ValueError("If one of `ts_args` and `topomap_args` contains "
                             "'axes', the other must, too.")
        _validate_if_list_of_axes([ts_args["axes"]], 1)
        # One extra axes entry is expected for the colorbar.
        n_topomaps = (3 if times is None else len(times)) + 1
        _validate_if_list_of_axes(list(topomap_args["axes"]), n_topomaps)
        got_axes = True
    # channel selection
    # simply create a new evoked object with the desired channel selection
    # Need to deal with proj before picking to avoid bad projections
    proj = topomap_args.get('proj', True)
    proj_ts = ts_args.get('proj', True)
    if proj_ts != proj:
        raise ValueError(
            f'topomap_args["proj"] (default True, got {proj}) must match '
            f'ts_args["proj"] (default True, got {proj_ts})')
    _check_option('topomap_args["proj"]', proj, (True, False, 'reconstruct'))
    evoked = evoked.copy()
    if proj:
        evoked.apply_proj()
        if proj == 'reconstruct':
            evoked._reconstruct_proj()
    topomap_args['proj'] = ts_args['proj'] = False  # don't reapply
    evoked = _pick_inst(evoked, picks, exclude, copy=False)
    info = evoked.info
    ch_types = _get_channel_types(info, unique=True, only_data_chs=True)
    # if multiple sensor types: one plot per channel type, recursive call
    if len(ch_types) > 1:
        if got_axes:
            raise NotImplementedError(
                "Currently, passing axes manually (via `ts_args` or "
                "`topomap_args`) is not supported for multiple channel types.")
        figs = list()
        for this_type in ch_types:  # pick only the corresponding channel type
            ev_ = evoked.copy().pick_channels(
                [info['ch_names'][idx] for idx in range(info['nchan'])
                 if channel_type(info, idx) == this_type])
            if len(_get_channel_types(ev_.info, unique=True)) > 1:
                raise RuntimeError('Possibly infinite loop due to channel '
                                   'selection problem. This should never '
                                   'happen! Please check your channel types.')
            figs.append(
                plot_evoked_joint(
                    ev_, times=times, title=title, show=show, ts_args=ts_args,
                    exclude=list(), topomap_args=topomap_args))
        return figs
    # set up time points to show topomaps for
    times_sec = _process_times(evoked, times, few=True)
    del times
    _, times_ts = _check_time_unit(ts_args['time_unit'], times_sec)
    # prepare axes for topomap
    if not got_axes:
        fig, ts_ax, map_ax, cbar_ax = _prepare_joint_axes(len(times_sec),
                                                          figsize=(8.0, 4.2))
    else:
        # Last user-supplied topomap axes entry is reserved for the colorbar.
        ts_ax = ts_args["axes"]
        del ts_args["axes"]
        map_ax = topomap_args["axes"][:-1]
        cbar_ax = topomap_args["axes"][-1]
        del topomap_args["axes"]
        fig = cbar_ax.figure
    # butterfly/time series plot
    # most of this code is about passing defaults on demand
    ts_args_def = dict(picks=None, unit=True, ylim=None, xlim='tight',
                       proj=False, hline=None, units=None, scalings=None,
                       titles=None, gfp=False, window_title=None,
                       spatial_colors=True, zorder='std',
                       sphere=None)
    ts_args_def.update(ts_args)
    _plot_evoked(evoked, axes=ts_ax, show=False, plot_type='butterfly',
                 exclude=[], **ts_args_def)
    # handle title
    # we use a new axis for the title to handle scaling of plots
    old_title = ts_ax.get_title()
    ts_ax.set_title('')
    if title is not None:
        title_ax = plt.subplot(4, 3, 2)
        if title == '':
            title = old_title
        title_ax.text(.5, .5, title, transform=title_ax.transAxes,
                      horizontalalignment='center',
                      verticalalignment='center')
        title_ax.axis('off')
    # topomap
    contours = topomap_args.get('contours', 6)
    ch_type = ch_types.pop()  # set should only contain one element
    # Since the data has all the ch_types, we get the limits from the plot.
    vmin, vmax = ts_ax.get_ylim()
    norm = ch_type == 'grad'
    vmin = 0 if norm else vmin
    vmin, vmax = _setup_vmin_vmax(evoked.data, vmin, vmax, norm)
    if not isinstance(contours, (list, np.ndarray)):
        locator, contours = _set_contour_locator(vmin, vmax, contours)
    else:
        locator = None
    topomap_args_pass = (dict(extrapolate='local') if ch_type == 'seeg'
                         else dict())
    topomap_args_pass.update(topomap_args)
    topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt')
    topomap_args_pass['contours'] = contours
    evoked.plot_topomap(times=times_sec, axes=map_ax, show=False,
                        colorbar=False, **topomap_args_pass)
    if topomap_args.get('colorbar', True):
        from matplotlib import ticker
        cbar = plt.colorbar(map_ax[0].images[0], cax=cbar_ax)
        if isinstance(contours, (list, np.ndarray)):
            cbar.set_ticks(contours)
        else:
            if locator is None:
                locator = ticker.MaxNLocator(nbins=5)
            cbar.locator = locator
        cbar.update_ticks()
    if not got_axes:
        plt.subplots_adjust(left=.1, right=.93, bottom=.14,
                            top=1. if title is not None else 1.2)
    # connection lines
    # draw the connection lines between time series and topoplots
    lines = [_connection_line(timepoint, fig, ts_ax, map_ax_)
             for timepoint, map_ax_ in zip(times_ts, map_ax)]
    for line in lines:
        fig.lines.append(line)
    # mark times in time series plot
    for timepoint in times_ts:
        ts_ax.axvline(timepoint, color='grey', linestyle='-',
                      linewidth=1.5, alpha=.66, zorder=0)
    # show and return it
    plt_show(show)
    return fig
###############################################################################
# The following functions are all helpers for plot_compare_evokeds. #
###############################################################################
def _check_loc_legal(loc, what='your choice', default=1):
"""Check if loc is a legal location for MPL subordinate axes."""
true_default = {"legend": 2, "show_sensors": 1}.get(what, default)
if isinstance(loc, (bool, np.bool_)) and loc:
loc = true_default
loc_dict = {'upper right': 1, 'upper left': 2, 'lower left': 3,
'lower right': 4, 'right': 5, 'center left': 6,
'center right': 7, 'lower center': 8, 'upper center': 9,
'center': 10}
loc_ = loc_dict.get(loc, loc)
if loc_ not in range(11):
raise ValueError(str(loc) + " is not a legal MPL loc, please supply"
"another value for " + what + ".")
return loc_
def _validate_style_keys_pce(styles, conditions, tags):
"""Validate styles dict keys for plot_compare_evokeds."""
styles = deepcopy(styles)
if not set(styles).issubset(tags.union(conditions)):
raise ValueError('The keys in "styles" ({}) must match the keys in '
'"evokeds" ({}).'.format(list(styles), conditions))
# make sure all the keys are in there
for cond in conditions:
if cond not in styles:
styles[cond] = dict()
# deal with matplotlib's synonymous handling of "c" and "color" /
# "ls" and "linestyle" / "lw" and "linewidth"
elif 'c' in styles[cond]:
styles[cond]['color'] = styles[cond].pop('c')
elif 'ls' in styles[cond]:
styles[cond]['linestyle'] = styles[cond].pop('ls')
elif 'lw' in styles[cond]:
styles[cond]['linewidth'] = styles[cond].pop('lw')
# transfer styles from partial-matched entries
for tag in cond.split('/'):
if tag in styles:
styles[cond].update(styles[tag])
# remove the (now transferred) partial-matching style entries
for key in list(styles):
if key not in conditions:
del styles[key]
return styles
def _validate_colors_pce(colors, cmap, conditions, tags):
    """Check and assign colors for plot_compare_evokeds.

    Parameters
    ----------
    colors : list | tuple | ndarray | dict | None
        User-supplied color spec. ``None`` falls back to the default color
        cycle (no ``cmap``) or to sequential integers (with ``cmap``).
    cmap : colormap-like | None
        Only consulted here to decide whether color values must be numeric.
    conditions : list of str
        Condition names, one per plotted line.
    tags : set of str
        ``/``-separated components of the condition names; dict keys may
        address these instead of full condition names.

    Returns
    -------
    colors : dict
        Condition/tag -> color; when all inputs were ints they are replaced
        by rank-ordered ints (and, without a cmap, by real cycle colors).
    color_vals : set | list
        Distinct color values; sorted into a list when all are numeric.
    """
    err_suffix = ''
    if colors is None:
        if cmap is None:
            # no cmap: default to matplotlib's color cycle
            colors = _get_color_list()
            err_suffix = ' in the default color cycle'
        else:
            # with a cmap, default "colors" are indices along the colormap
            colors = list(range(len(conditions)))
    # convert color list to dict
    if isinstance(colors, (list, tuple, np.ndarray)):
        if len(conditions) > len(colors):
            raise ValueError('Trying to plot {} conditions, but there are only'
                             ' {} colors{}. Please specify colors manually.'
                             .format(len(conditions), len(colors), err_suffix))
        colors = dict(zip(conditions, colors))
    # should be a dict by now...
    if not isinstance(colors, dict):
        raise TypeError('"colors" must be a dict, list, or None; got {}.'
                        .format(type(colors).__name__))
    # validate color dict keys
    if not set(colors).issubset(tags.union(conditions)):
        raise ValueError('If "colors" is a dict its keys ({}) must '
                         'match the keys/conditions in "evokeds" ({}).'
                         .format(list(colors), conditions))
    # validate color dict values
    color_vals = list(colors.values())
    all_numeric = all(_is_numeric(_color) for _color in color_vals)
    if cmap is not None and not all_numeric:
        # NOTE(review): the join below assumes string values; purely numeric
        # values can't reach here, but e.g. RGB tuples would raise TypeError
        # inside join rather than produce this message — confirm acceptable.
        raise TypeError('if "cmap" is specified, then "colors" must be '
                        'None or a (list or dict) of (ints or floats); got {}.'
                        .format(', '.join(color_vals)))
    # convert provided ints to sequential, rank-ordered ints
    all_int = all([isinstance(_color, Integral) for _color in color_vals])
    if all_int:
        colors = deepcopy(colors)
        # map each distinct int to its rank (0, 1, 2, ...) in sorted order
        ranks = {val: ix for ix, val in enumerate(sorted(set(color_vals)))}
        for key, orig_int in colors.items():
            colors[key] = ranks[orig_int]
        # if no cmap, convert color ints to real colors
        if cmap is None:
            color_list = _get_color_list()
            for cond, color_int in colors.items():
                colors[cond] = color_list[color_int]
    # recompute color_vals as a sorted set (we'll need it that way later)
    color_vals = set(colors.values())
    if all_numeric:
        color_vals = sorted(color_vals)
    return colors, color_vals
def _validate_cmap_pce(cmap, colors, color_vals):
    """Check and assign colormap for plot_compare_evokeds."""
    from matplotlib.cm import get_cmap
    from matplotlib.colors import Colormap
    # discrete (all-integer) colors → resample the colormap down to
    # exactly that many entries; otherwise keep its native resolution
    discrete = all([isinstance(val, Integral) for val in color_vals])
    lut = len(color_vals) if discrete else None
    colorbar_title = ''
    # a 2-element (title, cmap) sequence carries a colorbar label
    if isinstance(cmap, (list, tuple, np.ndarray)) and len(cmap) == 2:
        colorbar_title, cmap = cmap
    if isinstance(cmap, str):
        cmap = get_cmap(cmap, lut=lut)
    elif isinstance(cmap, Colormap) and discrete:
        # NOTE: `_resample` is private matplotlib API — presumably stable
        # for the versions this file supports
        cmap = cmap._resample(lut)
    return cmap, colorbar_title
def _validate_linestyles_pce(linestyles, conditions, tags):
"""Check and assign linestyles for plot_compare_evokeds."""
# make linestyles a list if it's not defined
if linestyles is None:
linestyles = [None] * len(conditions) # will get changed to defaults
# convert linestyle list to dict
if isinstance(linestyles, (list, tuple, np.ndarray)):
if len(conditions) > len(linestyles):
raise ValueError('Trying to plot {} conditions, but there are '
'only {} linestyles. Please specify linestyles '
'manually.'
.format(len(conditions), len(linestyles)))
linestyles = dict(zip(conditions, linestyles))
# should be a dict by now...
if not isinstance(linestyles, dict):
raise TypeError('"linestyles" must be a dict, list, or None; got {}.'
.format(type(linestyles).__name__))
# validate linestyle dict keys
if not set(linestyles).issubset(tags.union(conditions)):
raise ValueError('If "linestyles" is a dict its keys ({}) must '
'match the keys/conditions in "evokeds" ({}).'
.format(list(linestyles), conditions))
# normalize linestyle values (so we can accurately count unique linestyles
# later). See https://github.com/matplotlib/matplotlib/blob/master/matplotlibrc.template#L131-L133 # noqa
linestyle_map = {'solid': (0, ()),
'dotted': (0, (1., 1.65)),
'dashed': (0, (3.7, 1.6)),
'dashdot': (0, (6.4, 1.6, 1., 1.6)),
'-': (0, ()),
':': (0, (1., 1.65)),
'--': (0, (3.7, 1.6)),
'-.': (0, (6.4, 1.6, 1., 1.6))}
for cond, _ls in linestyles.items():
linestyles[cond] = linestyle_map.get(_ls, _ls)
return linestyles
def _populate_style_dict_pce(condition, condition_styles, style_name,
style_dict, cmap):
"""Transfer styles into condition_styles dict for plot_compare_evokeds."""
defaults = dict(color='gray', linestyle=(0, ())) # (0, ()) == 'solid'
# if condition X doesn't yet have style Y defined:
if condition_styles.get(style_name, None) is None:
# check the style dict for the full condition name
try:
condition_styles[style_name] = style_dict[condition]
# if it's not in there, try the slash-separated condition tags
except KeyError:
for tag in condition.split('/'):
try:
condition_styles[style_name] = style_dict[tag]
# if the tag's not in there, assign a default value (but also
# continue looping in search of a tag that *is* in there)
except KeyError:
condition_styles[style_name] = defaults[style_name]
# if we found a valid tag, keep track of it for colorbar
# legend purposes, and also stop looping (so we don't overwrite
# a valid tag's style with an invalid tag → default style)
else:
if style_name == 'color' and cmap is not None:
condition_styles['cmap_label'] = tag
break
return condition_styles
def _handle_styles_pce(styles, linestyles, colors, cmap, conditions):
    """Check and assign styles for plot_compare_evokeds.

    Merges the user-supplied ``styles``, ``colors``, ``linestyles`` and
    ``cmap`` parameters into one style dict per condition (each ending up
    with at least a color and a linestyle) and prepares the colorbar
    metadata used when ``cmap`` is given.

    Returns
    -------
    tuple
        ``(styles, linestyles, colors, cmap, colorbar_title,
        colorbar_ticks)``; ``colorbar_ticks`` is ``None`` when no ``cmap``
        was given, otherwise a dict of label -> tick location.
    """
    styles = deepcopy(styles)
    # validate style dict structure (doesn't check/assign values yet)
    tags = set(tag for cond in conditions for tag in cond.split('/'))
    if styles is None:
        styles = {cond: dict() for cond in conditions}
    styles = _validate_style_keys_pce(styles, conditions, tags)
    # validate color dict
    colors, color_vals = _validate_colors_pce(colors, cmap, conditions, tags)
    all_int = all([isinstance(_color, Integral) for _color in color_vals])
    # instantiate cmap
    cmap, colorbar_title = _validate_cmap_pce(cmap, colors, color_vals)
    # validate linestyles
    linestyles = _validate_linestyles_pce(linestyles, conditions, tags)
    # prep for colorbar tick handling
    colorbar_ticks = None if cmap is None else dict()
    # array mapping color integers (indices) to tick locations (array values)
    tick_locs = np.linspace(0, 1, 2 * len(color_vals) + 1)[1::2]
    # transfer colors/linestyles dicts into styles dict; fall back on defaults
    color_and_linestyle = dict(color=colors, linestyle=linestyles)
    for cond, cond_styles in styles.items():
        for _name, _style in color_and_linestyle.items():
            cond_styles = _populate_style_dict_pce(cond, cond_styles, _name,
                                                   _style, cmap)
        # convert numeric colors into cmap color values; store colorbar ticks
        if cmap is not None:
            color_number = cond_styles['color']
            cond_styles['color'] = cmap(color_number)
            tick_loc = tick_locs[color_number] if all_int else color_number
            # label the tick with the tag that supplied the color, if any
            key = cond_styles.pop('cmap_label', cond)
            colorbar_ticks[key] = tick_loc
    return styles, linestyles, colors, cmap, colorbar_title, colorbar_ticks
def _evoked_sensor_legend(info, picks, ymin, ymax, show_sensors, ax,
                          sphere):
    """Show sensor legend (location of a set of sensors on the head)."""
    if show_sensors is True:
        # place the inset in the corner away from the larger data excursion
        bottom, top = np.abs(ax.get_ylim())
        show_sensors = "lower right" if bottom > top else "upper right"
    pos, outlines = _get_pos_outlines(info, picks, sphere=sphere)
    loc = _check_loc_legal(show_sensors, "show_sensors")
    sensor_colors = ["k"] * len(picks)
    _plot_legend(pos, sensor_colors, ax, list(), outlines, loc, size=25)
def _draw_colorbar_pce(ax, colors, cmap, colorbar_title, colorbar_ticks):
    """Draw colorbar for plot_compare_evokeds.

    Parameters
    ----------
    ax : Axes
        The main axes; horizontal space is stolen from it for the colorbar.
    colors : dict
        Condition/tag -> color value; all-integer values signal discrete
        colors (which triggers shrinking the colorbar below).
    cmap : Colormap
        Colormap to display.
    colorbar_title : str
        Label for the colorbar.
    colorbar_ticks : dict
        Label -> tick location (in 0-1 colorbar coordinates).
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from matplotlib.colorbar import ColorbarBase
    from matplotlib.transforms import Bbox
    # create colorbar axes (steals 5% of the main axes' width)
    orig_bbox = ax.get_position()
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='5%', pad=0.1)
    cax.yaxis.tick_right()
    cb = ColorbarBase(cax, cmap=cmap, norm=None, orientation='vertical')
    cb.set_label(colorbar_title)
    # handle ticks: merge labels that map to the same tick location
    ticks = sorted(set(colorbar_ticks.values()))
    ticklabels = [''] * len(ticks)
    for label, tick in colorbar_ticks.items():
        idx = ticks.index(tick)
        if len(ticklabels[idx]):  # handle labels with the same color/location
            ticklabels[idx] = '\n'.join([ticklabels[idx], label])
        else:
            ticklabels[idx] = label
    assert all(len(label) for label in ticklabels)
    cb.set_ticks(ticks)
    cb.set_ticklabels(ticklabels)
    # shrink colorbar if discrete colors
    color_vals = set(colors.values())
    if all([isinstance(_color, Integral) for _color in color_vals]):
        fig = ax.get_figure()
        # force a draw so get_position() below reflects the final layout
        fig.canvas.draw()
        fig_aspect = np.divide(*fig.get_size_inches())
        new_bbox = ax.get_position()
        cax_width = 0.75 * (orig_bbox.xmax - new_bbox.xmax)
        # add extra space for multiline colorbar labels
        h_mult = max(2, max([len(label.split('\n')) for label in ticklabels]))
        cax_height = len(color_vals) * h_mult * cax_width / fig_aspect
        # center the shrunken colorbar vertically on the main axes
        x0 = orig_bbox.xmax - cax_width
        y0 = (new_bbox.ymax + new_bbox.ymin - cax_height) / 2
        x1 = orig_bbox.xmax
        y1 = y0 + cax_height
        new_bbox = Bbox([[x0, y0], [x1, y1]])
        # detach from the divider so the manual position sticks
        cax.set_axes_locator(None)
        cax.set_position(new_bbox)
def _draw_legend_pce(legend, split_legend, styles, linestyles, colors, cmap,
                     do_topo, ax):
    """Draw legend for plot_compare_evokeds."""
    import matplotlib.lines as mlines
    # by default, split the legend exactly when a colorbar is drawn
    if split_legend is None:
        split_legend = cmap is not None
    n_colors = len(set(colors.values()))
    n_linestyles = len(set(linestyles.values()))
    # decide which flavors of legend entries to produce
    draw_styles = cmap is None and not split_legend
    draw_colors = cmap is None and split_legend and n_colors > 1
    draw_linestyles = (cmap is None or split_legend) and n_linestyles > 1
    # build proxy artists (empty Line2Ds) for the legend entries
    entries = list()
    if draw_styles:
        for label, cond_styles in styles.items():
            entries.append(mlines.Line2D([], [], label=label, **cond_styles))
    else:
        if draw_colors:
            for label, color in colors.items():
                entries.append(mlines.Line2D([], [], label=label,
                                             linestyle='solid', color=color))
        if draw_linestyles:
            for label, linestyle in linestyles.items():
                entries.append(mlines.Line2D([], [], label=label,
                                             linestyle=linestyle,
                                             color='black'))
    # legend params: new column for every 5 entries
    legend_params = dict(loc=_check_loc_legal(legend, 'legend'),
                         frameon=True, ncol=1 + (len(entries) // 5))
    # special placement (above dedicated legend axes) in topoplot
    if do_topo and isinstance(legend, bool):
        legend_params.update(loc='lower right', bbox_to_anchor=(1, 1))
    # draw the legend (only when there is something to show)
    if any([draw_styles, draw_colors, draw_linestyles]):
        ax.legend(entries, [line.get_label() for line in entries],
                  **legend_params)
def _draw_axes_pce(ax, ymin, ymax, truncate_yaxis, truncate_xaxis, invert_y,
                   vlines, tmin, tmax, unit, skip_axlabel=True):
    """Position, draw, and truncate axes for plot_compare_evokeds.

    ``truncate_yaxis`` may be a bool (truncate the spine to a rounded half
    of the data range) or ``'auto'`` (truncate to the outermost existing
    ticks); anything else raises ``ValueError``.
    """
    # avoid matplotlib errors when limits are degenerate (zero-width)
    if ymin == ymax:
        ymax += 1e-15
    if tmin == tmax:
        tmax += 1e-9
    ax.set_xlim(tmin, tmax)
    # for dark backgrounds:
    ax.patch.set_alpha(0)
    if not np.isfinite([ymin, ymax]).all():  # nothing plotted
        return
    ax.set_ylim(ymin, ymax)
    ybounds = (ymin, ymax)
    # determine ymin/ymax for spine truncation
    trunc_y = True if truncate_yaxis == 'auto' else truncate_yaxis
    if truncate_yaxis:
        if isinstance(truncate_yaxis, bool):
            # truncate to half the max abs. value and round to a nice-ish
            # number. ylims are already symmetric about 0 or have a lower bound
            # of 0, so div. by 2 should suffice.
            ybounds = np.array([ymin, ymax]) / 2.
            precision = 0.25
            ybounds = np.round(ybounds / precision) * precision
        elif truncate_yaxis == 'auto':
            # truncate to existing max/min ticks
            ybounds = _trim_ticks(ax.get_yticks(), ymin, ymax)[[0, -1]]
        else:
            raise ValueError('"truncate_yaxis" must be bool or '
                             '"auto", got {}'.format(truncate_yaxis))
    # delegate the actual spine drawing/truncation to the shared helper
    _setup_ax_spines(ax, vlines, tmin, tmax, ybounds[0], ybounds[1], invert_y,
                     unit, truncate_xaxis, trunc_y, skip_axlabel)
def _get_data_and_ci(evoked, combine, combine_func, picks, scaling=1,
                     ci_fun=None):
    """Compute (sensor-aggregated, scaled) time series and possibly CI."""
    picks = np.array(picks).flatten()
    # stack the scaled, picked channels of every evoked
    data = np.array([evk.data[picks] * scaling for evk in evoked])
    # aggregate over the sensor dimension, when requested
    if combine is not None:
        logger.info('combining channels using "{}"'.format(combine))
        data = combine_func(data)
    # confidence band (computed before averaging across evokeds)
    ci = None if ci_fun is None else ci_fun(data)
    # grand mean across evokeds
    data = np.mean(data, axis=0)
    _check_if_nan(data)
    return (data,) if ci_fun is None else (data, ci)
def _get_ci_function_pce(ci, do_topo=False):
"""Get confidence interval function for plot_compare_evokeds."""
if ci is None:
return None
elif callable(ci):
return ci
elif isinstance(ci, bool) and not ci:
return None
elif isinstance(ci, bool):
ci = 0.95
if isinstance(ci, float):
from ..stats import _ci
method = 'parametric' if do_topo else 'bootstrap'
return partial(_ci, ci=ci, method=method)
else:
raise TypeError('"ci" must be None, bool, float or callable, got {}'
.format(type(ci).__name__))
def _plot_compare_evokeds(ax, data_dict, conditions, times, ci_dict, styles,
                          title, all_positive, topo):
    """Plot evokeds (to compare them; with CIs) based on a data_dict."""
    for cond in conditions:
        # draw the mean time series for this condition
        ax.plot(times, data_dict[cond].T, zorder=1000, label=cond,
                clip_on=False, **styles[cond])
        # shaded confidence band, when one was computed
        ci_ = ci_dict.get(cond, None)
        if ci_ is not None:
            ax.fill_between(times, ci_[0].flatten(), ci_[1].flatten(),
                            zorder=9, color=styles[cond]['color'],
                            alpha=0.3, clip_on=False)
    # topo subplots get a compact text title; regular axes a real title
    if topo:
        ax.text(-.1, 1, title, transform=ax.transAxes)
    else:
        ax.set_title(title)
def _title_helper_pce(title, picked_types, picks, ch_names, combine):
"""Format title for plot_compare_evokeds."""
if title is None:
title = (_handle_default('titles').get(picks, None) if picked_types
else _set_title_multiple_electrodes(title, combine, ch_names))
# add the `combine` modifier
do_combine = picked_types or len(ch_names) > 1
if (title is not None and len(title) and isinstance(combine, str) and
do_combine):
_comb = combine.upper() if combine == 'gfp' else combine
_comb = 'std. dev.' if _comb == 'std' else _comb
title += ' ({})'.format(_comb)
return title
@fill_doc
def plot_compare_evokeds(evokeds, picks=None, colors=None,
linestyles=None, styles=None, cmap=None,
vlines='auto', ci=True, truncate_yaxis='auto',
truncate_xaxis=True, ylim=None, invert_y=False,
show_sensors=None, legend=True,
split_legend=None, axes=None, title=None, show=True,
combine=None, sphere=None):
"""Plot evoked time courses for one or more conditions and/or channels.
Parameters
----------
evokeds : instance of mne.Evoked | list | dict
If a single Evoked instance, it is plotted as a time series.
If a list of Evokeds, the contents are plotted with their
``.comment`` attributes used as condition labels. If no comment is set,
the index of the respective Evoked the list will be used instead,
starting with ``1`` for the first Evoked.
If a dict whose values are Evoked objects, the contents are plotted as
single time series each and the keys are used as labels.
If a [dict/list] of lists, the unweighted mean is plotted as a time
series and the parametric confidence interval is plotted as a shaded
area. All instances must have the same shape - channel numbers, time
points etc.
If dict, keys must be of type str.
%(picks_all_data)s
* If picks is None or a (collection of) data channel types, the
global field power will be plotted for all data channels.
Otherwise, picks will be averaged.
* If multiple channel types are selected, one
figure will be returned for each channel type.
* If the selected channels are gradiometers, the signal from
corresponding (gradiometer) pairs will be combined.
colors : list | dict | None
Colors to use when plotting the ERP/F lines and confidence bands. If
``cmap`` is not ``None``, ``colors`` must be a :class:`list` or
:class:`dict` of :class:`ints <int>` or :class:`floats <float>`
indicating steps or percentiles (respectively) along the colormap. If
``cmap`` is ``None``, list elements or dict values of ``colors`` must
be :class:`ints <int>` or valid :doc:`matplotlib colors
<tutorials/colors/colors>`; lists are cycled through sequentially,
while dicts must have keys matching the keys or conditions of an
``evokeds`` dict (see Notes for details). If ``None``, the current
:doc:`matplotlib color cycle <gallery/color/color_cycle_default>` is
used. Defaults to ``None``.
linestyles : list | dict | None
Styles to use when plotting the ERP/F lines. If a :class:`list` or
:class:`dict`, elements must be valid :doc:`matplotlib linestyles
<matplotlib:gallery/lines_bars_and_markers/linestyles>`. Lists are
cycled through sequentially; dictionaries must have keys matching the
keys or conditions of an ``evokeds`` dict (see Notes for details). If
``None``, all lines will be solid. Defaults to ``None``.
styles : dict | None
Dictionary of styles to use when plotting ERP/F lines. Keys must match
keys or conditions of ``evokeds``, and values must be a :class:`dict`
of legal inputs to :func:`matplotlib.pyplot.plot`. Those values will be
passed as parameters to the line plot call of the corresponding
condition, overriding defaults (e.g.,
``styles={"Aud/L": {"linewidth": 3}}`` will set the linewidth for
"Aud/L" to 3). As with ``colors`` and ``linestyles``, keys matching
conditions in ``/``-separated ``evokeds`` keys are supported (see Notes
for details).
cmap : None | str | tuple | instance of matplotlib.colors.Colormap
Colormap from which to draw color values when plotting the ERP/F lines
and confidence bands. If not ``None``, ints or floats in the ``colors``
parameter are mapped to steps or percentiles (respectively) along the
colormap. If ``cmap`` is a :class:`str`, it will be passed to
:func:`matplotlib.cm.get_cmap`; if ``cmap`` is a tuple, its first
element will be used as a string to label the colorbar, and its
second element will be passed to :func:`matplotlib.cm.get_cmap` (unless
it is already an instance of :class:`~matplotlib.colors.Colormap`).
.. versionchanged:: 0.19
Support for passing :class:`~matplotlib.colors.Colormap` instances.
vlines : "auto" | list of float
A list in seconds at which to plot dashed vertical lines.
If "auto" and the supplied data includes 0, it is set to [0.]
and a vertical bar is plotted at time 0. If an empty list is passed,
no vertical lines are plotted.
ci : float | bool | callable | None
Confidence band around each ERP/F time series. If ``False`` or ``None``
no confidence band is drawn. If :class:`float`, ``ci`` must be between
0 and 1, and will set the threshold for a bootstrap
(single plot)/parametric (when ``axes=='topo'``) estimation of the
confidence band; ``True`` is equivalent to setting a threshold of 0.95
(i.e., the 95%% confidence band is drawn). If a callable, it must take
a single array (n_observations × n_times) as input and return upper and
lower confidence margins (2 × n_times). Defaults to ``True``.
truncate_yaxis : bool | 'auto'
Whether to shorten the y-axis spine. If 'auto', the spine is truncated
at the minimum and maximum ticks. If ``True``, it is truncated at the
multiple of 0.25 nearest to half the maximum absolute value of the
data. If ``truncate_xaxis=False``, only the far bound of the y-axis
will be truncated. Defaults to 'auto'.
truncate_xaxis : bool
Whether to shorten the x-axis spine. If ``True``, the spine is
truncated at the minimum and maximum ticks. If
``truncate_yaxis=False``, only the far bound of the x-axis will be
truncated. Defaults to ``True``.
ylim : dict | None
Y-axis limits for plots (after scaling has been applied). :class:`dict`
keys should match channel types; valid keys are eeg, mag, grad, misc
(example: ``ylim=dict(eeg=[-20, 20])``). If ``None``, the y-axis limits
will be set automatically by matplotlib. Defaults to ``None``.
invert_y : bool
Whether to plot negative values upward (as is sometimes done
for ERPs out of tradition). Defaults to ``False``.
show_sensors : bool | int | str | None
Whether to display an inset showing sensor locations on a head outline.
If :class:`int` or :class:`str`, indicates position of the inset (see
:func:`mpl_toolkits.axes_grid1.inset_locator.inset_axes`). If ``None``,
treated as ``True`` if there is only one channel in ``picks``. If
``True``, location is upper or lower right corner, depending on data
values. Defaults to ``None``.
legend : bool | int | str
Whether to show a legend for the colors/linestyles of the conditions
plotted. If :class:`int` or :class:`str`, indicates position of the
legend (see :func:`mpl_toolkits.axes_grid1.inset_locator.inset_axes`).
If ``True``, equivalent to ``'upper left'``. Defaults to ``True``.
split_legend : bool | None
Whether to separate color and linestyle in the legend. If ``None``,
a separate linestyle legend will still be shown if ``cmap`` is
specified. Defaults to ``None``.
axes : None | Axes instance | list of Axes | 'topo'
:class:`~matplotlib.axes.Axes` object to plot into. If plotting
multiple channel types (or multiple channels when ``combine=None``),
``axes`` should be a list of appropriate length containing
:class:`~matplotlib.axes.Axes` objects. If ``'topo'``, a new
:class:`~matplotlib.figure.Figure` is created with one axis for each
channel, in a topographical layout. If ``None``, a new
:class:`~matplotlib.figure.Figure` is created for each channel type.
Defaults to ``None``.
title : str | None
Title printed above the plot. If ``None``, a title will be
automatically generated based on channel name(s) or type(s) and the
value of the ``combine`` parameter. Defaults to ``None``.
show : bool
Whether to show the figure. Defaults to ``True``.
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_evokeds, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_epochs, n_times)``. For
example::
combine = lambda data: np.median(data, axis=1)
If ``combine`` is ``None``, channels are combined by computing GFP,
unless ``picks`` is a single channel (not channel type) or
``axes='topo'``, in which cases no combining is performed. Defaults to
``None``.
%(topomap_sphere_auto)s
Returns
-------
fig : list of Figure instances
A list of the figure(s) generated.
Notes
-----
If the parameters ``styles``, ``colors``, or ``linestyles`` are passed as
:class:`dicts <python:dict>`, then ``evokeds`` must also be a
:class:`python:dict`, and
the keys of the plot-style parameters must either match the keys of
``evokeds``, or match a ``/``-separated partial key ("condition") of
``evokeds``. For example, if evokeds has keys "Aud/L", "Aud/R", "Vis/L",
and "Vis/R", then ``linestyles=dict(L='--', R='-')`` will plot both Aud/L
and Vis/L conditions with dashed lines and both Aud/R and Vis/R conditions
with solid lines. Similarly, ``colors=dict(Aud='r', Vis='b')`` will plot
Aud/L and Aud/R conditions red and Vis/L and Vis/R conditions blue.
Color specification depends on whether a colormap has been provided in the
``cmap`` parameter. The following table summarizes how the ``colors``
parameter is interpreted:
.. cssclass:: table-bordered
.. rst-class:: midvalign
+-------------+----------------+------------------------------------------+
| ``cmap`` | ``colors`` | result |
+=============+================+==========================================+
| | None | matplotlib default color cycle; unique |
| | | color for each condition |
| +----------------+------------------------------------------+
| | | matplotlib default color cycle; lowest |
| | list or dict | integer mapped to first cycle color; |
| | of integers | conditions with same integer get same |
| None | | color; unspecified conditions are "gray" |
| +----------------+------------------------------------------+
| | list or dict | ``ValueError`` |
| | of floats | |
| +----------------+------------------------------------------+
| | list or dict | the specified hex colors; unspecified |
| | of hexadecimal | conditions are "gray" |
| | color strings | |
+-------------+----------------+------------------------------------------+
| | None | equally spaced colors on the colormap; |
| | | unique color for each condition |
| +----------------+------------------------------------------+
| | | equally spaced colors on the colormap; |
| | list or dict | lowest integer mapped to first cycle |
| string or | of integers | color; conditions with same integer |
| instance of | | get same color |
| matplotlib +----------------+------------------------------------------+
| Colormap | list or dict | floats mapped to corresponding colormap |
| | of floats | values |
| +----------------+------------------------------------------+
| | list or dict | |
| | of hexadecimal | ``TypeError`` |
| | color strings | |
+-------------+----------------+------------------------------------------+
"""
import matplotlib.pyplot as plt
from ..evoked import Evoked, _check_evokeds_ch_names_times
# build up evokeds into a dict, if it's not already
if isinstance(evokeds, Evoked):
evokeds = [evokeds]
if isinstance(evokeds, (list, tuple)):
evokeds_copy = evokeds.copy()
evokeds = dict()
comments = [getattr(_evk, 'comment', None) for _evk in evokeds_copy]
for idx, (comment, _evoked) in enumerate(zip(comments, evokeds_copy)):
key = str(idx + 1)
if comment: # only update key if comment is non-empty
if comments.count(comment) == 1: # comment is unique
key = comment
else: # comment is non-unique: prepend index
key = f'{key}: {comment}'
evokeds[key] = _evoked
del evokeds_copy
if not isinstance(evokeds, dict):
raise TypeError('"evokeds" must be a dict, list, or instance of '
'mne.Evoked; got {}'.format(type(evokeds).__name__))
evokeds = deepcopy(evokeds) # avoid modifying dict outside function scope
for cond, evoked in evokeds.items():
_validate_type(cond, 'str', 'Conditions')
if isinstance(evoked, Evoked):
evokeds[cond] = [evoked] # wrap singleton evokeds in a list
for evk in evokeds[cond]:
_validate_type(evk, Evoked, 'All evokeds entries ', 'Evoked')
# ensure same channels and times across all evokeds
all_evoked = sum(evokeds.values(), [])
_check_evokeds_ch_names_times(all_evoked)
del all_evoked
# get some representative info
conditions = list(evokeds)
one_evoked = evokeds[conditions[0]][0]
times = one_evoked.times
info = one_evoked.info
sphere = _check_sphere(sphere, info)
tmin, tmax = times[0], times[-1]
# set some defaults
if ylim is None:
ylim = dict()
if vlines == 'auto':
vlines = [0.] if (tmin < 0 < tmax) else []
_validate_type(vlines, (list, tuple), 'vlines', 'list or tuple')
# is picks a channel type (or None)?
orig_picks = deepcopy(picks)
picks, picked_types = _picks_to_idx(info, picks, return_kind=True)
# some things that depend on picks:
ch_names = np.array(one_evoked.ch_names)[picks].tolist()
ch_types = list(_get_channel_types(info, picks=picks, unique=True)
.intersection(_DATA_CH_TYPES_SPLIT + ('misc',))) # miscICA
picks_by_type = channel_indices_by_type(info, picks)
# discard picks from non-data channels (e.g., ref_meg)
good_picks = sum([picks_by_type[ch_type] for ch_type in ch_types], [])
picks = np.intersect1d(picks, good_picks)
if show_sensors is None:
show_sensors = (len(picks) == 1)
# cannot combine a single channel
if (len(picks) < 2) and combine is not None:
warn('Only {} channel in "picks"; cannot combine by method "{}".'
.format(len(picks), combine))
# `combine` defaults to GFP unless picked a single channel or axes='topo'
if combine is None and len(picks) > 1 and axes != 'topo':
combine = 'gfp'
# convert `combine` into callable (if None or str)
combine_func = _make_combine_callable(combine)
# title
title = _title_helper_pce(title, picked_types, picks=orig_picks,
ch_names=ch_names, combine=combine)
# setup axes
do_topo = (axes == 'topo')
if do_topo:
show_sensors = False
if len(picks) > 70:
logger.info('You are plotting to a topographical layout with >70 '
'sensors. This can be extremely slow. Consider using '
'mne.viz.plot_topo, which is optimized for speed.')
axes = ['topo'] * len(ch_types)
else:
if axes is None:
axes = (plt.subplots(figsize=(8, 6))[1] for _ in ch_types)
elif isinstance(axes, plt.Axes):
axes = [axes]
_validate_if_list_of_axes(axes, obligatory_len=len(ch_types))
if len(ch_types) > 1:
logger.info('Multiple channel types selected, returning one figure '
'per type.')
figs = list()
for ch_type, ax in zip(ch_types, axes):
_picks = picks_by_type[ch_type]
_ch_names = np.array(one_evoked.ch_names)[_picks].tolist()
_picks = ch_type if picked_types else _picks
# don't pass `combine` here; title will run through this helper
# function a second time & it will get added then
_title = _title_helper_pce(title, picked_types, picks=_picks,
ch_names=_ch_names, combine=None)
figs.extend(plot_compare_evokeds(
evokeds, picks=_picks, colors=colors, cmap=cmap,
linestyles=linestyles, styles=styles, vlines=vlines, ci=ci,
truncate_yaxis=truncate_yaxis, ylim=ylim, invert_y=invert_y,
legend=legend, show_sensors=show_sensors,
axes=ax, title=_title, split_legend=split_legend, show=show,
sphere=sphere))
return figs
# colors and colormap. This yields a `styles` dict with one entry per
# condition, specifying at least color and linestyle. THIS MUST BE DONE
# AFTER THE "MULTIPLE CHANNEL TYPES" LOOP
(_styles, _linestyles, _colors, _cmap, colorbar_title,
colorbar_ticks) = _handle_styles_pce(styles, linestyles, colors, cmap,
conditions)
# From now on there is only 1 channel type
assert len(ch_types) == 1
ch_type = ch_types[0]
# some things that depend on ch_type:
units = _handle_default('units')[ch_type]
scalings = _handle_default('scalings')[ch_type]
# prep for topo
pos_picks = picks # need this version of picks for sensor location inset
info = pick_info(info, sel=picks, copy=True)
all_ch_names = info['ch_names']
if not do_topo:
# add vacuous "index" (needed for topo) so same code works for both
axes = [(ax, 0) for ax in axes]
if np.array(picks).ndim < 2:
picks = [picks] # enables zipping w/ axes
else:
from .topo import iter_topography
fig = plt.figure(figsize=(18, 14))
def click_func(
ax_, pick_, evokeds=evokeds, colors=colors,
linestyles=linestyles, styles=styles, cmap=cmap, vlines=vlines,
ci=ci, truncate_yaxis=truncate_yaxis,
truncate_xaxis=truncate_xaxis, ylim=ylim, invert_y=invert_y,
show_sensors=show_sensors, legend=legend,
split_legend=split_legend, picks=picks, combine=combine):
plot_compare_evokeds(
evokeds=evokeds, colors=colors, linestyles=linestyles,
styles=styles, cmap=cmap, vlines=vlines, ci=ci,
truncate_yaxis=truncate_yaxis, truncate_xaxis=truncate_xaxis,
ylim=ylim, invert_y=invert_y, show_sensors=show_sensors,
legend=legend, split_legend=split_legend,
picks=picks[pick_], combine=combine, axes=ax_, show=True,
sphere=sphere)
layout = find_layout(info)
# shift everything to the right by 15% of one axes width
layout.pos[:, 0] += layout.pos[0, 2] * .15
layout.pos[:, 1] += layout.pos[0, 3] * .15
# `axes` will be a list of (axis_object, channel_index) tuples
axes = list(iter_topography(
info, layout=layout, on_pick=click_func,
fig=fig, fig_facecolor='w', axis_facecolor='w',
axis_spinecolor='k', layout_scale=.925, legend=True))
picks = list(picks)
del info
# for each axis, compute the grand average and (maybe) the CI
# (per sensor if topo, otherwise aggregating over sensors)
c_func = None if do_topo else combine_func
all_data = list()
all_cis = list()
for _picks, (ax, idx) in zip(picks, axes):
data_dict = dict()
ci_dict = dict()
for cond in conditions:
this_evokeds = evokeds[cond]
# skip CIs when possible; assign ci_fun first to get arg checking
ci_fun = _get_ci_function_pce(ci, do_topo=do_topo)
ci_fun = ci_fun if len(this_evokeds) > 1 else None
res = _get_data_and_ci(this_evokeds, combine, c_func, picks=_picks,
scaling=scalings, ci_fun=ci_fun)
data_dict[cond] = res[0]
if ci_fun is not None:
ci_dict[cond] = res[1]
all_data.append(data_dict) # grand means, or indiv. sensors if do_topo
all_cis.append(ci_dict)
del evokeds
# compute ylims
allvalues = list()
for _dict in all_data:
for _array in list(_dict.values()):
allvalues.append(_array[np.newaxis]) # to get same .ndim as CIs
for _dict in all_cis:
allvalues.extend(list(_dict.values()))
allvalues = np.concatenate(allvalues)
norm = np.all(allvalues > 0)
orig_ymin, orig_ymax = ylim.get(ch_type, [None, None])
ymin, ymax = _setup_vmin_vmax(allvalues, orig_ymin, orig_ymax, norm)
del allvalues
# add empty data and title for the legend axis
if do_topo:
all_data.append({cond: np.array([]) for cond in data_dict})
all_cis.append({cond: None for cond in ci_dict})
all_ch_names.append('')
# plot!
for (ax, idx), data, cis in zip(axes, all_data, all_cis):
if do_topo:
title = all_ch_names[idx]
# plot the data
_times = [] if idx == -1 else times
_plot_compare_evokeds(ax, data, conditions, _times, cis, _styles,
title, norm, do_topo)
# draw axes & vlines
skip_axlabel = do_topo and (idx != -1)
_draw_axes_pce(ax, ymin, ymax, truncate_yaxis, truncate_xaxis,
invert_y, vlines, tmin, tmax, units, skip_axlabel)
# add inset scalp plot showing location of sensors picked
if show_sensors:
_validate_type(show_sensors, (np.int64, bool, str, type(None)),
'show_sensors', 'numeric, str, None or bool')
if not _check_ch_locs(np.array(one_evoked.info['chs'])[pos_picks]):
warn('Cannot find channel coordinates in the supplied Evokeds. '
'Not showing channel locations.')
else:
_evoked_sensor_legend(one_evoked.info, pos_picks, ymin, ymax,
show_sensors, ax, sphere)
# add color/linestyle/colormap legend(s)
if legend:
_draw_legend_pce(legend, split_legend, _styles, _linestyles, _colors,
_cmap, do_topo, ax)
if cmap is not None:
_draw_colorbar_pce(ax, _colors, _cmap, colorbar_title, colorbar_ticks)
# finish
plt_show(show)
return [ax.figure]
| 44.942845 | 110 | 0.593727 |
cb4f1d565a9e3e59a34869aa82590392569d4e24 | 2,591 | py | Python | src/test/tests/simulation/updateplots.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 226 | 2018-12-29T01:13:49.000Z | 2022-03-30T19:16:31.000Z | src/test/tests/simulation/updateplots.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 5,100 | 2019-01-14T18:19:25.000Z | 2022-03-31T23:08:36.000Z | src/test/tests/simulation/updateplots.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 84 | 2019-01-24T17:41:50.000Z | 2022-03-10T10:01:46.000Z | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: updateplots.py
#
# Tests: libsim - connecting to simulation and retrieving data from it.
#
# Programmer: Kathleen Biagas
# Date: June 18, 2014
#
# Modifications:
# Kathleen Biagas, Fri Sep 10 09:37:11 PDT 2021
# Added test for exporting vtk.
#
# ----------------------------------------------------------------------------
# Create our simulation object.
sim = TestSimulation("updateplots", "updateplots.sim2")
sim.addargument("-echo")  # echo console commands on stderr so we can sync on them
# Test that we can start and connect to the simulation.
started, connected = TestSimStartAndConnect("updateplots00", sim)
def step(sim):
    """Advance the simulation by one step and block until it completes.

    Issues the ``step`` console command, then drains the simulation
    process' stderr until the echoed ``Command 'step'`` line appears,
    which keeps the test synchronized with the simulation.
    """
    sim.consolecommand("step")
    # Consume stderr output (echoing it) until the command echo shows up.
    while True:
        line = sim.p.stderr.readline()
        print(line)
        if "Command 'step'" in line:
            break
def testExportVTK(sim):
    """Trigger a VTK export and verify the XML-binary output file exists.

    The default VTK export format is legacy ASCII (.vtk extension); this
    exercises an export configured for XML binary output (.vtr extension).
    """
    sim.consolecommand("exportVTK")
    # Drain stderr until the echoed command confirms the export finished.
    while True:
        line = sim.p.stderr.readline()
        print(line)
        if "Command 'exportVTK'" in line:
            break
    expected = os.path.join(TestEnv.params["run_dir"], "updateplots_export0000.vtr")
    TestValueEQ("updateplots_export0000.vtr exists",
                os.path.isfile(expected),
                True)
# Perform our tests.
if connected:
    # Make sure the metadata is right.
    TestSimMetaData("updateplots01", sim.metadata())
    # 2d mesh and updateplotss
    #AddPlot("Mesh", "mesh2d")
    AddPlot("Pseudocolor", "zonal")
    AddPlot("Vector", "zvec")
    VectorAtts = VectorAttributes()
    VectorAtts.scale = 0.5
    VectorAtts.colorByMagnitude = 0
    VectorAtts.vectorColor = (255, 255, 255, 255)
    SetPlotOptions(VectorAtts)
    DrawPlots()
    Test("updateplots02")
    # Step the simulation; after every 3 steps capture one test image and
    # record the simulation time, then dump all recorded times as text.
    i = 3
    times = "Times:\n"
    Query("Time")
    times = times + str(GetQueryOutputValue()) + "\n"
    for outer in range(6):
        for inner in range(3):
            step(sim)
        Query("Time")
        times = times + str(GetQueryOutputValue()) + "\n"
        Test("updateplots%02d"%i)
        i = i+1
    TestText("updateplots%02d"%i, times)
    # Uncomment this when #17008 is fixed (crash when Logging ExportDBRPC)
    #testExportVTK(sim)
# Close down the simulation.
if started:
    sim.endsim()
Exit()
| 29.11236 | 95 | 0.607487 |
ab59cbcb55f34e3eaae0de5c84eb6437c3a99e9b | 179 | py | Python | while_loops/prime numbers .py | amanchamola/while_loops | 39a2ba01ff3ad71990e7c19660590f2f17f5b857 | [
"MIT"
] | null | null | null | while_loops/prime numbers .py | amanchamola/while_loops | 39a2ba01ff3ad71990e7c19660590f2f17f5b857 | [
"MIT"
] | null | null | null | while_loops/prime numbers .py | amanchamola/while_loops | 39a2ba01ff3ad71990e7c19660590f2f17f5b857 | [
"MIT"
] | null | null | null | n=int(input())
d=2
flag = False
while n>d :
if n%d==0 :
flag = True
d+=1
if flag:
print ("not prime")
else:
print ("prime")
| 9.944444 | 24 | 0.418994 |
ff80da7b126ec121013c0f1a5c3d9c306a491f75 | 10,038 | py | Python | edward2/tensorflow/layers/normalization.py | debbiemarkslab/edward2 | d071268c0439b434b508fe23abd938a5effb7a70 | [
"Apache-2.0"
] | null | null | null | edward2/tensorflow/layers/normalization.py | debbiemarkslab/edward2 | d071268c0439b434b508fe23abd938a5effb7a70 | [
"Apache-2.0"
] | null | null | null | edward2/tensorflow/layers/normalization.py | debbiemarkslab/edward2 | d071268c0439b434b508fe23abd938a5effb7a70 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalization layers."""
from edward2.tensorflow import random_variable
from edward2.tensorflow import transformed_random_variable
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
class ActNorm(tf.keras.layers.Layer):
    """Actnorm, an affine reversible layer (Prafulla and Kingma, 2018).

    Weights use data-dependent initialization in which outputs have zero mean
    and unit variance per channel (last dimension). The mean/variance statistics
    are computed from the first batch of inputs.
    """

    def __init__(self, epsilon=tf.keras.backend.epsilon(), **kwargs):
        # epsilon guards against division by zero when inverting the stddev.
        super(ActNorm, self).__init__(**kwargs)
        self.epsilon = epsilon

    def build(self, input_shape):
        # Creates per-channel bias/log_scale variables and assigns the
        # data-dependent initial values computed in __call__.
        input_shape = tf.TensorShape(input_shape)
        last_dim = input_shape[-1]
        if last_dim is None:
            raise ValueError('The last dimension of the inputs to `ActNorm` '
                             'should be defined. Found `None`.')
        bias = self.add_weight('bias', [last_dim], dtype=self.dtype)
        log_scale = self.add_weight('log_scale', [last_dim], dtype=self.dtype)
        # Set data-dependent initializers.
        # control_dependencies ensures each assign op runs before the
        # variable is first read by call().
        bias = bias.assign(self.bias_initial_value)
        with tf.control_dependencies([bias]):
            self.bias = bias
        log_scale = log_scale.assign(self.log_scale_initial_value)
        with tf.control_dependencies([log_scale]):
            self.log_scale = log_scale
        self.built = True

    def __call__(self, inputs, *args, **kwargs):
        # On the first call (before build), compute per-channel moments of the
        # batch so that outputs start with zero mean and unit variance.
        if not self.built:
            mean, variance = tf.nn.moments(
                inputs, axes=list(range(inputs.shape.ndims - 1)))
            self.bias_initial_value = -mean
            # TODO(trandustin): Optionally, actnorm multiplies log_scale by a fixed
            # log_scale factor (e.g., 3.) and initializes by
            # initial_value / log_scale_factor.
            self.log_scale_initial_value = tf.math.log(
                1. / (tf.sqrt(variance) + self.epsilon))
        if not isinstance(inputs, random_variable.RandomVariable):
            return super(ActNorm, self).__call__(inputs, *args, **kwargs)
        # RandomVariable inputs are wrapped so the transform is tracked.
        return transformed_random_variable.TransformedRandomVariable(inputs, self)

    def call(self, inputs):
        # Forward transform: y = (x + bias) * exp(log_scale).
        return (inputs + self.bias) * tf.exp(self.log_scale)

    def reverse(self, inputs):
        # Inverse transform: x = y * exp(-log_scale) - bias.
        return inputs * tf.exp(-self.log_scale) - self.bias

    def log_det_jacobian(self, inputs):
        """Returns log det | dx / dy | = num_events * sum log | scale |."""
        # Number of events is number of all elements excluding the batch and
        # channel dimensions.
        num_events = tf.reduce_prod(tf.shape(inputs)[1:-1])
        log_det_jacobian = num_events * tf.reduce_sum(self.log_scale)
        return log_det_jacobian
def ensemble_batchnorm(x, ensemble_size=1, use_tpu=True, **kwargs):
    """A modified batch norm layer for Batch Ensemble model.

    Args:
      x: input tensor.
      ensemble_size: number of ensemble members.
      use_tpu: whether the model is running on TPU.
      **kwargs: Keyword arguments to batch normalization layers.

    Returns:
      Output tensor for the block.
    """
    # In BatchEnsemble inference stage, the input to the model is tiled which
    # leads to dynamic shape because of the tf.split function. Such operation
    # is not supported in tf2.0 on TPU. For current workaround, we use single
    # BatchNormalization layer for all ensemble member. This is not correct in
    # math but works in practice.
    if ensemble_size == 1 or use_tpu:
        return tf.keras.layers.BatchNormalization(**kwargs)(x)
    base_name = kwargs.get('name')
    members = tf.split(x, ensemble_size, axis=0)
    normalized = []
    # One independent BatchNormalization layer per ensemble member, each
    # given a unique name derived from the requested one.
    for index, member in enumerate(members):
        if base_name is not None:
            kwargs['name'] = base_name + '_{}'.format(index)
        normalized.append(tf.keras.layers.BatchNormalization(**kwargs)(member))
    return tf.concat(normalized, axis=0)
class EnsembleSyncBatchNorm(tf.keras.layers.Layer):
    """BatchNorm that averages over ALL replicas. Only works for `NHWC` inputs."""

    def __init__(self, axis=3, ensemble_size=1, momentum=0.99, epsilon=0.001,
                 trainable=True, name=None, **kwargs):
        # axis: channel axis (NHWC => 3). momentum: moving-average decay.
        # epsilon: numerical stabilizer for the variance inversion.
        super(EnsembleSyncBatchNorm, self).__init__(
            trainable=trainable, name=name, **kwargs)
        self.axis = axis
        self.momentum = momentum
        self.epsilon = epsilon
        self.ensemble_size = ensemble_size

    def build(self, input_shape):
        """Create gamma/beta and the moving statistics, one row per member."""
        dim = input_shape[-1]
        # With ensemble_size > 1 each member has its own set of parameters.
        if self.ensemble_size > 1:
            shape = [self.ensemble_size, dim]
        else:
            shape = [dim]
        self.gamma = self.add_weight(
            name='gamma',
            shape=shape,
            dtype=self.dtype,
            initializer='ones',
            trainable=True)
        self.beta = self.add_weight(
            name='beta',
            shape=shape,
            dtype=self.dtype,
            initializer='zeros',
            trainable=True)
        # Moving statistics are read-synchronized and mean-aggregated across
        # replicas rather than trained.
        self.moving_mean = self.add_weight(
            name='moving_mean',
            shape=shape,
            dtype=self.dtype,
            initializer='zeros',
            synchronization=tf.VariableSynchronization.ON_READ,
            trainable=False,
            aggregation=tf.VariableAggregation.MEAN)
        self.moving_variance = self.add_weight(
            name='moving_variance',
            shape=shape,
            dtype=self.dtype,
            initializer='ones',
            synchronization=tf.VariableSynchronization.ON_READ,
            trainable=False,
            aggregation=tf.VariableAggregation.MEAN)

    def _get_mean_and_variance(self, x):
        """Cross-replica mean and variance."""
        replica_context = tf.distribute.get_replica_context()
        if replica_context is not None:
            num_replicas_in_sync = replica_context.num_replicas_in_sync
            if num_replicas_in_sync <= 8:
                # Few replicas: sum over all of them (no grouping).
                group_assignment = None
                num_replicas_per_group = tf.cast(num_replicas_in_sync, tf.float32)
            else:
                # Many replicas: average within groups of at least 8.
                num_replicas_per_group = max(8, num_replicas_in_sync // 8)
                group_assignment = np.arange(num_replicas_in_sync, dtype=np.int32)
                group_assignment = group_assignment.reshape(
                    [-1, num_replicas_per_group])
                group_assignment = group_assignment.tolist()
                num_replicas_per_group = tf.cast(num_replicas_per_group, tf.float32)
        # This only supports NHWC format.
        if self.ensemble_size > 1:
            # Fold the ensemble dimension out of the batch so moments are
            # computed per member: [E, B/E, H, W, C].
            height = tf.shape(x)[1]
            width = tf.shape(x)[2]
            input_channels = tf.shape(x)[3]
            x = tf.reshape(x, [self.ensemble_size, -1, height, width, input_channels])
            mean = tf.reduce_mean(x, axis=[1, 2, 3])  # [ensemble_size, channels]
            mean = tf.cast(mean, tf.float32)
            # Var[x] = E[x^2] - E[x]^2
            mean_sq = tf.reduce_mean(tf.square(x), axis=[1, 2, 3])
            mean_sq = tf.cast(mean_sq, tf.float32)
            if replica_context is not None:
                mean = tf1.tpu.cross_replica_sum(mean, group_assignment)
                mean = mean / num_replicas_per_group
                mean_sq = tf1.tpu.cross_replica_sum(mean_sq, group_assignment)
                mean_sq = mean_sq / num_replicas_per_group
            variance = mean_sq - tf.square(mean)
        else:
            mean = tf.reduce_mean(x, axis=[0, 1, 2])
            mean = tf.cast(mean, tf.float32)
            mean_sq = tf.reduce_mean(tf.square(x), axis=[0, 1, 2])
            mean_sq = tf.cast(mean_sq, tf.float32)
            if replica_context is not None:
                mean = tf1.tpu.cross_replica_sum(mean, group_assignment)
                mean = mean / num_replicas_per_group
                mean_sq = tf1.tpu.cross_replica_sum(mean_sq, group_assignment)
                mean_sq = mean_sq / num_replicas_per_group
            variance = mean_sq - tf.square(mean)

        def _assign(moving, normal):
            # Exponential moving-average update: moving -= (1-momentum)*(moving-normal).
            decay = tf.cast(1. - self.momentum, tf.float32)
            diff = tf.cast(moving, tf.float32) - tf.cast(normal, tf.float32)
            return moving.assign_sub(decay * diff)

        self.add_update(_assign(self.moving_mean, mean))
        self.add_update(_assign(self.moving_variance, variance))
        mean = tf.cast(mean, x.dtype)
        variance = tf.cast(variance, x.dtype)
        return mean, variance

    def call(self, inputs, training=None):
        """Normalize inputs using batch (training) or moving (inference) stats."""
        if training:
            mean, variance = self._get_mean_and_variance(inputs)
        else:
            mean, variance = self.moving_mean, self.moving_variance
        if self.ensemble_size > 1:
            # Tile the per-member statistics so every example in a member's
            # sub-batch is normalized with that member's parameters.
            batch_size = tf.shape(inputs)[0]
            input_dim = tf.shape(mean)[-1]
            examples_per_model = batch_size // self.ensemble_size
            mean = tf.reshape(tf.tile(mean, [1, examples_per_model]),
                              [batch_size, input_dim])
            variance_epsilon = tf.cast(self.epsilon, variance.dtype)
            inv = tf.math.rsqrt(variance + variance_epsilon)
            if self.gamma is not None:
                inv *= self.gamma
            inv = tf.reshape(tf.tile(inv, [1, examples_per_model]),
                             [batch_size, input_dim])
            # Assuming channel last.
            inv = tf.expand_dims(inv, axis=1)
            inv = tf.expand_dims(inv, axis=1)
            mean = tf.expand_dims(mean, axis=1)
            mean = tf.expand_dims(mean, axis=1)
            if self.beta is not None:
                beta = tf.reshape(tf.tile(self.beta, [1, examples_per_model]),
                                  [batch_size, input_dim])
                beta = tf.expand_dims(beta, axis=1)
                beta = tf.expand_dims(beta, axis=1)
            # y = x * (gamma/sqrt(var+eps)) + (beta - mean*gamma/sqrt(var+eps))
            x = inputs * tf.cast(inv, inputs.dtype) + tf.cast(
                beta - mean * inv if self.beta is not None else (
                    -mean * inv), inputs.dtype)
        else:
            x = tf.nn.batch_normalization(
                inputs,
                mean=mean,
                variance=variance,
                offset=self.beta,
                scale=self.gamma,
                variance_epsilon=tf.cast(self.epsilon, variance.dtype),
            )
        return x
| 37.595506 | 80 | 0.671847 |
2b5f66b3f9dc64b65373d189b4039631225974cb | 772 | py | Python | src/zeep/asyncio/bindings.py | yvdlima/python-zeep | aae3def4385b0f8922e0e83b9cdcd68b2263f739 | [
"MIT"
] | 3 | 2017-04-01T16:05:52.000Z | 2019-07-26T14:32:26.000Z | src/zeep/asyncio/bindings.py | yvdlima/python-zeep | aae3def4385b0f8922e0e83b9cdcd68b2263f739 | [
"MIT"
] | 3 | 2021-03-31T19:37:08.000Z | 2021-12-13T20:32:23.000Z | src/zeep/asyncio/bindings.py | yvdlima/python-zeep | aae3def4385b0f8922e0e83b9cdcd68b2263f739 | [
"MIT"
] | 2 | 2020-11-18T09:49:46.000Z | 2021-07-08T14:02:03.000Z | from zeep.wsdl import bindings
__all__ = ["AsyncSoap11Binding", "AsyncSoap12Binding"]
class AsyncSoapBinding(object):
    """Mixin that performs the SOAP request/response cycle asynchronously."""

    async def send(self, client, options, operation, args, kwargs):
        """Build the SOAP envelope, POST it, and process the reply.

        Returns the raw transport response when the client is configured
        with ``raw_response``, otherwise the parsed operation result.
        """
        envelope, headers = self._create(
            operation, args, kwargs, client=client, options=options)
        address = options["address"]
        response = await client.transport.post_xml(address, envelope, headers)
        if client.settings.raw_response:
            return response
        return self.process_reply(client, self.get(operation), response)
class AsyncSoap11Binding(AsyncSoapBinding, bindings.Soap11Binding):
    # SOAP 1.1 binding with the asynchronous send() from AsyncSoapBinding.
    pass
class AsyncSoap12Binding(AsyncSoapBinding, bindings.Soap12Binding):
    # SOAP 1.2 binding with the asynchronous send() from AsyncSoapBinding.
    pass
| 26.62069 | 67 | 0.698187 |
bb55b76c19f34e59c577c5adb821b2c3d069ec69 | 1,018 | py | Python | test_autolens/plot/lensing/all_lensing_objects_images.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | [
"MIT"
] | null | null | null | test_autolens/plot/lensing/all_lensing_objects_images.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | [
"MIT"
] | null | null | null | test_autolens/plot/lensing/all_lensing_objects_images.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | [
"MIT"
] | null | null | null | import autolens as al
import autolens.plot as aplt
# One 2D plotting configuration, reused by the plotters below.
# (The original created an identical MatPlot2D twice; the duplicate
# assignment has been removed.)
plotter = aplt.MatPlot2D()

# Uniform image-plane grid on which all profiles are evaluated.
grid = al.Grid.uniform(shape_2d=(100, 100), pixel_scales=0.05, sub_size=2)

# Lens galaxy with three spherical exponential light profiles.
lens_galaxy = al.Galaxy(
    redshift=0.5,
    light=al.lp.SphericalExponential(centre=(0.0, 0.0), intensity=1.0),
    light_1=al.lp.SphericalExponential(centre=(1.0, 1.0), intensity=1.0),
    light_2=al.lp.SphericalExponential(centre=(-1.0, 0.5), intensity=1.0),
)
source_galaxy = al.Galaxy(
    redshift=1.0,
    light=al.lp.EllipticalExponential(
        centre=(0.02, 0.01), intensity=1.0, effective_radius=0.5
    ),
)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])

# Plot a single light profile, the lens galaxy, the image plane and the
# full tracer (the last with critical curves overlaid).
aplt.LightProfile.figures(light_profile=lens_galaxy.light, grid=grid)
aplt.galaxy.image(galaxy=lens_galaxy, grid=grid)
aplt.plane.image(plane=tracer.image_plane, grid=grid)
aplt.Tracer.figures(
    tracer=tracer,
    grid=grid,
    include=aplt.Include2D(critical_curves=True),
    plotter=plotter,
)
| 29.085714 | 75 | 0.702358 |
b90ad47e282e6231557190f589eda89174c1e368 | 980 | py | Python | HLTrigger/Configuration/python/HLT_75e33/psets/TrajectoryFilterForElectrons_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTrigger/Configuration/python/HLT_75e33/psets/TrajectoryFilterForElectrons_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTrigger/Configuration/python/HLT_75e33/psets/TrajectoryFilterForElectrons_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:16:05.000Z | 2021-11-30T16:16:05.000Z | import FWCore.ParameterSet.Config as cms
# Trajectory-filter configuration used when building electron tracks.
# NOTE(review): -1 / 9999-style values appear to leave the corresponding
# cut open — confirm against the CkfBaseTrajectoryFilter implementation.
TrajectoryFilterForElectrons = cms.PSet(
    ComponentType = cms.string('CkfBaseTrajectoryFilter'),
    chargeSignificance = cms.double(-1.0),
    constantValueForLostHitsFractionFilter = cms.double(2.0),
    extraNumberOfHitsBeforeTheFirstLoop = cms.int32(4),
    maxCCCLostHits = cms.int32(9999),
    maxConsecLostHits = cms.int32(1),
    maxLostHits = cms.int32(1),
    maxLostHitsFraction = cms.double(0.1),
    maxNumberOfHits = cms.int32(-1),
    minGoodStripCharge = cms.PSet(
        refToPSet_ = cms.string('SiStripClusterChargeCutNone')
    ),
    minHitsMinPt = cms.int32(-1),
    minNumberOfHitsForLoopers = cms.int32(13),
    minNumberOfHitsPerLoop = cms.int32(4),
    minPt = cms.double(2.0),
    minimumNumberOfHits = cms.int32(5),
    nSigmaMinPt = cms.double(5.0),
    pixelSeedExtension = cms.bool(False),
    seedExtension = cms.int32(0),
    seedPairPenalty = cms.int32(0),
    strictSeedExtension = cms.bool(False)
)
| 36.296296 | 62 | 0.708163 |
6833b4576d5db593253c3da5ba23f7e638b80c31 | 1,046 | py | Python | examples/advanced/fitspheres2.py | Gjacquenot/vtkplotter | dc865f28dec0c6f10de159dc1f8f20dd69ee74cf | [
"MIT"
] | null | null | null | examples/advanced/fitspheres2.py | Gjacquenot/vtkplotter | dc865f28dec0c6f10de159dc1f8f20dd69ee74cf | [
"MIT"
] | null | null | null | examples/advanced/fitspheres2.py | Gjacquenot/vtkplotter | dc865f28dec0c6f10de159dc1f8f20dd69ee74cf | [
"MIT"
] | 1 | 2019-05-22T09:23:11.000Z | 2019-05-22T09:23:11.000Z | """
For each point find the 12 closest points and fit a sphere to them;
points are colored based on the size of the fitted sphere's radius.
"""
from __future__ import division, print_function
from vtkplotter import *
vp = Plotter(verbose=0, axes=0, bg="w")

s = vp.load(datadir+"cow.vtk", alpha=0.3)  # .subdivide()

pts1, pts2, vals, cols = [], [], [], []
for i, p in enumerate(s.coordinates()):
    pts = s.closestPoint(p, N=12)  # find the N closest points to p
    sph = fitSphere(pts)  # find the fitting sphere
    if sph is None:
        continue  # could not fit a sphere at this point
    value = sph.info["radius"] * 10
    color = colorMap(value, "jet", 0, 1)  # map value to a RGB color
    n = versor(p - sph.info["center"])  # unit vector from sphere center to p
    vals.append(value)
    cols.append(color)
    pts1.append(p)
    pts2.append(p + n / 8)  # short segment along the local normal direction
    if not i % 500:
        print(i, "/", s.N())  # progress report every 500 points

vp.add(Points(pts1, c=cols))
vp.addScalarBar()
vp.add(Lines(pts1, pts2, c="black 0.2"))
vp.add(histogram(vals, title="values", bins=20, vrange=[0, 1]))
vp.add(Text(__doc__, pos=1))
vp.show()
| 29.055556 | 77 | 0.634799 |
7687a299bfc494091a67965b0fbb126b093db8f7 | 4,932 | py | Python | embeddings_for_trees/data/jsonl_data_module.py | JetBrains-Research/embeddings-for-trees | 4609ec341c6563ba11c02ebb57eb07dd866c499e | [
"MIT"
] | 20 | 2020-01-24T11:22:40.000Z | 2022-02-23T19:15:58.000Z | embeddings_for_trees/data/jsonl_data_module.py | JetBrains-Research/embeddings-for-trees | 4609ec341c6563ba11c02ebb57eb07dd866c499e | [
"MIT"
] | 5 | 2020-03-30T13:34:37.000Z | 2022-02-20T12:22:42.000Z | embeddings_for_trees/data/jsonl_data_module.py | JetBrains-Research/embeddings-for-trees | 4609ec341c6563ba11c02ebb57eb07dd866c499e | [
"MIT"
] | 6 | 2020-02-10T21:01:12.000Z | 2022-02-23T19:16:01.000Z | from os import path
from os.path import basename
from typing import List, Optional, Tuple
import dgl
import torch
from commode_utils.common import download_dataset
from commode_utils.vocabulary import build_from_scratch
from omegaconf import DictConfig
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from embeddings_for_trees.data.jsonl_dataset import JsonlASTDataset, JsonlTypedASTDataset
from embeddings_for_trees.data.vocabulary import Vocabulary, TypedVocabulary
class JsonlASTDatamodule(LightningDataModule):
    """Lightning data module over JSONL-serialized ASTs.

    Downloads the dataset on demand, builds the vocabulary from the train
    holdout when it is missing, and yields batched ``(labels, graph)`` pairs.
    """

    # Holdout names, matching the on-disk "<holdout>.jsonl" layout.
    _train = "train"
    _val = "val"
    _test = "test"
    _vocabulary: Optional[Vocabulary] = None

    def __init__(self, config: DictConfig, data_folder: str):
        super().__init__()
        self._config = config
        self._data_folder = data_folder
        self._name = basename(self._data_folder)

    def prepare_data(self):
        """Download the dataset unless it already exists on disk."""
        if path.exists(self._data_folder):
            print("Dataset is already downloaded")
            return
        if "url" not in self._config:
            # Fixed garbled error message ("...contain url for, ...").
            raise ValueError("Config doesn't contain 'url', can't download the dataset automatically")
        download_dataset(self._config.url, self._data_folder, self._name)

    def setup(self, stage: Optional[str] = None):
        """Load the vocabulary, building it from the train holdout if absent."""
        if not path.exists(path.join(self._data_folder, Vocabulary.vocab_filename)):
            print("Can't find vocabulary, collect it from train holdout")
            build_from_scratch(path.join(self._data_folder, f"{self._train}.jsonl"), Vocabulary)
        vocabulary_path = path.join(self._data_folder, Vocabulary.vocab_filename)
        self._vocabulary = Vocabulary(vocabulary_path, self._config.max_labels, self._config.max_tokens)

    @staticmethod
    def _collate_batch(sample_list: List[Tuple[torch.Tensor, dgl.DGLGraph]]) -> Tuple[torch.Tensor, dgl.DGLGraph]:
        # Drop failed (None) samples, then batch labels and graphs together.
        labels, graphs = zip(*filter(lambda sample: sample is not None, sample_list))
        return torch.cat(labels, dim=1), dgl.batch(graphs)

    def _shared_dataloader(self, holdout: str, shuffle: bool) -> DataLoader:
        if self._vocabulary is None:
            raise RuntimeError("Setup vocabulary before creating data loaders")
        holdout_file = path.join(self._data_folder, f"{holdout}.jsonl")
        dataset = JsonlASTDataset(holdout_file, self._vocabulary, self._config, holdout == self._train)
        # Only the train holdout uses the train batch size.
        batch_size = self._config.batch_size if holdout == self._train else self._config.test_batch_size
        return DataLoader(
            dataset, batch_size, shuffle=shuffle, num_workers=self._config.num_workers, collate_fn=self._collate_batch
        )

    def train_dataloader(self, *args, **kwargs) -> DataLoader:
        return self._shared_dataloader(self._train, True)

    def val_dataloader(self, *args, **kwargs) -> DataLoader:
        return self._shared_dataloader(self._val, False)

    def test_dataloader(self, *args, **kwargs) -> DataLoader:
        return self._shared_dataloader(self._test, False)

    def transfer_batch_to_device(
        self, batch: Tuple[torch.Tensor, dgl.DGLGraph], device: torch.device, dataloader_idx: int
    ) -> Tuple[torch.Tensor, dgl.DGLGraph]:
        return batch[0].to(device), batch[1].to(device)

    @property
    def vocabulary(self) -> Vocabulary:
        if self._vocabulary is None:
            raise RuntimeError("Setup data module for initializing vocabulary")
        return self._vocabulary
class JsonlTypedASTDatamodule(JsonlASTDatamodule):
    """Variant of JsonlASTDatamodule for ASTs carrying node-type information."""

    _vocabulary: Optional[TypedVocabulary] = None

    @property
    def vocabulary(self) -> TypedVocabulary:
        if self._vocabulary is None:
            raise RuntimeError(f"Setup data module for initializing vocabulary")
        return self._vocabulary

    def setup(self, stage: Optional[str] = None):
        # Same as the parent, but builds/loads a TypedVocabulary, which adds
        # a node-type vocabulary bounded by config.max_types.
        if not path.exists(path.join(self._data_folder, Vocabulary.vocab_filename)):
            print("Can't find vocabulary, collect it from train holdout")
            build_from_scratch(path.join(self._data_folder, f"{self._train}.jsonl"), TypedVocabulary)
        vocabulary_path = path.join(self._data_folder, Vocabulary.vocab_filename)
        self._vocabulary = TypedVocabulary(
            vocabulary_path, self._config.max_labels, self._config.max_tokens, self._config.max_types
        )

    def _shared_dataloader(self, holdout: str, shuffle: bool) -> DataLoader:
        # NOTE(review): duplicates the parent implementation except for the
        # dataset class; consider parameterizing the dataset type instead.
        if self._vocabulary is None:
            raise RuntimeError(f"Setup vocabulary before creating data loaders")
        holdout_file = path.join(self._data_folder, f"{holdout}.jsonl")
        dataset = JsonlTypedASTDataset(holdout_file, self._vocabulary, self._config, holdout == self._train)
        batch_size = self._config.batch_size if holdout == self._train else self._config.test_batch_size
        return DataLoader(
            dataset, batch_size, shuffle=shuffle, num_workers=self._config.num_workers, collate_fn=self._collate_batch
        )
| 45.666667 | 118 | 0.715937 |
8b279018139e02e398af185c029b2654c8bb8bc7 | 2,185 | py | Python | vnpy_jqdata/jqdata_datafeed.py | fsksf/vnpy_jqdata | 608c2a7766b0876f7569fe1e5dabd34f03ee28aa | [
"MIT"
] | null | null | null | vnpy_jqdata/jqdata_datafeed.py | fsksf/vnpy_jqdata | 608c2a7766b0876f7569fe1e5dabd34f03ee28aa | [
"MIT"
] | null | null | null | vnpy_jqdata/jqdata_datafeed.py | fsksf/vnpy_jqdata | 608c2a7766b0876f7569fe1e5dabd34f03ee28aa | [
"MIT"
] | null | null | null | from datetime import timedelta
from typing import List, Optional
from pytz import timezone
import traceback
import pandas as pd
import jqdatasdk
from vnpy.trader.datafeed import BaseDatafeed
from vnpy.trader.setting import SETTINGS
from vnpy.trader.constant import Interval
from vnpy.trader.object import BarData, HistoryRequest
# Map vn.py bar intervals to the frequency strings jqdatasdk expects.
INTERVAL_VT2RQ = {
    Interval.MINUTE: "1m",
    Interval.HOUR: "60m",
    Interval.DAILY: "1d",
}

# Bar timestamps from JQData are localized to Beijing time.
CHINA_TZ = timezone("Asia/Shanghai")
class JqdataDatafeed(BaseDatafeed):
    """Datafeed backed by the JoinQuant (jqdatasdk) data service."""

    def __init__(self):
        """Read the JoinQuant credentials from the global vn.py settings."""
        self.username: str = SETTINGS["datafeed.username"]
        self.password: str = SETTINGS["datafeed.password"]

    def query_bar_history(self, req: HistoryRequest) -> Optional[List[BarData]]:
        """Query historical bar (k-line) data.

        Returns None when authentication fails, otherwise a (possibly
        empty) list of BarData localized to Asia/Shanghai time.
        """
        # Initialize the API session.
        try:
            jqdatasdk.auth(self.username, self.password)
        except Exception:
            traceback.print_exc()
            return None

        # Query the data.  The logout is now in a finally block so the
        # session is always released, even if get_price raises.
        tq_symbol = jqdatasdk.normalize_code(req.symbol)
        print(f'查询历史数据:{tq_symbol}, {req}')
        try:
            df = jqdatasdk.get_price(
                security=tq_symbol,
                frequency=INTERVAL_VT2RQ.get(req.interval),
                start_date=req.start,
                end_date=(req.end + timedelta(1)),
                panel=False
            )
        finally:
            jqdatasdk.logout()

        # Parse the result into vn.py BarData objects.
        bars: List[BarData] = []
        if df is not None:
            for tp in df.itertuples():
                # Convert the pandas index timestamp to a naive datetime,
                # then attach the Beijing timezone.
                dt = pd.Timestamp(tp.Index).to_pydatetime()
                bar = BarData(
                    symbol=req.symbol,
                    exchange=req.exchange,
                    interval=req.interval,
                    datetime=CHINA_TZ.localize(dt),
                    open_price=tp.open,
                    high_price=tp.high,
                    low_price=tp.low,
                    close_price=tp.close,
                    volume=tp.volume,
                    open_interest=0,
                    gateway_name="JQ",
                )
                bars.append(bar)
        else:
            print(f'查询不到历史数据:{tq_symbol}')
        return bars
| 28.012821 | 80 | 0.563844 |
3a3632f9a9bab22fa999cc9e12c3b0fdce460c5c | 368 | py | Python | ns-allinone-3.27/ns-3.27/src/config-store/bindings/modulegen_customizations.py | zack-braun/4607_NS | 43c8fb772e5552fb44bd7cd34173e73e3fb66537 | [
"MIT"
] | 93 | 2019-04-21T08:22:26.000Z | 2022-03-30T04:26:29.000Z | ns-allinone-3.27/ns-3.27/src/config-store/bindings/modulegen_customizations.py | zack-braun/4607_NS | 43c8fb772e5552fb44bd7cd34173e73e3fb66537 | [
"MIT"
] | 12 | 2019-04-19T16:39:58.000Z | 2021-06-22T13:18:32.000Z | ns-allinone-3.27/ns-3.27/src/config-store/bindings/modulegen_customizations.py | zack-braun/4607_NS | 43c8fb772e5552fb44bd7cd34173e73e3fb66537 | [
"MIT"
] | 21 | 2019-05-27T19:36:12.000Z | 2021-07-26T02:37:41.000Z | import os
def post_register_types(root_module):
    """Drop the GtkConfigStore wrapper when the feature is not enabled.

    Reads the comma-separated NS3_ENABLED_FEATURES environment variable;
    if GtkConfigStore support is disabled, the corresponding class wrapper
    is removed from the module (silently ignoring the case where the
    wrapper is not registered at all).
    """
    features = os.environ['NS3_ENABLED_FEATURES'].split(',')
    if 'GtkConfigStore' in features:
        return
    try:
        root_module.classes.remove(root_module['ns3::GtkConfigStore'])
    except KeyError:
        pass
| 33.454545 | 74 | 0.695652 |
66976f140976eaa008c67f48edb21fc4e303bff0 | 4,620 | py | Python | tests/unit/test_misc.py | pburkindine/localstack-debug | bbdedc4e3af8074d586428a3a519e41f7445ce31 | [
"Apache-2.0"
] | 2 | 2021-11-19T00:06:54.000Z | 2021-12-26T02:03:47.000Z | tests/unit/test_misc.py | SNOmad1/localstack | bae78a0d44a60893d49b27b3fc6562098a78decf | [
"Apache-2.0"
] | 1 | 2021-12-03T01:36:52.000Z | 2021-12-03T01:36:52.000Z | tests/unit/test_misc.py | SNOmad1/localstack | bae78a0d44a60893d49b27b3fc6562098a78decf | [
"Apache-2.0"
] | null | null | null | import asyncio
import concurrent.futures
import datetime
import time
import unittest
import yaml
from requests.models import Response
from localstack import config
from localstack.services.generic_proxy import GenericProxy, ProxyListener
from localstack.utils import async_utils, config_listener
from localstack.utils.aws import aws_stack
from localstack.utils.common import TMP_FILES, download, json_safe, load_file, now_utc, parallelize
from localstack.utils.docker_utils import PortMappings
from localstack.utils.http_utils import create_chunked_data, parse_chunked_data
class TestMisc(unittest.TestCase):
def test_environment(self):
    """Environment can be built from a JSON dict and from a plain string."""
    from_json = aws_stack.Environment.from_json({"prefix": "foobar1"})
    self.assertEqual("foobar1", from_json.prefix)
    from_string = aws_stack.Environment.from_string("foobar2")
    self.assertEqual("foobar2", from_string.prefix)
def test_parse_chunked_data(self):
# See: https://en.wikipedia.org/wiki/Chunked_transfer_encoding
chunked = "4\r\nWiki\r\n5\r\npedia\r\nE\r\n in\r\n\r\nchunks.\r\n0\r\n\r\n"
expected = "Wikipedia in\r\n\r\nchunks."
# test parsing
parsed = parse_chunked_data(chunked)
self.assertEqual(expected.strip(), parsed.strip())
# test roundtrip
chunked_computed = create_chunked_data(expected)
parsed = parse_chunked_data(chunked_computed)
self.assertEqual(expected.strip(), parsed.strip())
def test_convert_yaml_date_strings(self):
yaml_source = "Version: 2012-10-17"
obj = yaml.safe_load(yaml_source)
self.assertIn(type(obj["Version"]), (datetime.date, str))
if isinstance(obj["Version"], datetime.date):
obj = json_safe(obj)
self.assertEqual(str, type(obj["Version"]))
self.assertEqual("2012-10-17", obj["Version"])
def test_timstamp_millis(self):
t1 = now_utc()
t2 = now_utc(millis=True) / 1000
self.assertAlmostEqual(t1, t2, delta=1)
def test_port_mappings(self):
map = PortMappings()
map.add(123)
self.assertEqual("-p 123:123", map.to_str())
map.add(124)
self.assertEqual("-p 123-124:123-124", map.to_str())
map.add(234)
self.assertEqual("-p 123-124:123-124 -p 234:234", map.to_str())
map.add(345, 346)
self.assertEqual("-p 123-124:123-124 -p 234:234 -p 345:346", map.to_str())
map.add([456, 458])
self.assertEqual(
"-p 123-124:123-124 -p 234:234 -p 345:346 -p 456-458:456-458", map.to_str()
)
map = PortMappings()
map.add([123, 124])
self.assertEqual("-p 123-124:123-124", map.to_str())
map.add([234, 237], [345, 348])
self.assertEqual("-p 123-124:123-124 -p 234-237:345-348", map.to_str())
def test_update_config_variable(self):
config_listener.update_config_variable("foo", "bar")
self.assertEqual("bar", config.foo)
def test_async_parallelization(self):
def handler():
time.sleep(0.1)
results.append(1)
async def run():
await async_utils.run_sync(handler, thread_pool=thread_pool)
loop = asyncio.get_event_loop()
thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=100)
results = []
num_items = 1000
handlers = [run() for i in range(num_items)]
loop.run_until_complete(asyncio.gather(*handlers))
self.assertEqual(num_items, len(results))
thread_pool.shutdown()
# This test is not enabled in CI, it is just used for manual
# testing to debug https://github.com/localstack/localstack/issues/213
def run_parallel_download():
    """Manual stress test: download three large responses in parallel.

    Starts a local GenericProxy whose listener sleeps for the number of
    seconds encoded in the request path and then returns a ~10 MB body,
    downloads several such URLs concurrently, and asserts each file has
    the full expected length.  Not run in CI (see note above the def).
    """
    file_length = 10000000

    class DownloadListener(ProxyListener):
        def forward_request(self, method, path, data, headers):
            # path is "/<seconds>"; simulate a slow server
            sleep_time = int(path.replace("/", ""))
            time.sleep(sleep_time)
            response = Response()
            response.status_code = 200
            response._content = ("%s" % sleep_time) * file_length
            return response

    test_port = 12124
    tmp_file_pattern = "/tmp/test.%s"

    proxy = GenericProxy(port=test_port, update_listener=DownloadListener())
    proxy.start()

    def do_download(param):
        tmp_file = tmp_file_pattern % param
        # register for cleanup by the test framework
        TMP_FILES.append(tmp_file)
        download("http://localhost:%s/%s" % (test_port, param), tmp_file)

    values = (1, 2, 3)
    parallelize(do_download, values)
    proxy.stop()
    for val in values:
        tmp_file = tmp_file_pattern % val
        assert len(load_file(tmp_file)) == file_length
| 35.538462 | 99 | 0.656277 |
8e27453894c08188a6c0c3dca720651c3d0784ed | 2,456 | py | Python | mageck/mlesgeff.py | desertzk/liulab-mymageck | ab4fb11a2f9142a7703b780264b74d7e0a349add | [
"BSD-3-Clause"
] | null | null | null | mageck/mlesgeff.py | desertzk/liulab-mymageck | ab4fb11a2f9142a7703b780264b74d7e0a349add | [
"BSD-3-Clause"
] | null | null | null | mageck/mlesgeff.py | desertzk/liulab-mymageck | ab4fb11a2f9142a7703b780264b74d7e0a349add | [
"BSD-3-Clause"
] | null | null | null | '''
sgRNA efficiency related functions
'''
from __future__ import print_function
import sys
import numpy as np
from mageck.mleclassdef import *
import logging
def read_sgrna_eff(args):
    '''
    Read sgRNA efficiency scores from file and convert them to initial
    predictions.

    Expects args.sgrna_efficiency to be a path (or None to skip); each
    whitespace-separated line provides an sgRNA id at column
    args.sgrna_eff_name_column and a score at args.sgrna_eff_score_column.
    Raw scores are linearly rescaled from the fixed bounds [-1, 0.3] onto
    [0, 1] and clamped to [1e-2, 1.0]; unparsable scores become 0.
    On return, args.sgrna_efficiency is replaced by a dict mapping
    sgRNA id -> rescaled efficiency.
    '''
    if args.sgrna_efficiency is None:
        return
    nline = 0
    sgrna_eff_dict = {}
    # observed score range (informational); not used for the rescaling below
    sgscore_max = -1000000.0
    sgscore_min = 10000000.0
    # fixed bounds used for rescaling raw scores into [0, 1]
    sgscore_minbound = -1
    sgscore_maxbound = 0.3
    # use a context manager so the file handle is always closed (was leaked)
    with open(args.sgrna_efficiency) as eff_file:
        for line in eff_file:
            nline += 1
            field = line.strip().split()
            if len(field) <= args.sgrna_eff_name_column or len(field) <= args.sgrna_eff_score_column:
                logging.warning('Not enough fields in line '+str(nline)+' of the sgRNA efficiency prediction file. Please check your --sgrna-eff-name-column and --sgrna-eff-score-column options.')
                continue
            sgid = field[args.sgrna_eff_name_column]
            try:
                sgscore = float(field[args.sgrna_eff_score_column])
            except ValueError:
                logging.warning('Error parsing sgRNA efficiency scores: '+field[args.sgrna_eff_score_column]+' at line '+str(nline)+' of the sgRNA efficiency prediction file. Skip this line ..')
                sgscore = None
            sgrna_eff_dict[sgid] = sgscore
            # bug fix: comparing None with a float raises TypeError on
            # Python 3, so only track min/max for parsable scores
            if sgscore is not None:
                if sgscore > sgscore_max:
                    sgscore_max = sgscore
                if sgscore < sgscore_min:
                    sgscore_min = sgscore
    logging.info(str(nline)+' lines processed in sgRNA efficiency prediction file '+args.sgrna_efficiency+'.')
    for (sgid, sgscore) in sgrna_eff_dict.items():
        if sgscore is None:
            # unparsable score: fall back to zero efficiency
            sgscore = 0
        else:
            sgscore = (sgscore - sgscore_minbound) * 1.0 / (sgscore_maxbound - sgscore_minbound)
            if sgscore < 1e-2:
                sgscore = 1e-2
            if sgscore > 1.0:
                sgscore = 1.0
        sgrna_eff_dict[sgid] = sgscore
    args.sgrna_efficiency = sgrna_eff_dict
def sgrna_eff_initial_guess(args, allgenedict):
    '''
    Seed each gene's sgRNA weight vector (the initial guess of P(eff))
    from the efficiency scores in args.sgrna_efficiency.

    Weights default to 1.0; positions whose sgRNA id has a predicted
    efficiency are overwritten with that prediction.  No-op when
    args.sgrna_efficiency is None.
    '''
    if args.sgrna_efficiency is None:
        return
    logging.info('Converting sgRNA efficiency prediction to the initial guess of pi...')
    eff_lookup = args.sgrna_efficiency
    for gk in allgenedict.values():
        n_cols = gk.nb_count.shape[1]
        gk.w_estimate = np.ones(n_cols)
        for pos, sg_name in enumerate(gk.sgrnaid):
            if sg_name in eff_lookup:
                gk.w_estimate[pos] = eff_lookup[sg_name]
b094465bf5ba8e6005a47bc87f23b7db8ee730db | 14,413 | py | Python | django/test/simple.py | mitsuhiko/django | 156b1e97b52ba0608ae91b08a6cb9a8381cbe055 | [
"BSD-3-Clause"
] | 4 | 2015-08-27T22:03:47.000Z | 2017-09-04T08:13:44.000Z | django/test/simple.py | mitsuhiko/django | 156b1e97b52ba0608ae91b08a6cb9a8381cbe055 | [
"BSD-3-Clause"
] | null | null | null | django/test/simple.py | mitsuhiko/django | 156b1e97b52ba0608ae91b08a6cb9a8381cbe055 | [
"BSD-3-Clause"
] | 1 | 2020-01-04T14:51:18.000Z | 2020-01-04T14:51:18.000Z | import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner, TestCase
from django.utils import unittest
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
# Public API of this module.
__all__ = ('DjangoTestRunner', 'DjangoTestSuiteRunner', 'run_tests')

# The module name for tests outside models.py
TEST_MODULE = 'tests'

# Shared doctest output checker, reused by every suite built in this module.
doctestOutputChecker = OutputChecker()
class DjangoTestRunner(unittest.TextTestRunner):
    """Deprecated alias of ``unittest.TextTestRunner``.

    Kept only for backwards compatibility: instantiating it emits a
    DeprecationWarning and otherwise behaves exactly like the base class.
    """

    def __init__(self, *args, **kwargs):
        import warnings
        warnings.warn(
            "DjangoTestRunner is deprecated; it's functionality is indistinguishable from TextTestRunner",
            DeprecationWarning
        )
        super(DjangoTestRunner, self).__init__(*args, **kwargs)
def get_tests(app_module):
    """Return the ``tests`` module that lives next to *app_module*.

    *app_module* is an application's models module.  Returns None when no
    tests module exists; when a tests module exists but itself fails to
    import, the original ImportError is re-raised so real errors are not
    silently swallowed.
    """
    parts = app_module.__name__.split('.')
    prefix, last = parts[:-1], parts[-1]
    try:
        test_module = import_module('.'.join(prefix + [TEST_MODULE]))
    except ImportError:
        # Couldn't import tests.py. Was it due to a missing file, or
        # due to an import error in a tests.py that actually exists?
        # app_module either points to a models.py file, or models/__init__.py
        # Tests are therefore either in same directory, or one level up
        if last == 'models':
            app_root = import_module('.'.join(prefix))
        else:
            app_root = app_module

        if not module_has_submodule(app_root, TEST_MODULE):
            test_module = None
        else:
            # The module exists, so there must be an import error in the test
            # module itself.
            raise
    return test_module
def build_suite(app_module):
    """Create a complete Django test suite for the provided application module.

    Collects unit tests and doctests from the models module itself, and
    from an adjacent ``tests`` module if one exists.  A module may opt out
    of automatic discovery by providing its own ``suite()`` callable.
    """
    suite = unittest.TestSuite()

    # Load unit and doctests in the models.py module. If module has
    # a suite() method, use it. Otherwise build the test suite ourselves.
    if hasattr(app_module, 'suite'):
        suite.addTest(app_module.suite())
    else:
        suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(app_module))
        try:
            suite.addTest(doctest.DocTestSuite(app_module,
                                               checker=doctestOutputChecker,
                                               runner=DocTestRunner))
        except ValueError:
            # No doc tests in models.py
            pass

    # Check to see if a separate 'tests' module exists parallel to the
    # models module
    test_module = get_tests(app_module)
    if test_module:
        # Load unit and doctests in the tests.py module. If module has
        # a suite() method, use it. Otherwise build the test suite ourselves.
        if hasattr(test_module, 'suite'):
            suite.addTest(test_module.suite())
        else:
            suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_module))
            try:
                suite.addTest(doctest.DocTestSuite(test_module,
                                                   checker=doctestOutputChecker,
                                                   runner=DocTestRunner))
            except ValueError:
                # No doc tests in tests.py
                pass
    return suite
def build_test(label):
    """Construct a test case with the specified label.

    Label should be of the form model.TestClass or
    model.TestClass.test_method.  Returns an instantiated test or test
    suite corresponding to the label provided.  Falls back to matching
    doctests by name when no TestCase class matches; raises ValueError
    for malformed labels or when nothing matches.
    """
    parts = label.split('.')
    if len(parts) < 2 or len(parts) > 3:
        raise ValueError("Test label '%s' should be of the form app.TestCase or app.TestCase.test_method" % label)

    #
    # First, look for TestCase instances with a name that matches
    #
    app_module = get_app(parts[0])
    test_module = get_tests(app_module)
    TestClass = getattr(app_module, parts[1], None)

    # Couldn't find the test class in models.py; look in tests.py
    if TestClass is None:
        if test_module:
            TestClass = getattr(test_module, parts[1], None)

    try:
        if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
            if len(parts) == 2:  # label is app.TestClass
                try:
                    return unittest.TestLoader().loadTestsFromTestCase(TestClass)
                except TypeError:
                    raise ValueError("Test label '%s' does not refer to a test class" % label)
            else:  # label is app.TestClass.test_method
                return TestClass(parts[2])
    except TypeError:
        # TestClass isn't a TestClass - it must be a method or normal class
        pass

    #
    # If there isn't a TestCase, look for a doctest that matches
    #
    tests = []
    for module in app_module, test_module:
        try:
            doctests = doctest.DocTestSuite(module,
                                            checker=doctestOutputChecker,
                                            runner=DocTestRunner)
            # Now iterate over the suite, looking for doctests whose name
            # matches the pattern that was given
            for test in doctests:
                if test._dt_test.name in (
                        '%s.%s' % (module.__name__, '.'.join(parts[1:])),
                        '%s.__test__.%s' % (module.__name__, '.'.join(parts[1:]))):
                    tests.append(test)
        except ValueError:
            # No doctests found.
            pass

    # If no tests were found, then we were given a bad test label.
    if not tests:
        raise ValueError("Test label '%s' does not refer to a test" % label)

    # Construct a suite out of the tests that matched.
    return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
    """
    Distribute the tests in ``suite`` into ``bins`` by test type.

    classes is a sequence of types and bins a sequence of TestSuites with
    one extra element: a test whose type matches classes[i] is added to
    bins[i], and tests matching none of the classes land in bins[-1].
    Nested TestSuites are flattened recursively.
    """
    for item in suite:
        if isinstance(item, unittest.TestSuite):
            partition_suite(item, classes, bins)
            continue
        for index, klass in enumerate(classes):
            if isinstance(item, klass):
                bins[index].addTest(item)
                break
        else:
            # no class matched: catch-all bin
            bins[-1].addTest(item)
def reorder_suite(suite, classes):
    """
    Return a new TestSuite with the tests of ``suite`` regrouped by type.

    classes is a sequence of types: all tests of type classes[0] come
    first, then classes[1], and so on; tests matching none of the classes
    are placed last.
    """
    num_classes = len(classes)
    # one bin per class plus a trailing catch-all bin
    bins = [unittest.TestSuite() for _ in range(num_classes + 1)]
    partition_suite(suite, classes, bins)
    for index in range(num_classes):
        bins[0].addTests(bins[index + 1])
    return bins[0]
def dependency_ordered(test_databases, dependencies):
    """Order test_databases so that TEST_DEPENDENCIES are honored.

    test_databases is a list of (signature, (db_name, aliases)) pairs and
    dependencies maps an alias to the aliases it depends on.  Entries are
    repeatedly deferred until all their dependencies have been resolved;
    raises ImproperlyConfigured when the dependency graph has a cycle.
    """
    ordered = []
    resolved = set()
    remaining = test_databases
    while remaining:
        progressed = False
        deferred = []
        while remaining:
            signature, (db_name, aliases) = remaining.pop()
            satisfied = True
            for alias in aliases:
                if alias not in dependencies:
                    resolved.add(alias)
                elif all(dep in resolved for dep in dependencies[alias]):
                    # every prerequisite already resolved: consume the entry
                    dependencies.pop(alias)
                    resolved.add(alias)
                else:
                    satisfied = False
            if satisfied:
                ordered.append((signature, (db_name, aliases)))
                progressed = True
            else:
                deferred.append((signature, (db_name, aliases)))
        if not progressed:
            # a full pass resolved nothing: the graph must contain a cycle
            raise ImproperlyConfigured("Circular dependency in TEST_DEPENDENCIES")
        remaining = deferred
    return ordered
class DjangoTestSuiteRunner(object):
    """Default Django test runner: builds a suite, sets up test databases,
    runs the suite, and tears everything down again.
    """

    def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
        # NOTE(review): upstream Django defaults failfast to False here;
        # confirm that the True default is intentional in this fork.
        self.verbosity = verbosity
        self.interactive = interactive
        self.failfast = failfast

    def setup_test_environment(self, **kwargs):
        """Prepare the global test environment (DEBUG off, signal handler)."""
        setup_test_environment()
        settings.DEBUG = False
        unittest.installHandler()

    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        """Build the suite for the given labels (all apps when empty),
        append extra_tests, and reorder so TestCase subclasses run first."""
        suite = unittest.TestSuite()

        if test_labels:
            for label in test_labels:
                # dotted labels name a specific class/method; bare labels an app
                if '.' in label:
                    suite.addTest(build_test(label))
                else:
                    app = get_app(label)
                    suite.addTest(build_suite(app))
        else:
            for app in get_apps():
                suite.addTest(build_suite(app))

        if extra_tests:
            for test in extra_tests:
                suite.addTest(test)

        return reorder_suite(suite, (TestCase,))

    def setup_databases(self, **kwargs):
        """Create the test databases, deduplicating identical configs and
        honoring TEST_MIRROR / TEST_DEPENDENCIES.  Returns the state needed
        by teardown_databases()."""
        from django.db import connections, DEFAULT_DB_ALIAS

        # First pass -- work out which databases actually need to be created,
        # and which ones are test mirrors or duplicate entries in DATABASES
        mirrored_aliases = {}
        test_databases = {}
        dependencies = {}
        for alias in connections:
            connection = connections[alias]
            if connection.settings_dict['TEST_MIRROR']:
                # If the database is marked as a test mirror, save
                # the alias.
                mirrored_aliases[alias] = connection.settings_dict['TEST_MIRROR']
            else:
                # Store a tuple with DB parameters that uniquely identify it.
                # If we have two aliases with the same values for that tuple,
                # we only need to create the test database once.
                item = test_databases.setdefault(
                    connection.creation.test_db_signature(),
                    (connection.settings_dict['NAME'], [])
                )
                item[1].append(alias)

                if 'TEST_DEPENDENCIES' in connection.settings_dict:
                    dependencies[alias] = connection.settings_dict['TEST_DEPENDENCIES']
                else:
                    # non-default databases implicitly depend on 'default'
                    if alias != DEFAULT_DB_ALIAS:
                        dependencies[alias] = connection.settings_dict.get('TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])

        # Second pass -- actually create the databases.
        old_names = []
        mirrors = []
        for signature, (db_name, aliases) in dependency_ordered(test_databases.items(), dependencies):
            # Actually create the database for the first connection
            connection = connections[aliases[0]]
            old_names.append((connection, db_name, True))
            test_db_name = connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
            for alias in aliases[1:]:
                connection = connections[alias]
                if db_name:
                    # duplicate config: just point it at the already-created DB
                    old_names.append((connection, db_name, False))
                    connection.settings_dict['NAME'] = test_db_name
                else:
                    # If settings_dict['NAME'] isn't defined, we have a backend where
                    # the name isn't important -- e.g., SQLite, which uses :memory:.
                    # Force create the database instead of assuming it's a duplicate.
                    old_names.append((connection, db_name, True))
                    connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)

        for alias, mirror_alias in mirrored_aliases.items():
            # remember the original name so teardown can restore it
            mirrors.append((alias, connections[alias].settings_dict['NAME']))
            connections[alias].settings_dict['NAME'] = connections[mirror_alias].settings_dict['NAME']

        return old_names, mirrors

    def run_suite(self, suite, **kwargs):
        """Run the suite with a text runner and return the result object."""
        return unittest.TextTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)

    def teardown_databases(self, old_config, **kwargs):
        """Destroy created test databases and restore original names."""
        from django.db import connections
        old_names, mirrors = old_config
        # Point all the mirrors back to the originals
        for alias, old_name in mirrors:
            connections[alias].settings_dict['NAME'] = old_name
        # Destroy all the non-mirror databases
        for connection, old_name, destroy in old_names:
            if destroy:
                connection.creation.destroy_test_db(old_name, self.verbosity)
            else:
                connection.settings_dict['NAME'] = old_name

    def teardown_test_environment(self, **kwargs):
        """Undo setup_test_environment()."""
        unittest.removeHandler()
        teardown_test_environment()

    def suite_result(self, suite, result, **kwargs):
        """Return the number of failed/errored tests for the exit code."""
        return len(result.failures) + len(result.errors)

    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """
        Run the unit tests for all the test labels in the provided list.
        Labels must be of the form:
         - app.TestClass.test_method
             Run a single specific test method
         - app.TestClass
             Run all the test methods in a given class
         - app
             Search for doctests and unittests in the named application.

        When looking for tests, the test runner will look in the models and
        tests modules for the application.

        A list of 'extra' tests may also be provided; these tests
        will be added to the test suite.

        Returns the number of tests that failed.
        """
        self.setup_test_environment()
        suite = self.build_suite(test_labels, extra_tests)
        old_config = self.setup_databases()
        result = self.run_suite(suite)
        self.teardown_databases(old_config)
        self.teardown_test_environment()
        return self.suite_result(suite, result)
| 40.147632 | 115 | 0.615555 |
91dc0ddccab3eddb2d83ba427aafce63073d72b8 | 913 | py | Python | utils/metrics/Nll.py | debashishc/texygan-analysis | f44d559b15da988080bc1a1d84399db04e69d755 | [
"MIT"
] | 881 | 2018-02-06T18:20:34.000Z | 2022-03-29T13:18:12.000Z | utils/metrics/Nll.py | debashishc/texygan-analysis | f44d559b15da988080bc1a1d84399db04e69d755 | [
"MIT"
] | 48 | 2018-02-13T21:31:24.000Z | 2021-07-03T13:35:21.000Z | utils/metrics/Nll.py | debashishc/texygan-analysis | f44d559b15da988080bc1a1d84399db04e69d755 | [
"MIT"
] | 224 | 2018-02-07T04:48:31.000Z | 2022-03-18T12:26:25.000Z | import numpy as np
from utils.metrics.Metrics import Metrics
class Nll(Metrics):
    """Negative log-likelihood metric computed over a data loader.

    Averages the per-batch NLL reported by *rnn* through the TensorFlow
    session *sess*.
    """

    def __init__(self, data_loader, rnn, sess):
        super().__init__()
        self.name = 'nll-oracle'
        self.data_loader = data_loader
        self.sess = sess
        self.rnn = rnn

    def set_name(self, name):
        """Override the metric's display name."""
        self.name = name

    def get_name(self):
        """Return the metric's display name."""
        return self.name

    def get_score(self):
        """Metrics interface entry point: return the mean NLL."""
        return self.nll_loss()

    def nll_loss(self):
        """Return the mean NLL over all batches of the data loader."""
        nll = []
        self.data_loader.reset_pointer()
        for it in range(self.data_loader.num_batch):
            batch = self.data_loader.next_batch()
            # fixme bad taste
            # Models without a get_nll() method raise here, in which case
            # we fall back to evaluating the pretraining loss tensor.
            try:
                g_loss = self.rnn.get_nll(self.sess, batch)
            except Exception as e:
                g_loss = self.sess.run(self.rnn.pretrain_loss, {self.rnn.x: batch})
            nll.append(g_loss)
        return np.mean(nll)
| 26.085714 | 83 | 0.580504 |
d6a7b9b83218241f8033d83b1b7282d0d832b2dc | 2,251 | py | Python | aio.api.github/tests/test_abstract_base.py | phlax/abstracts | 53fbbee68d1f56effe0ded1ed4e28be870693877 | [
"Apache-2.0"
] | 1 | 2021-12-09T19:24:48.000Z | 2021-12-09T19:24:48.000Z | aio.api.github/tests/test_abstract_base.py | phlax/abstracts | 53fbbee68d1f56effe0ded1ed4e28be870693877 | [
"Apache-2.0"
] | 392 | 2021-08-24T15:55:32.000Z | 2022-03-28T14:26:22.000Z | aio.api.github/tests/test_abstract_base.py | envoyproxy/pytooling | db8b60184f8a61b3184a111b0cfaff4780511b46 | [
"Apache-2.0"
] | 3 | 2021-10-06T13:43:11.000Z | 2021-11-29T13:48:56.000Z |
from unittest.mock import MagicMock, PropertyMock
import pytest
from aio.api.github.abstract import base
def test_abstract_base_githubentity_constructor():
    """GithubEntity stores the api object and raw data; derived
    attributes are lazily computed (not cached in __dict__)."""
    entity = base.GithubEntity("GITHUB", "DATA")
    assert entity._github == "GITHUB"
    assert entity.data == "DATA"
    assert entity.github == "GITHUB"
    assert "github" not in entity.__dict__
    assert entity.__data__ == {}
    assert "__data__" not in entity.__dict__
@pytest.mark.parametrize("k", ["A", "B", "C"])
@pytest.mark.parametrize("default", ["UNSET", None, True, False, "SOMESTRING"])
@pytest.mark.parametrize("mangle", ["A", "B", "C"])
def test_abstract_base_githubentity_dunder_getattr(
        patches, k, default, mangle):
    """__getattr__ resolves keys through __data__, falls back to the
    supplied default for missing keys, and raises AttributeError when
    the key is missing and no default was given."""
    data = dict(B=MagicMock(), C=MagicMock())
    entity = base.GithubEntity("GITHUB", data)
    patched = patches(
        ("GithubEntity.__data__",
         dict(new_callable=PropertyMock)),
        prefix="aio.api.github.abstract.base")
    # "UNSET" marks the call made with no explicit default argument
    if default == "UNSET":
        args = ()
    else:
        args = (default, )

    with patched as (m_data, ):
        if k not in data and default == "UNSET":
            with pytest.raises(AttributeError):
                entity.__getattr__(k, *args)
        else:
            result = entity.__getattr__(k, *args)

    if k in data:
        # the value comes from the __data__ mapping's converter
        assert (
            result
            == m_data.return_value.get.return_value.return_value)
        call_args = list(m_data.return_value.get.call_args)
        assert call_args[0][0] == k
        # the fallback converter passed to .get() must be the identity
        marker = MagicMock()
        assert call_args[0][1](marker) is marker
        assert call_args[1] == {}
        assert (
            list(m_data.return_value.get.return_value.call_args)
            == [(data[k], ), {}])
        return
    elif default != "UNSET":
        assert result == default
    # missing key: the __data__ property must not have been consulted
    assert not m_data.called
def test_abstract_base_githubrepoentity_constructor():
    """GithubRepoEntity stores its repo and data and is a GithubEntity."""
    entity = base.GithubRepoEntity("REPO", "DATA")
    assert entity.repo == "REPO"
    assert entity.data == "DATA"
    assert isinstance(entity, base.GithubEntity)
def test_abstract_base_githubrepoentity_github():
    """The entity's github attribute delegates to repo.github without
    caching anything on the repo object."""
    repo = MagicMock()
    entity = base.GithubRepoEntity(repo, "DATA")
    assert entity.github == repo.github
    # NOTE(review): this checks repo.__dict__, not entity.__dict__ —
    # confirm the intent is to verify no attribute is set on the repo.
    assert "github" not in repo.__dict__
821260de464090fa7e03e18e074e0a8a76b108dc | 3,524 | py | Python | gbmgeometry/utils/gbm_time.py | drJfunk/gbmgeometry | ca11005c349546ed962bb1bbc4f66d8022ea79a1 | [
"MIT"
] | 4 | 2019-10-31T06:28:13.000Z | 2020-03-28T14:31:07.000Z | gbmgeometry/utils/gbm_time.py | drJfunk/gbmgeometry | ca11005c349546ed962bb1bbc4f66d8022ea79a1 | [
"MIT"
] | 4 | 2020-03-04T16:16:39.000Z | 2020-04-08T11:28:03.000Z | gbmgeometry/utils/gbm_time.py | drJfunk/gbmgeometry | ca11005c349546ed962bb1bbc4f66d8022ea79a1 | [
"MIT"
] | 7 | 2017-10-26T09:32:37.000Z | 2022-03-21T16:32:20.000Z | import astropy.time as time
import astropy.units as u
import numpy as np
class GBMTime(object):
    """Wrapper around an astropy Time object that adds Fermi GBM Mission
    Elapsed Time (MET) conversions and mission-week arithmetic.
    """

    def __init__(self, time_object):
        # time_object: an astropy.time.Time instance
        self._time_object = time_object
        self._current_mjd = self._time_object.mjd
        # derive the Fermi MET from the MJD
        self._calculate_met_from_mjd()
        self._utc_zero = self._calculate_MJD_from_MET(0)
        # this is when week 9 of the mission starts
        self._utc_start_of_sc_data = "2008-08-07T03:35:44.0"
        self._time_of_start_of_sc_data = time.Time(self._utc_start_of_sc_data)

    @property
    def met(self):
        # Fermi Mission Elapsed Time in seconds
        return self._met

    @property
    def utc(self):
        # ISO-formatted UTC string
        return self._time_object.iso

    @property
    def time(self):
        # the underlying astropy Time object
        return self._time_object

    @property
    def t_zero(self):
        # astropy Time corresponding to MET = 0
        return self._utc_zero

    @property
    def mission_week(self):
        # weeks elapsed since the start of spacecraft data, offset so that
        # the first data week is week 10
        dt = (self._time_object - self._time_of_start_of_sc_data).to(u.week)
        return dt + 10 * u.week

    @classmethod
    def from_UTC_fits(cls, date_string):
        """
        Create a time object from a fits UTC representation

        :param date_string:
        :return:
        """
        time_object = time.Time(date_string, format="fits", scale="utc")
        return cls(time_object)

    @classmethod
    def from_MET(cls, met):
        """Create a GBMTime from a Fermi MET value (seconds)."""
        time_object = GBMTime._calculate_MJD_from_MET(met)
        return cls(time_object)

    @staticmethod
    def _calculate_MJD_from_MET(met):
        """Convert a Fermi MET (seconds) into an astropy Time (MJD/UTC)."""
        # MET thresholds correspond to leap-second insertions; the TT-UTC
        # offset grows by one second at each
        if met <= 252460801.000:
            utc_tt_diff = 65.184
        elif met <= 362793602.000:
            utc_tt_diff = 66.184
        elif met <= 457401603.000:
            utc_tt_diff = 67.184
        elif met <= 504921604.000:
            utc_tt_diff = 68.184
        else:
            utc_tt_diff = 69.184
        mjdutc = (
            ((met - utc_tt_diff) / 86400.0) + 51910 + 0.0007428703703
        )  # -68.184 added to account for diff between TT and UTC and the 4 leapseconds since 2001
        # mjdtt = ((met) / 86400.0) + 51910 + 0.00074287037037
        return time.Time(mjdutc, scale="utc", format="mjd")

    def _calculate_met_from_mjd(self):
        """
        calculated the Fermi MET given MJD

        :return:
        """
        # MJD thresholds mirror the leap-second boundaries used above
        if self._current_mjd <= 54832.00000000:
            utc_tt_diff = 65.184
        elif self._current_mjd <= 56109.00000000:
            utc_tt_diff = 66.184
        elif self._current_mjd <= 57204.00000000:
            utc_tt_diff = 67.184
        elif self._current_mjd <= 57754.00000000:
            utc_tt_diff = 68.184
        else:
            utc_tt_diff = 69.184
        self._met = (
            self._current_mjd - 51910 - 0.0007428703703
        ) * 86400.0 + utc_tt_diff  # convert it into MET

    def __add__(self, other):
        # other: a TimeDelta, or a number interpreted as seconds
        if isinstance(other, time.TimeDelta):
            new_time = self._time_object + other
        else:
            # assuming second addition
            dt = time.TimeDelta(other, format="sec")
            new_time = self._time_object + dt
        return GBMTime(new_time)

    def __sub__(self, other):
        # GBMTime - GBMTime yields a TimeDelta; otherwise a shifted GBMTime
        if isinstance(other, time.TimeDelta):
            new_time = self._time_object - other
        elif isinstance(other, GBMTime):
            dt = self._time_object - other.time
            return dt
        else:
            # assuming second addition
            dt = time.TimeDelta(other, format="sec")
            new_time = self._time_object - dt
        return GBMTime(new_time)
# def mission_week(met):
| 23.184211 | 98 | 0.592509 |
3d6dac9aa511860a68f7416332e64d0c2c0f7c1c | 1,378 | py | Python | app/accelerators/movidius/runtests.py | xscanpix/project-cs-ht18 | aeb864be868613995e8a4075714d146e95c74d72 | [
"Apache-2.0"
] | 1 | 2018-11-06T12:14:40.000Z | 2018-11-06T12:14:40.000Z | app/accelerators/movidius/runtests.py | xscanpix/project-cs-ht18 | aeb864be868613995e8a4075714d146e95c74d72 | [
"Apache-2.0"
] | 1 | 2018-11-15T12:00:32.000Z | 2018-11-15T12:00:32.000Z | app/accelerators/movidius/runtests.py | xscanpix/project-cs-ht18 | aeb864be868613995e8a4075714d146e95c74d72 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
from pprint import pprint
import numpy as np
from mymov.helpers import load_settings
from tests.helpers import load_test_config
from tests.tests import run_tests, gen_model, compile_tf, MovidiusTest
if __name__ == '__main__':
    # CLI entry point: run Movidius benchmark tests described by a test
    # config, using environment settings from a JSON file.
    parser = argparse.ArgumentParser()
    parser.add_argument("-tc", "--testconfig", help="Supply test config or tensorflow model", required=True)
    parser.add_argument("-s", "--settings", help="Environment settings file.", required=True)
    args = parser.parse_args()

    # helpers resolve paths relative to PROJ_DIR
    os.environ['PROJ_DIR'] = os.getcwd()

    try:
        jsonData = load_settings(args.settings)
        testconfig = load_test_config(args.testconfig)
    except Exception as error:
        print("Error loading file:", error)
        exit()

    # pre-generate one random 1x28 float32 input per iteration, shared by
    # every test so results are comparable
    inputs = []
    for _ in range(int(testconfig['iterations'])):
        inputs.append(np.random.uniform(0,1,28).reshape(1,28).astype(np.float32))

    for index, test in enumerate(testconfig['tests']):
        # build and compile the TF model for this test before running it
        gen_model(jsonData['tfOutputPath'], test)
        compile_tf(jsonData, test)
        testclass = MovidiusTest(jsonData, testconfig, index, inputs)
        print("Test:")
        pprint(test)
        testclass.run_setup()
        for i in range(int(testconfig['runs'])):
            run_tests(testclass)
            print("Subtest #{} done".format(i + 1))
        testclass.run_cleanup()
24c1063497a72bee61dd20294e2576888308e07c | 3,413 | py | Python | app/app/settings.py | changji2069/django-rest-api | 994ee97137df6581485a3a4f2d1cdc5d51f83c45 | [
"MIT"
] | null | null | null | app/app/settings.py | changji2069/django-rest-api | 994ee97137df6581485a3a4f2d1cdc5d51f83c45 | [
"MIT"
] | null | null | null | app/app/settings.py | changji2069/django-rest-api | 994ee97137df6581485a3a4f2d1cdc5d51f83c45 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hardcoded and committed; move it to an
# environment variable before any production deployment.
SECRET_KEY = 'sk@re@cwpxj+)6yq5gdv&q(1+ft_mx!nwo8d366ci5vv=!=)+9'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'core',
    'user',
    'recipe',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'app.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Connection parameters come from the environment (docker-compose style).

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.environ.get('DB_HOST'),
        'NAME': os.environ.get('DB_NAME'),
        'USER': os.environ.get('DB_USER'),
        'PASSWORD': os.environ.get('DB_PASS'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'
MEDIA_URL = '/media/'

# Served from a shared volume mounted at /vol/web
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'

# Use the project's custom user model instead of django.contrib.auth.User
AUTH_USER_MODEL = 'core.User'
fd32329bd6e7a7bab0da8de17519f0407c988dfb | 6,909 | py | Python | backend/gunt_31916/settings.py | crowdbotics-apps/gunt-31916 | 66b36d3fdc46bc85de9f6cfed4d0d3375e04ed54 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/gunt_31916/settings.py | crowdbotics-apps/gunt-31916 | 66b36d3fdc46bc85de9f6cfed4d0d3375e04ed54 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/gunt_31916/settings.py | crowdbotics-apps/gunt-31916 | 66b36d3fdc46bc85de9f6cfed4d0d3375e04ed54 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
Django settings for gunt_31916 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gunt_31916.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gunt_31916.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| 29.525641 | 112 | 0.730207 |
82cfb8516540759b2176bbfe524e5248aabf79d4 | 51 | py | Python | src/gfl/api/listener/__init__.py | mingt2019/GFL | b8e027d2e8cdcc27c85a00744f8790d6db3cc4a3 | [
"MIT"
] | 123 | 2020-06-05T13:30:38.000Z | 2022-03-30T08:39:43.000Z | src/gfl/api/listener/__init__.py | GalaxyLearning/PFL | b8e027d2e8cdcc27c85a00744f8790d6db3cc4a3 | [
"MIT"
] | 13 | 2020-06-19T13:09:47.000Z | 2021-12-22T03:09:24.000Z | src/gfl/api/listener/__init__.py | GalaxyLearning/GFL | b8e027d2e8cdcc27c85a00744f8790d6db3cc4a3 | [
"MIT"
] | 35 | 2020-06-08T15:52:21.000Z | 2022-03-25T11:52:42.000Z | from gfl.api.listener.http_app import HttpListener
| 25.5 | 50 | 0.862745 |
a132af8d776324c0ea5cf31d5b8fcfd1b7dafcbc | 192,101 | py | Python | lib/viewvc.py | cmanley/viewvc | 18ce398586ff99ee13ac64f85c205efdf9c23bad | [
"BSD-2-Clause"
] | null | null | null | lib/viewvc.py | cmanley/viewvc | 18ce398586ff99ee13ac64f85c205efdf9c23bad | [
"BSD-2-Clause"
] | null | null | null | lib/viewvc.py | cmanley/viewvc | 18ce398586ff99ee13ac64f85c205efdf9c23bad | [
"BSD-2-Clause"
] | null | null | null | # -*-python-*-
#
# Copyright (C) 1999-2018 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
#
# viewvc: View CVS/SVN repositories via a web browser
#
# -----------------------------------------------------------------------
__version__ = '1.2-dev'
# this comes from our library; measure the startup time
import debug
debug.t_start('startup')
debug.t_start('imports')
# standard modules that we know are in the path or builtin
import sys
import os
import calendar
import copy
import fnmatch
import gzip
import mimetypes
import re
import rfc822
import stat
import struct
import tempfile
import time
import types
import urllib
# These modules come from our library (the stub has set up the path)
from common import _item, _RCSDIFF_NO_CHANGES, _RCSDIFF_IS_BINARY, _RCSDIFF_ERROR, TemplateData
import accept
import config
import ezt
import popen
import sapi
import vcauth
import vclib
import vclib.ccvs
import vclib.svn
try:
import idiff
except (SyntaxError, ImportError):
idiff = None
debug.t_end('imports')
# Initialize the system tracebacklimit value to 0, meaning stack
# traces will carry only the top-level exception string. This can be
# overridden via configuration.
sys.tracebacklimit = 0
#########################################################################
# Magic path prefixes/suffixes recognized in PATH_INFO.
checkout_magic_path = '*checkout*'
# According to RFC 1738 the '~' character is unsafe in URLs.
# But for compatibility with URLs bookmarked with old releases of ViewCVS:
oldstyle_checkout_magic_path = '~checkout~'
docroot_magic_path = '*docroot*'
# MIME types used to request marked-up (rather than raw) file output.
viewcvs_mime_type = 'text/vnd.viewcvs-markup'
alt_mime_type = 'text/x-cvsweb-markup'
# Sentinel 'root' query value meaning "show the list of roots".
view_roots_magic = '*viewroots*'
# Put here the variables we need in order to hold our state - they
# will be added (with their current value) to (almost) any link/query
# string you construct.
_sticky_vars = [
  'hideattic',
  'sortby',
  'sortdir',
  'logsort',
  'diff_format',
  'search',
  'limit_changes',
  ]
# for reading/writing between a couple descriptors
CHUNK_SIZE = 8192
# special characters that don't need to be URL encoded
_URL_SAFE_CHARS = "/*~"
class Request:
  """State and helpers for a single ViewVC HTTP request.
  Holds the server interface and configuration, the negotiated
  language, the parsed query parameters, the selected root and
  repository, the path being browsed, and the view function that
  will render it -- plus helpers for building links back into ViewVC.
  """
  def __init__(self, server, cfg):
    """Capture SERVER and CFG, negotiate the response language from
    the Accept-Language header, note any authenticated user, and
    decide whether gzip-compressed output may be used."""
    self.server = server
    self.cfg = cfg
    self.script_name = _normalize_path(server.getenv('SCRIPT_NAME', ''))
    self.browser = server.getenv('HTTP_USER_AGENT', 'unknown')
    # process the Accept-Language: header, and load the key/value
    # files, given the selected language
    hal = server.getenv('HTTP_ACCEPT_LANGUAGE','')
    try:
      self.lang_selector = accept.language(hal)
    except accept.AcceptLanguageParseError:
      # unparsable header: fall back to English
      self.lang_selector = accept.language('en')
    self.language = self.lang_selector.select_from(cfg.general.languages)
    self.kv = cfg.load_kv_files(self.language)
    # check for an authenticated username
    self.username = server.getenv('REMOTE_USER')
    # if we allow compressed output, see if the client does too
    self.gzip_compress_level = 0
    if cfg.options.allow_compress:
      http_accept_encoding = os.environ.get("HTTP_ACCEPT_ENCODING", "")
      if "gzip" in filter(None,
                          map(lambda x: x.strip(),
                              http_accept_encoding.split(','))):
        self.gzip_compress_level = 9 # make this configurable?
  def run_viewvc(self):
    """Parse the query string and PATH_INFO, resolve the root,
    repository, path and view function, then either redirect to the
    canonical URL for the request or dispatch to the view."""
    cfg = self.cfg
    # This function first parses the query string and sets the following
    # variables. Then it executes the request.
    self.view_func = None # function to call to process the request
    self.repos = None # object representing current repository
    self.rootname = None # name of current root (as used in viewvc.conf)
    self.roottype = None # current root type ('svn' or 'cvs')
    self.rootpath = None # physical path to current root
    self.pathtype = None # type of path, either vclib.FILE or vclib.DIR
    self.where = None # path to file or directory in current root
    self.query_dict = {} # validated and cleaned up query options
    self.path_parts = None # for convenience, equals where.split('/')
    self.pathrev = None # current path revision or tag
    self.auth = None # authorizer module in use
    # redirect if we're loading from a valid but irregular URL
    # These redirects aren't neccessary to make ViewVC work, it functions
    # just fine without them, but they make it easier for server admins to
    # implement access restrictions based on URL
    needs_redirect = 0
    # Process the query params
    for name, values in self.server.params().items():
      # we only care about the first value
      value = values[0]
      # patch up old queries that use 'cvsroot' to look like they used 'root'
      if name == 'cvsroot':
        name = 'root'
        needs_redirect = 1
      # same for 'only_with_tag' and 'pathrev'
      if name == 'only_with_tag':
        name = 'pathrev'
        needs_redirect = 1
      # redirect view=rev to view=revision, too
      if name == 'view' and value == 'rev':
        value = 'revision'
        needs_redirect = 1
      # validate the parameter
      _validate_param(name, value)
      # if we're here, then the parameter is okay
      self.query_dict[name] = value
    # Resolve the view parameter into a handler function.
    self.view_func = _views.get(self.query_dict.get('view', None),
                                self.view_func)
    # Process PATH_INFO component of query string
    path_info = self.server.getenv('PATH_INFO', '')
    # clean it up. this removes duplicate '/' characters and any that may
    # exist at the front or end of the path.
    ### we might want to redirect to the cleaned up URL
    path_parts = _path_parts(path_info)
    if path_parts:
      # handle magic path prefixes
      if path_parts[0] == docroot_magic_path:
        # if this is just a simple hunk of doc, then serve it up
        self.where = _path_join(path_parts[1:])
        return view_doc(self)
      elif path_parts[0] in (checkout_magic_path,
                             oldstyle_checkout_magic_path):
        path_parts.pop(0)
        self.view_func = view_checkout
        if not cfg.options.checkout_magic:
          needs_redirect = 1
    # handle tarball magic suffixes
    if self.view_func is download_tarball:
      if (self.query_dict.get('parent')):
        del path_parts[-1]
      elif path_parts[-1][-7:] == ".tar.gz":
        path_parts[-1] = path_parts[-1][:-7]
    # Figure out root name
    self.rootname = self.query_dict.get('root')
    if self.rootname == view_roots_magic:
      del self.query_dict['root']
      self.rootname = ""
      needs_redirect = 1
    elif self.rootname is None:
      if cfg.options.root_as_url_component:
        if path_parts:
          roottype, rootpath, self.rootname, new_path_parts = \
            locate_root_from_path(cfg, path_parts)
          if roottype is None:
            # Perhaps the root name is candidate for renaming...
            # Take care of old-new roots mapping
            for old_root, new_root in cfg.general.renamed_roots.iteritems():
              pp = _path_parts(old_root)
              if _path_starts_with(path_parts, pp):
                path_parts = path_parts[len(pp):]
                self.rootname = new_root
                needs_redirect = 1
            if self.rootname is None:
              # Not found; interpret whole path as root, to show as error
              self.rootname = _path_join(path_parts)
              path_parts = []
          else:
            path_parts = new_path_parts
        else:
          self.rootname = ""
      elif self.view_func != view_roots:
        self.rootname = cfg.general.default_root
    elif cfg.options.root_as_url_component:
      needs_redirect = 1
    # Take care of old-new roots mapping
    for old_root, new_root in cfg.general.renamed_roots.iteritems():
      if self.rootname == old_root:
        self.rootname = new_root
        needs_redirect = 1
    self.where = _path_join(path_parts)
    self.path_parts = path_parts
    if self.rootname:
      roottype, rootpath = locate_root(cfg, self.rootname)
      if roottype:
        # Overlay root-specific options.
        cfg.overlay_root_options(self.rootname)
        # Setup an Authorizer for this rootname and username
        debug.t_start('setup-authorizer')
        self.auth = setup_authorizer(cfg, self.username)
        debug.t_end('setup-authorizer')
        # Create the repository object
        debug.t_start('select-repos')
        try:
          if roottype == 'cvs':
            self.rootpath = vclib.ccvs.canonicalize_rootpath(rootpath)
            self.repos = vclib.ccvs.CVSRepository(self.rootname,
                                                  self.rootpath,
                                                  self.auth,
                                                  cfg.utilities,
                                                  cfg.options.use_rcsparse)
            # required so that spawned rcs programs correctly expand
            # $CVSHeader$
            os.environ['CVSROOT'] = self.rootpath
          elif roottype == 'svn':
            self.rootpath = vclib.svn.canonicalize_rootpath(rootpath)
            self.repos = vclib.svn.SubversionRepository(self.rootname,
                                                        self.rootpath,
                                                        self.auth,
                                                        cfg.utilities,
                                                        cfg.options.svn_config_dir)
          else:
            raise vclib.ReposNotFound()
        except vclib.ReposNotFound:
          pass
        debug.t_end('select-repos')
      if self.repos is None:
        raise debug.ViewVCException(
          'The root "%s" is unknown. If you believe the value is '
          'correct, then please double-check your configuration.'
          % self.rootname, "404 Not Found")
    if self.repos:
      debug.t_start('select-repos')
      self.repos.open()
      debug.t_end('select-repos')
      type = self.repos.roottype()
      if type == vclib.SVN:
        self.roottype = 'svn'
      elif type == vclib.CVS:
        self.roottype = 'cvs'
      else:
        raise debug.ViewVCException(
          'The root "%s" has an unknown type ("%s"). Expected "cvs" or "svn".'
          % (self.rootname, type),
          "500 Internal Server Error")
    # If this is using an old-style 'rev' parameter, redirect to new hotness.
    # Subversion URLs will now use 'pathrev'; CVS ones use 'revision'.
    if self.repos and self.query_dict.has_key('rev'):
      if self.roottype == 'svn' \
             and not self.query_dict.has_key('pathrev') \
             and not self.view_func == view_revision:
        self.query_dict['pathrev'] = self.query_dict['rev']
        del self.query_dict['rev']
      else: # elif not self.query_dict.has_key('revision'): ?
        self.query_dict['revision'] = self.query_dict['rev']
        del self.query_dict['rev']
      needs_redirect = 1
    if self.repos and self.view_func is not redirect_pathrev:
      # If this is an intended-to-be-hidden CVSROOT path, complain.
      if cfg.options.hide_cvsroot \
         and is_cvsroot_path(self.roottype, path_parts):
        raise debug.ViewVCException("Unknown location: /%s" % self.where,
                                    "404 Not Found")
      # Make sure path exists
      self.pathrev = pathrev = self.query_dict.get('pathrev')
      self.pathtype = _repos_pathtype(self.repos, path_parts, pathrev)
      if self.pathtype is None:
        # Path doesn't exist, see if it could be an old-style ViewVC URL
        # with a fake suffix.
        result = _strip_suffix('.diff', path_parts, pathrev, vclib.FILE, \
                               self.repos, view_diff) or \
                 _strip_suffix('.tar.gz', path_parts, pathrev, vclib.DIR, \
                               self.repos, download_tarball) or \
                 _strip_suffix('root.tar.gz', path_parts, pathrev, vclib.DIR,\
                               self.repos, download_tarball) or \
                 _strip_suffix(self.rootname + '-root.tar.gz', \
                               path_parts, pathrev, vclib.DIR, \
                               self.repos, download_tarball) or \
                 _strip_suffix('root', \
                               path_parts, pathrev, vclib.DIR, \
                               self.repos, download_tarball) or \
                 _strip_suffix(self.rootname + '-root', \
                               path_parts, pathrev, vclib.DIR, \
                               self.repos, download_tarball)
        if result:
          self.path_parts, self.pathtype, self.view_func = result
          self.where = _path_join(self.path_parts)
          needs_redirect = 1
        else:
          raise debug.ViewVCException("Unknown location: /%s" % self.where,
                                      "404 Not Found")
      # If we have an old ViewCVS Attic URL which is still valid, redirect
      if self.roottype == 'cvs':
        attic_parts = None
        if (self.pathtype == vclib.FILE and len(self.path_parts) > 1
            and self.path_parts[-2] == 'Attic'):
          attic_parts = self.path_parts[:-2] + self.path_parts[-1:]
        elif (self.pathtype == vclib.DIR and len(self.path_parts) > 0
              and self.path_parts[-1] == 'Attic'):
          attic_parts = self.path_parts[:-1]
        if attic_parts:
          self.path_parts = attic_parts
          self.where = _path_join(attic_parts)
          needs_redirect = 1
    if self.view_func is None:
      # view parameter is not set, try looking at pathtype and the
      # other parameters
      if not self.rootname:
        self.view_func = view_roots
      elif self.pathtype == vclib.DIR:
        # ViewCVS 0.9.2 used to put ?tarball=1 at the end of tarball urls
        if self.query_dict.has_key('tarball'):
          self.view_func = download_tarball
        elif self.query_dict.has_key('r1') and self.query_dict.has_key('r2'):
          self.view_func = view_diff
        else:
          self.view_func = view_directory
      elif self.pathtype == vclib.FILE:
        if self.query_dict.has_key('r1') and self.query_dict.has_key('r2'):
          self.view_func = view_diff
        elif self.query_dict.has_key('annotate'):
          self.view_func = view_annotate
        elif self.query_dict.has_key('graph'):
          if not self.query_dict.has_key('makeimage'):
            self.view_func = view_cvsgraph
          else:
            self.view_func = view_cvsgraph_image
        elif self.query_dict.has_key('revision') \
                 or cfg.options.default_file_view != "log":
          if cfg.options.default_file_view == "markup" \
             or self.query_dict.get('content-type', None) \
                in (viewcvs_mime_type, alt_mime_type):
            self.view_func = view_markup
          else:
            self.view_func = view_checkout
        else:
          self.view_func = view_log
    # If we've chosen the roots or revision view, our effective
    # location is not really "inside" the repository, so we have no
    # path and therefore no path parts or type, either.
    if self.view_func is view_revision or self.view_func is view_roots:
      self.where = ''
      self.path_parts = []
      self.pathtype = None
    # if we have a directory and the request didn't end in "/", then redirect
    # so that it does.
    if (self.pathtype == vclib.DIR and path_info[-1:] != '/'
        and self.view_func is not download_tarball
        and self.view_func is not redirect_pathrev):
      needs_redirect = 1
    # startup is done now.
    debug.t_end('startup')
    # If we need to redirect, do so. Otherwise, handle our requested view.
    if needs_redirect:
      self.server.redirect(self.get_url())
    else:
      debug.t_start('view-func')
      self.view_func(self)
      debug.t_end('view-func')
  def get_url(self, escape=0, partial=0, prefix=0, **args):
    """Constructs a link to another ViewVC page just like the get_link
    function except that it returns a single URL instead of a URL
    split into components. If PREFIX is set, include the protocol and
    server name portions of the URL."""
    url, params = apply(self.get_link, (), args)
    qs = urllib.urlencode(params)
    if qs:
      result = urllib.quote(url, _URL_SAFE_CHARS) + '?' + qs
    else:
      result = urllib.quote(url, _URL_SAFE_CHARS)
    if partial:
      # leave the URL ready for additional query arguments to be appended
      result = result + (qs and '&' or '?')
    if escape:
      result = self.server.escape(result)
    if prefix:
      result = '%s://%s%s' % \
               (self.server.getenv("HTTPS") == "on" and "https" or "http",
                self.server.getenv("HTTP_HOST"),
                result)
    return result
  def get_form(self, **args):
    """Constructs a link to another ViewVC page just like the get_link
    function except that it returns a base URL suitable for use as an
    HTML form action, and an iterable object with .name and .value
    attributes representing stuff that should be in <input
    type=hidden> tags with the link parameters."""
    url, params = apply(self.get_link, (), args)
    action = self.server.escape(urllib.quote(url, _URL_SAFE_CHARS))
    hidden_values = []
    for name, value in params.items():
      hidden_values.append(_item(name=self.server.escape(name),
                                 value=self.server.escape(value)))
    return action, hidden_values
  def get_link(self, view_func=None, where=None, pathtype=None, params=None):
    """Constructs a link pointing to another ViewVC page. All arguments
    correspond to members of the Request object. If they are set to
    None they take values from the current page. Return value is a base
    URL and a dictionary of parameters"""
    cfg = self.cfg
    if view_func is None:
      view_func = self.view_func
    if params is None:
      params = self.query_dict.copy()
    else:
      params = params.copy()
    # must specify both where and pathtype or neither
    assert (where is None) == (pathtype is None)
    # if we are asking for the revision info view, we don't need any
    # path information
    if (view_func is view_revision or view_func is view_roots
        or view_func is redirect_pathrev):
      where = pathtype = None
    elif where is None:
      where = self.where
      pathtype = self.pathtype
    # no need to add sticky variables for views with no links
    sticky_vars = not (view_func is view_checkout
                       or view_func is download_tarball)
    # The logic used to construct the URL is an inverse of the
    # logic used to interpret URLs in Request.run_viewvc
    url = self.script_name
    # add checkout magic if neccessary
    if view_func is view_checkout and cfg.options.checkout_magic:
      url = url + '/' + checkout_magic_path
    # add root to url
    rootname = None
    if view_func is not view_roots:
      if cfg.options.root_as_url_component:
        # remove root from parameter list if present
        try:
          rootname = params['root']
        except KeyError:
          rootname = self.rootname
        else:
          del params['root']
        # add root path component
        if rootname is not None:
          url = url + '/' + rootname
      else:
        # add root to parameter list
        try:
          rootname = params['root']
        except KeyError:
          rootname = params['root'] = self.rootname
        # no need to specify default root
        if rootname == cfg.general.default_root:
          del params['root']
    # add 'pathrev' value to parameter list
    if (self.pathrev is not None
        and not params.has_key('pathrev')
        and view_func is not view_revision
        and rootname == self.rootname):
      params['pathrev'] = self.pathrev
    # add path
    if where:
      url = url + '/' + where
    # add trailing slash for a directory
    if pathtype == vclib.DIR:
      url = url + '/'
    # normalize top level URLs for use in Location headers and A tags
    elif not url:
      url = '/'
    # no need to explicitly specify directory view for a directory
    if view_func is view_directory and pathtype == vclib.DIR:
      view_func = None
    # no need to explicitly specify roots view when in root_as_url
    # mode or there's no default root
    if view_func is view_roots and (cfg.options.root_as_url_component
                                    or not cfg.general.default_root):
      view_func = None
    # no need to explicitly specify annotate view when
    # there's an annotate parameter
    if view_func is view_annotate and params.get('annotate') is not None:
      view_func = None
    # no need to explicitly specify diff view when
    # there's r1 and r2 parameters
    if (view_func is view_diff and params.get('r1') is not None
        and params.get('r2') is not None):
      view_func = None
    # no need to explicitly specify checkout view when it's the default
    # view or when checkout_magic is enabled
    if view_func is view_checkout:
      if ((cfg.options.default_file_view == "co" and pathtype == vclib.FILE)
          or cfg.options.checkout_magic):
        view_func = None
    # no need to explicitly specify markup view when it's the default view
    if view_func is view_markup:
      if (cfg.options.default_file_view == "markup" \
          and pathtype == vclib.FILE):
        view_func = None
    # set the view parameter
    view_code = _view_codes.get(view_func)
    if view_code and not (params.has_key('view') and params['view'] is None):
      params['view'] = view_code
    # add sticky values to parameter list
    if sticky_vars:
      for name in _sticky_vars:
        value = self.query_dict.get(name)
        if value is not None and not params.has_key(name):
          params[name] = value
    # remove null values from parameter list
    for name, value in params.items():
      if value is None:
        del params[name]
    return url, params
def _path_parts(path):
"""Split up a repository path into a list of path components"""
# clean it up. this removes duplicate '/' characters and any that may
# exist at the front or end of the path.
return filter(None, path.split('/'))
def _normalize_path(path):
"""Collapse leading slashes in the script name
You only get multiple slashes in the script name when users accidentally
type urls like http://abc.com//viewvc.cgi/, but we correct for it
because we output the script name in links and web browsers
interpret //viewvc.cgi/ as http://viewvc.cgi/
"""
i = 0
for c in path:
if c != '/':
break
i = i + 1
if i:
return path[i-1:]
return path
def _validate_param(name, value):
  """Ensure NAME is a known query parameter and VALUE is legal for it.
  Raises a '400 Bad Request' ViewVCException when the parameter name
  is unknown or the value fails validation; returns None on success.
  """
  if name not in _legal_params:
    raise debug.ViewVCException(
      'An illegal parameter name was provided.',
      '400 Bad Request')
  validator = _legal_params[name]
  # A validator of None means any value is acceptable for this name.
  if validator is None:
    return
  # Validators are either compiled regexes (which expose .match) or
  # plain predicate callables; normalize to a single callable.
  if hasattr(validator, 'match'):
    checker = validator.match
  else:
    checker = validator
  if checker(value):
    return
  # If we get here, the input value isn't valid.
  raise debug.ViewVCException(
    'An illegal value was provided for the "%s" parameter.' % (name),
    '400 Bad Request')
def _validate_regex(value):
### we need to watch the flow of these parameters through the system
### to ensure they don't hit the page unescaped. otherwise, these
### parameters could constitute a CSS attack.
try:
re.compile(value)
return True
except:
return None
def _validate_view(value):
  """Return true iff VALUE names one of our allowed views (i.e. is a
  key of the module-level _views dispatch table)."""
  # dict.has_key() was removed in Python 3; the 'in' operator is the
  # equivalent membership test and works on both 2.x and 3.x.
  return value in _views
def _validate_mimetype(value):
  """Return true iff VALUE is a MIME type we are willing to serve.
  For security purposes, only a small predefined set is permitted."""
  permitted = (viewcvs_mime_type, alt_mime_type, 'text/plain')
  return value in permitted
# Pre-compiled validators for common query-parameter value shapes.
# obvious things here. note that we don't need uppercase for alpha.
_re_validate_alpha = re.compile('^[a-z]+$')
_re_validate_number = re.compile('^[0-9]+$')
_re_validate_boolint = re.compile('^[01]$')
# when comparing two revs, we sometimes construct REV:SYMBOL, so ':' is needed
_re_validate_revnum = re.compile('^[-_.a-zA-Z0-9:~\\[\\]/]*$')
# date time values
_re_validate_datetime = re.compile(r'^(\d\d\d\d-\d\d-\d\d(\s+\d\d:\d\d'
                                   '(:\d\d)?)?)?$')
# the legal query parameters and their validation functions
# (a value of None means the parameter accepts any value at all;
# other values are either compiled regexes or predicate callables --
# see _validate_param for how they are applied)
_legal_params = {
  'root' : None,
  'view' : _validate_view,
  'search' : _validate_regex,
  'p1' : None,
  'p2' : None,
  'hideattic' : _re_validate_boolint,
  'limit_changes' : _re_validate_number,
  'sortby' : _re_validate_alpha,
  'sortdir' : _re_validate_alpha,
  'logsort' : _re_validate_alpha,
  'diff_format' : _re_validate_alpha,
  'pathrev' : _re_validate_revnum,
  'dir_pagestart' : _re_validate_number,
  'log_pagestart' : _re_validate_number,
  'annotate' : _re_validate_revnum,
  'graph' : _re_validate_revnum,
  'makeimage' : _re_validate_boolint,
  'r1' : _re_validate_revnum,
  'tr1' : _re_validate_revnum,
  'r2' : _re_validate_revnum,
  'tr2' : _re_validate_revnum,
  'revision' : _re_validate_revnum,
  'content-type' : _validate_mimetype,
  # for cvsgraph
  'gflip' : _re_validate_boolint,
  'gbbox' : _re_validate_boolint,
  'gshow' : _re_validate_alpha,
  'gleft' : _re_validate_boolint,
  'gmaxtag' : _re_validate_number,
  # for query
  'file_match' : _re_validate_alpha,
  'branch_match' : _re_validate_alpha,
  'who_match' : _re_validate_alpha,
  'comment_match' : _re_validate_alpha,
  'dir' : None,
  'file' : None,
  'branch' : None,
  'who' : None,
  'comment' : None,
  'querysort' : _re_validate_alpha,
  'date' : _re_validate_alpha,
  'hours' : _re_validate_number,
  'mindate' : _re_validate_datetime,
  'maxdate' : _re_validate_datetime,
  'format' : _re_validate_alpha,
  # for redirect_pathrev
  'orig_path' : None,
  'orig_pathtype' : None,
  'orig_pathrev' : None,
  'orig_view' : None,
  # deprecated
  'parent' : _re_validate_boolint,
  'rev' : _re_validate_revnum,
  'tarball' : _re_validate_boolint,
  'hidecvsroot' : _re_validate_boolint,
  }
def _path_join(path_parts):
return '/'.join(path_parts)
def _path_starts_with(path_parts, first_path_parts):
if not path_parts:
return False
if len(path_parts) < len(first_path_parts):
return False
return path_parts[0:len(first_path_parts)] == first_path_parts
def _strip_suffix(suffix, path_parts, rev, pathtype, repos, view_func):
  """If the last component of PATH_PARTS ends with SUFFIX and the path
  left after stripping it has type PATHTYPE in REPOS at REV, return a
  (new_path_parts, pathtype, view_func) triple; otherwise return None.
  The caller's PATH_PARTS list is never modified."""
  if not path_parts:
    return None
  suffix_len = len(suffix)
  last = path_parts[-1]
  if last[-suffix_len:] != suffix:
    return None
  # Build a fresh component list with the suffix removed; drop the
  # final component entirely when it consists of nothing but SUFFIX.
  remainder = path_parts[:-1]
  if len(last) > suffix_len:
    remainder.append(last[:-suffix_len])
  found_type = _repos_pathtype(repos, remainder, rev)
  if found_type == pathtype:
    return remainder, found_type, view_func
  return None
def _repos_pathtype(repos, path_parts, rev):
  """Return the type (vclib.FILE or vclib.DIR) of PATH_PARTS in REPOS
  at revision REV, or None when the path does not exist there."""
  try:
    itemtype = repos.itemtype(path_parts, rev)
  except vclib.ItemNotFound:
    return None
  return itemtype
def _orig_path(request, rev_param='revision', path_param=None):
  """Get original path of requested file at old revision before copies
  or moves.
  REV_PARAM and PATH_PARM name the query parameters holding the
  revision and path to resolve; both fall back to the request's
  current pathrev/where when the parameter is absent.  Returns a
  (path_parts, rev) pair.
  """
  # The 'pathrev' variable is interpreted by nearly all ViewVC views to
  # provide a browsable snapshot of a repository at some point in its history.
  # 'pathrev' is a tag name for CVS repositories and a revision number for
  # Subversion repositories. It's automatically propagated between pages by
  # logic in the Request.get_link() function which adds it to links like a
  # sticky variable. When 'pathrev' is set, directory listings only include
  # entries that exist in the specified revision or tag. Similarly, log pages
  # will only show revisions preceding the point in history specified by
  # 'pathrev.' Markup, checkout, and annotate pages show the 'pathrev'
  # revision of files by default when no other revision is specified.
  #
  # In Subversion repositories, paths are always considered to refer to the
  # pathrev revision. For example, if there is a "circle.jpg" in revision 3,
  # which is renamed and modified as "square.jpg" in revision 4, the original
  # circle image is visible at the following URLs:
  #
  #     *checkout*/circle.jpg?pathrev=3
  #     *checkout*/square.jpg?revision=3
  #     *checkout*/square.jpg?revision=3&pathrev=4
  #
  # Note that the following:
  #
  #     *checkout*/circle.jpg?rev=3
  #
  # now gets redirected to one of the following URLs:
  #
  #     *checkout*/circle.jpg?pathrev=3 (for Subversion)
  #     *checkout*/circle.jpg?revision=3 (for CVS)
  #
  rev = request.query_dict.get(rev_param, request.pathrev)
  path = request.query_dict.get(path_param, request.where)
  # Only Subversion-backed repositories grow a _getrev method; for
  # them, trace the path's history back from pathrev to rev.
  if rev is not None and hasattr(request.repos, '_getrev'):
    try:
      pathrev = request.repos._getrev(request.pathrev)
      rev = request.repos._getrev(rev)
    except vclib.InvalidRevision:
      raise debug.ViewVCException('Invalid revision', '404 Not Found')
    return _path_parts(request.repos.get_location(path, pathrev, rev)), rev
  return _path_parts(path), rev
def setup_authorizer(cfg, username, rootname=None):
  """Setup the authorizer.  If ROOTNAME is provided, assume that
  per-root options have not been overlayed.  Otherwise, assume they
  have (and fetch the authorizer for the configured root).

  Returns an instantiated ViewVCAuthorizer for USERNAME, or None when
  no authorizer is configured.  Raises debug.ViewVCException (as a
  500 error) when the configured authorizer module can't be loaded."""
  if rootname is None:
    authorizer = cfg.options.authorizer
    params = cfg.get_authorizer_params()
  else:
    authorizer, params = cfg.get_authorizer_and_params_hack(rootname)

  # No configured authorizer?  No problem.
  if not authorizer:
    return None

  # First, try to load a module with the configured name.
  # NOTE(review): the 'imp' module is deprecated on modern Pythons;
  # this code predates importlib.
  import imp
  fp = None
  try:
    try:
      fp, path, desc = imp.find_module("%s" % (authorizer), vcauth.__path__)
      my_auth = imp.load_module('viewvc', fp, path, desc)
    except ImportError:
      raise debug.ViewVCException(
        'Invalid authorizer (%s) specified for root "%s"' \
        % (authorizer, rootname),
        '500 Internal Server Error')
  finally:
    # find_module() may return an open file handle; always close it.
    if fp:
      fp.close()

  # Add a rootname mapping callback function to the parameters.
  def _root_lookup_func(cb_rootname):
    return locate_root(cfg, cb_rootname)

  # Finally, instantiate our Authorizer.
  return my_auth.ViewVCAuthorizer(_root_lookup_func, username, params)
def check_freshness(request, mtime=None, etag=None, weak=0):
  """Perform HTTP conditional-request handling for the response.

  Compare the client's If-None-Match / If-Modified-Since request
  headers against ETAG and MTIME (seconds since epoch), emit validator
  and expiration headers, and send a '304 Not Modified' status when the
  client's copy is fresh.  WEAK selects a weak ('W/"..."') ETag.
  Returns 1 when fresh (caller should suppress the response body),
  else 0."""
  cfg = request.cfg

  # See if we are supposed to disable etags (for debugging, usually)
  if not cfg.options.generate_etags:
    return 0

  request_etag = request_mtime = None
  if etag is not None:
    if weak:
      etag = 'W/"%s"' % etag
    else:
      etag = '"%s"' % etag
    request_etag = request.server.getenv('HTTP_IF_NONE_MATCH')
  if mtime is not None:
    try:
      request_mtime = request.server.getenv('HTTP_IF_MODIFIED_SINCE')
      request_mtime = rfc822.mktime_tz(rfc822.parsedate_tz(request_mtime))
    except:
      # Missing or unparsable header -- treat as "not supplied".
      request_mtime = None

  # if we have an etag, use that for freshness checking.
  # if not available, then we use the last-modified time.
  # if not available, then the document isn't fresh.
  if etag is not None:
    isfresh = (request_etag == etag)
  elif mtime is not None:
    # NOTE(review): request_mtime may be None here; None >= number is
    # False-ish under Python 2 only -- confirm before porting.
    isfresh = (request_mtime >= mtime)
  else:
    isfresh = 0

  # require revalidation after the configured amount of time
  if cfg and cfg.options.http_expiration_time >= 0:
    expiration = rfc822.formatdate(time.time() +
                                   cfg.options.http_expiration_time)
    request.server.addheader('Expires', expiration)
    request.server.addheader('Cache-Control',
                             'max-age=%d' % cfg.options.http_expiration_time)

  if isfresh:
    request.server.header(status='304 Not Modified')
  else:
    # Stale (or first fetch): send validators for the client to cache.
    if etag is not None:
      request.server.addheader('ETag', etag)
    if mtime is not None:
      request.server.addheader('Last-Modified', rfc822.formatdate(mtime))
  return isfresh
def get_view_template(cfg, view_name, language="en"):
  """Return the parsed ezt.Template used to render VIEW_NAME.

  Honors any per-view template override in CFG, resolves the result
  relative to the configured template directory, and substitutes
  LANGUAGE for any '%lang%' placeholder in the path."""
  # Per-view override from the configuration, else "<view>.ezt".
  tname = vars(cfg.templates).get(view_name) or view_name + ".ezt"
  # Template paths are relative to the configured template_dir (if
  # any, "templates" otherwise); allow per-language selection too.
  tdir = cfg.options.template_dir or "templates"
  tname = os.path.join(tdir, tname).replace('%lang%', language)
  # Finally, resolve to an absolute path and parse the template.
  tpath = cfg.path(tname)
  debug.t_start('ezt-parse')
  template = ezt.Template(tpath)
  debug.t_end('ezt-parse')
  return template
def get_writeready_server_file(request, content_type=None, encoding=None,
                               content_length=None, allow_compress=True):
  """Return a file handle to a response body stream, after outputting
  any queued special headers (on REQUEST.server) and (optionally) a
  'Content-Type' header whose value is CONTENT_TYPE and character set
  is ENCODING.

  If CONTENT_LENGTH is provided and compression is not in use, also
  generate a 'Content-Length' header for this response.

  Callers may use ALLOW_COMPRESS to disable compression where it would
  otherwise be allowed.  (Such as when transmitting an
  already-compressed response.)

  After this function is called, it is too late to add new headers to
  the response."""
  # Compression wins over Content-Length: the gzipped size isn't known
  # until the body has been written.
  if allow_compress and request.gzip_compress_level:
    request.server.addheader('Content-Encoding', 'gzip')
  elif content_length is not None:
    request.server.addheader('Content-Length', content_length)

  if content_type and encoding:
    request.server.header("%s; charset=%s" % (content_type, encoding))
  elif content_type:
    request.server.header(content_type)
  else:
    request.server.header()

  # Wrap the raw response stream in a gzip writer when compressing.
  if allow_compress and request.gzip_compress_level:
    fp = gzip.GzipFile('', 'wb', request.gzip_compress_level,
                       request.server.file())
  else:
    fp = request.server.file()

  return fp
def generate_page(request, view_name, data, content_type=None):
  """Emit a complete response: send headers (with optional
  CONTENT_TYPE) via REQUEST's server, then render the VIEW_NAME
  template with dictionary DATA to the response stream."""
  # Headers must be flushed before template generation starts writing.
  server_fp = get_writeready_server_file(request, content_type)
  template = get_view_template(request.cfg, view_name, request.language)
  template.generate(server_fp, data)
def nav_path(request):
  """Return current path as list of items with "name" and "href" members

  The href members are view_directory links for directories and view_log
  links for files, but are set to None when the link would point to
  the current view"""
  if not request.repos:
    return []
  is_dir = request.pathtype == vclib.DIR

  # add root item
  items = []
  root_item = _item(name=request.server.escape(request.repos.name), href=None)
  # Link the root unless we're already looking at the root directory view.
  if request.path_parts or request.view_func is not view_directory:
    root_item.href = request.get_url(view_func=view_directory,
                                     where='', pathtype=vclib.DIR,
                                     params={}, escape=1)
  items.append(root_item)

  # add path part items
  path_parts = []
  for part in request.path_parts:
    path_parts.append(part)
    is_last = len(path_parts) == len(request.path_parts)

    item = _item(name=request.server.escape(part), href=None)

    # Intermediate components (or the final directory, when not already
    # shown by the directory view) link to view_directory; the final
    # file component links to view_log unless that's the current view.
    if not is_last or (is_dir and request.view_func is not view_directory):
      item.href = request.get_url(view_func=view_directory,
                                  where=_path_join(path_parts),
                                  pathtype=vclib.DIR,
                                  params={}, escape=1)
    elif not is_dir and request.view_func is not view_log:
      item.href = request.get_url(view_func=view_log,
                                  where=_path_join(path_parts),
                                  pathtype=vclib.FILE,
                                  params={}, escape=1)
    items.append(item)

  return items
def prep_tags(request, tags):
  """Return a list of _item objects (name, href) for TAGS, sorted by
  tag name, where each href is the current page with its 'pathrev'
  parameter replaced by the tag's name."""
  url, params = request.get_link(params={'pathrev': None})
  params = urllib.urlencode(params)
  base = urllib.quote(url, _URL_SAFE_CHARS)
  if params:
    base = base + '?' + params + '&pathrev='
  else:
    base = base + '?pathrev='
  base = request.server.escape(base)

  links = [_item(name=tag.name, href=base + tag.name) for tag in tags]
  links.sort(key=lambda link: link.name)
  return links
def guess_mime(filename):
  """Guess the MIME content type of FILENAME from its extension,
  returning None when no guess can be made."""
  mime_type, _ = mimetypes.guess_type(filename)
  return mime_type
def is_viewable_image(mime_type):
  """Return whether MIME_TYPE names an image format browsers render inline."""
  viewable = ('image/gif', 'image/jpeg', 'image/png')
  return mime_type and mime_type in viewable
def is_text(mime_type):
  """Return whether MIME_TYPE is textual (or unset, which we treat as text)."""
  return not mime_type or mime_type.startswith('text/')
def is_cvsroot_path(roottype, path_parts):
  """Return whether PATH_PARTS names the CVSROOT area of a CVS repository."""
  if roottype != 'cvs':
    return False
  return path_parts and path_parts[0] == 'CVSROOT'
def is_plain_text(mime_type):
  """Return whether MIME_TYPE is plain text (or unset, treated as such)."""
  if not mime_type:
    return True
  return mime_type == 'text/plain'
def default_view(mime_type, cfg):
  "Determine whether file should be viewed through markup page or sent raw"
  # If the mime type is text/anything or a supported image format we view
  # through the markup page.  If the mime type is something else, we send
  # it directly to the browser.  That way users can see things like flash
  # animations, pdfs, word documents, multimedia, etc, which wouldn't be
  # very useful marked up.  If the mime type is totally unknown (happens when
  # we encounter an unrecognized file extension) we also view it through
  # the markup page since that's better than sending it text/plain.
  if 'markup' not in cfg.options.allowed_views:
    return view_checkout
  if is_viewable_image(mime_type) or is_text(mime_type):
    return view_markup
  return view_checkout
def is_binary_file_mime_type(mime_type, cfg):
  """Return True iff MIME_TYPE is set and matches one of the binary
  file mime type patterns in CFG."""
  if not mime_type:
    return False
  for pattern in cfg.options.binary_mime_types:
    if fnmatch.fnmatch(mime_type, pattern):
      return True
  return False
def get_file_view_info(request, where, rev=None, mime_type=None, pathrev=-1):
  """Return an object holding common hrefs and a viewability flag used
  for various views of FILENAME at revision REV whose MIME type is
  MIME_TYPE.

  The object's members include:
     view_href
     download_href
     download_text_href
     annotate_href
     revision_href
     prefer_markup
     is_viewable_image
     is_binary

  PATHREV is the sticky path revision; the default of -1 means "use
  REQUEST's pathrev" (None must remain a distinguishable caller value)."""
  rev = rev and str(rev) or None
  mime_type = mime_type or guess_mime(where)
  if pathrev == -1: # cheesy default value, since we need to preserve None
    pathrev = request.pathrev

  view_href = None
  download_href = None
  download_text_href = None
  annotate_href = None
  revision_href = None

  # Each href is built only when the corresponding view is allowed by
  # the configuration.
  if 'markup' in request.cfg.options.allowed_views:
    view_href = request.get_url(view_func=view_markup,
                                where=where,
                                pathtype=vclib.FILE,
                                params={'revision': rev,
                                        'pathrev': pathrev},
                                escape=1)
  if 'co' in request.cfg.options.allowed_views:
    download_href = request.get_url(view_func=view_checkout,
                                    where=where,
                                    pathtype=vclib.FILE,
                                    params={'revision': rev,
                                            'pathrev': pathrev},
                                    escape=1)
    # Offer a forced-text/plain download for non-plain-text files.
    if not is_plain_text(mime_type):
      download_text_href = request.get_url(view_func=view_checkout,
                                           where=where,
                                           pathtype=vclib.FILE,
                                           params={'content-type': 'text/plain',
                                                   'revision': rev,
                                                   'pathrev': pathrev},
                                           escape=1)
  if 'annotate' in request.cfg.options.allowed_views:
    annotate_href = request.get_url(view_func=view_annotate,
                                    where=where,
                                    pathtype=vclib.FILE,
                                    params={'annotate': rev,
                                            'pathrev': pathrev},
                                    escape=1)
  if request.roottype == 'svn':
    revision_href = request.get_url(view_func=view_revision,
                                    params={'revision': rev},
                                    escape=1)

  is_binary_file = is_binary_file_mime_type(mime_type, request.cfg)
  if is_binary_file:
    # Binary content gets no text-oriented views -- raw download only.
    download_text_href = annotate_href = view_href = None
    prefer_markup = False
  else:
    prefer_markup = default_view(mime_type, request.cfg) == view_markup

  return _item(view_href=view_href,
               download_href=download_href,
               download_text_href=download_text_href,
               annotate_href=annotate_href,
               revision_href=revision_href,
               prefer_markup=ezt.boolean(prefer_markup),
               is_viewable_image=ezt.boolean(is_viewable_image(mime_type)),
               is_binary=ezt.boolean(is_binary_file))
# Matches URLs.  (Raw string literals: the previous non-raw patterns
# relied on Python passing unknown escapes like '\+' and '\?' through
# verbatim, which raises warnings on modern interpreters; the compiled
# patterns are byte-for-byte identical.)
_re_rewrite_url = re.compile(r'((http|https|ftp|file|svn|svn\+ssh)'
                             r'(://[-a-zA-Z0-9%.~:_/]+)((\?|\&)'
                             r'([-a-zA-Z0-9%.~:_]+)=([-a-zA-Z0-9%.~:_])+)*'
                             r'(#([-a-zA-Z0-9%.~:_]+)?)?)')

# Matches email addresses
_re_rewrite_email = re.compile(r'([-a-zA-Z0-9_.\+]+)@'
                               r'(([-a-zA-Z0-9]+\.)+[A-Za-z]{2,4})')

# Matches revision references
_re_rewrite_svnrevref = re.compile(r'\b(r|rev #?|revision #?)([0-9]+)\b')
class ViewVCHtmlFormatterTokens:
  """A parsed token stream produced by ViewVCHtmlFormatter.tokenize_text().

  Each token carries a 'match' (the raw text or MatchObject), a
  'converter' callable, and 'userdata' for that converter."""

  def __init__(self, tokens):
    self.tokens = tokens

  def get_result(self, maxlen=0):
    """Format the tokens per the registered set of formatters, and
    limited to MAXLEN visible characters (or unlimited if MAXLEN is
    0).  Return a 3-tuple containing the formatted result string, the
    number of visible characters in the result string, and a boolean
    flag indicating whether or not S was truncated."""
    pieces = []
    visible = 0
    remaining = maxlen
    for token in self.tokens:
      chunk, chunk_len = token.converter(token.match, token.userdata,
                                         remaining)
      pieces.append(chunk)
      visible = visible + chunk_len
      if remaining:
        remaining = remaining - chunk_len
        if remaining <= 0:
          # Budget exhausted: report truncation.
          return ''.join(pieces), visible, 1
    return ''.join(pieces), visible, 0
class ViewVCHtmlFormatter:
  """Format a string as HTML-encoded output with customizable markup
  rules, for example turning strings that look like URLs into anchor links.

  Usage: register per-pattern converters with add_formatter(), then
  call get_result() (or tokenize_text() for a reusable token stream).

  NOTE: While there might appear to be some unused portions of this
  interface, there is a good chance that there are consumers outside
  of ViewVC itself that make use of these things.
  """

  def __init__(self):
    # Registered [compiled_regexp, converter, userdata] triplets, in
    # priority (registration) order.
    self._formatters = []

  def format_url(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as
           linkified URL, with no more than MAXLEN characters in the
           non-HTML-tag bits.  If MAXLEN is 0, there is no maximum.
         - the number of non-HTML-tag characters returned.
    """
    s = mobj.group(0)
    trunc_s = maxlen and s[:maxlen] or s
    return '<a href="%s">%s</a>' % (sapi.escape(s),
                                    sapi.escape(trunc_s)), \
           len(trunc_s)

  def format_email(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as
           linkified email address, with no more than MAXLEN characters
           in the non-HTML-tag bits.  If MAXLEN is 0, there is no
           maximum.
         - the number of non-HTML-tag characters returned.
    """
    s = mobj.group(0)
    trunc_s = maxlen and s[:maxlen] or s
    return '<a href="mailto:%s">%s</a>' % (urllib.quote(s),
                                           self._entity_encode(trunc_s)), \
           len(trunc_s)

  def format_email_obfuscated(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as an
           entity-encoded email address, with no more than MAXLEN
           characters in the non-HTML-tag bits.  If MAXLEN is 0, there
           is no maximum.
         - the number of non-HTML-tag characters returned.
    """
    s = mobj.group(0)
    trunc_s = maxlen and s[:maxlen] or s
    return self._entity_encode(trunc_s), len(trunc_s)

  def format_email_truncated(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as an
           HTML-escaped truncated email address of no more than MAXLEN
           characters.  If MAXLEN is 0, there is no maximum.
         - the number of characters returned.
    """
    s = mobj.group(1)
    s_len = len(s)
    if (maxlen == 0) or (s_len < (maxlen - 1)):
      # Room for the local part plus the '@…' marker (2 visible chars).
      return self._entity_encode(s) + '@…', s_len + 2
    elif s_len < maxlen:
      return self._entity_encode(s) + '@', s_len + 1
    else:
      trunc_s = mobj.group(1)[:maxlen]
      return self._entity_encode(trunc_s), len(trunc_s)

  def format_svnrevref(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as an
           linkified URL to a ViewVC Subversion revision view, with no
           more than MAXLEN characters in the non-HTML-tag portions.
           If MAXLEN is 0, there is no maximum.
         - the number of characters returned.

       USERDATA is a function that accepts a revision reference
       and returns a URL to that revision.
    """
    s = mobj.group(0)
    revref = mobj.group(2)
    trunc_s = maxlen and s[:maxlen] or s
    revref_url = userdata(revref)
    return '<a href="%s">%s</a>' % (sapi.escape(revref_url),
                                    sapi.escape(trunc_s)), \
           len(trunc_s)

  def format_custom_url(self, mobj, userdata, maxlen=0):
    r"""Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as an
           linkified URL created by substituting match groups 0-9 into
           USERDATA (which is a format string that uses \N to
           represent the substitution locations) and with no more than
           MAXLEN characters in the non-HTML-tag portions.  If MAXLEN
           is 0, there is no maximum.
         - the number of characters returned.
    """
    format = userdata
    text = mobj.group(0)
    url = format
    # Substitute \0 through \9.  (Fixed: range(10), not range(9), so
    # that \9 is honored as documented; and a group that exists but
    # did not participate in the match substitutes as the empty string
    # instead of crashing str.replace() with None.)
    for i in range(10):
      try:
        repl = mobj.group(i)
      except IndexError:
        repl = None
      if repl is None:
        repl = ''
      url = url.replace(r'\%d' % (i), repl)
    trunc_s = maxlen and text[:maxlen] or text
    return '<a href="%s">%s</a>' % (sapi.escape(url),
                                    sapi.escape(trunc_s)), \
           len(trunc_s)

  def format_text(self, s, unused, maxlen=0):
    """Return a 2-tuple containing:
         - the text S, HTML-escaped, containing no more than MAXLEN
           characters.  If MAXLEN is 0, there is no maximum.
         - the number of characters returned.
    """
    trunc_s = maxlen and s[:maxlen] or s
    return sapi.escape(trunc_s), len(trunc_s)

  def add_formatter(self, regexp, conv, userdata=None):
    """Register a formatter which finds instances of strings matching
    REGEXP, and using the function CONV and USERDATA to format them.

    CONV is a function which accepts three parameters:
      - the MatchObject which holds the string portion to be formatted,
      - the USERDATA object,
      - the maximum number of characters from that string to use for
        human-readable output (or 0 to indicate no maximum).
    """
    if type(regexp) == type(''):
      regexp = re.compile(regexp)
    self._formatters.append([regexp, conv, userdata])

  def get_result(self, s, maxlen=0):
    """Format S per the set of formatters registered with this object,
    and limited to MAXLEN visible characters (or unlimited if MAXLEN
    is 0).  Return a 3-tuple containing the formatted result string,
    the number of visible characters in the result string, and a
    boolean flag indicating whether or not S was truncated.
    """
    return self.tokenize_text(s).get_result(maxlen)

  def tokenize_text(self, s):
    """Return a ViewVCHtmlFormatterTokens object containing the tokens
    created when parsing the string S.  Callers can use that object's
    get_result() function to retrieve HTML-formatted text.
    """
    tokens = []
    # We could just have a "while s:" here instead of "for line: while
    # line:", but for really large log messages with heavy
    # tokenization, the cost in both performance and memory
    # consumption of the approach taken was atrocious.
    for line in s.replace('\r\n', '\n').split('\n'):
      line = line + '\n'
      while line:
        best_match = best_conv = best_userdata = None
        for test in self._formatters:
          match = test[0].search(line)
          # If we find and match and (a) its our first one, or (b) it
          # matches text earlier than our previous best match, or (c) it
          # matches text at the same location as our previous best match
          # but extends to cover more text than that match, then this is
          # our new best match.
          #
          # Implied here is that when multiple formatters match exactly
          # the same text, the first formatter in the registration list wins.
          if match \
             and ((best_match is None) \
                  or (match.start() < best_match.start())
                  or ((match.start() == best_match.start()) \
                      and (match.end() > best_match.end()))):
            best_match = match
            best_conv = test[1]
            best_userdata = test[2]
        # If we found a match...
        if best_match:
          # ... add any non-matching stuff first, then the matching bit.
          start = best_match.start()
          end = best_match.end()
          if start > 0:
            tokens.append(_item(match=line[:start],
                                converter=self.format_text,
                                userdata=None))
          tokens.append(_item(match=best_match,
                              converter=best_conv,
                              userdata=best_userdata))
          line = line[end:]
        else:
          # Otherwise, just add the rest of the string.
          tokens.append(_item(match=line,
                              converter=self.format_text,
                              userdata=None))
          line = ''
    return ViewVCHtmlFormatterTokens(tokens)

  def _entity_encode(self, s):
    # Encode every character as a numeric HTML entity (used to
    # obfuscate email addresses from naive address harvesters).
    return ''.join(map(lambda x: '&#%d;' % (ord(x)), s))
class LogFormatter:
  """Formatting/caching wrapper around a single log message.

  get() returns the message either HTML-ized (URLs, email addresses,
  and Subversion revision references linkified per the configuration)
  or plain, optionally truncated.  Each distinct (maxlen, htmlize)
  result is cached, as are the parsed formatter tokens."""

  def __init__(self, request, log):
    self.request = request
    self.log = log or ''
    self.tokens = None
    self.cache = { }  # (maxlen, htmlize) => resulting_log

  def get(self, maxlen=0, htmlize=1):
    """Return the log message, HTML-ized iff HTMLIZE, truncated to
    MAXLEN visible characters (0 means unlimited)."""
    cfg = self.request.cfg

    # Prefer the cache.  (Membership test instead of the Python-2-only
    # dict.has_key(), which behaves identically but also works on
    # Python 3.)
    if (maxlen, htmlize) in self.cache:
      return self.cache[(maxlen, htmlize)]

    # If we are HTML-izing...
    if htmlize:
      # ...and we don't yet have ViewVCHtmlFormatter() object tokens...
      if not self.tokens:
        # ... then get them.
        lf = ViewVCHtmlFormatter()

        # Rewrite URLs.
        lf.add_formatter(_re_rewrite_url, lf.format_url)

        # Rewrite Subversion revision references.
        if self.request.roottype == 'svn':
          def revision_to_url(rev):
            return self.request.get_url(view_func=view_revision,
                                        params={'revision': rev},
                                        escape=0)
          lf.add_formatter(_re_rewrite_svnrevref, lf.format_svnrevref,
                           revision_to_url)

        # Rewrite email addresses.
        if cfg.options.mangle_email_addresses == 2:
          lf.add_formatter(_re_rewrite_email, lf.format_email_truncated)
        elif cfg.options.mangle_email_addresses == 1:
          lf.add_formatter(_re_rewrite_email, lf.format_email_obfuscated)
        else:
          lf.add_formatter(_re_rewrite_email, lf.format_email)

        # Add custom rewrite handling per configuration.  ('\x01' is a
        # placeholder so escaped colons survive the single split.)
        for rule in cfg.options.custom_log_formatting:
          rule = rule.replace('\\:', '\x01')
          regexp, format = map(lambda x: x.strip(), rule.split(':', 1))
          regexp = regexp.replace('\x01', ':')
          format = format.replace('\x01', ':')
          lf.add_formatter(re.compile(regexp), lf.format_custom_url, format)

        # Tokenize the log message.
        self.tokens = lf.tokenize_text(self.log)

      # Use our formatter to ... you know ... format.
      log, log_len, truncated = self.tokens.get_result(maxlen)
      result_log = log + (truncated and '…' or '')

    # But if we're not HTML-izing...
    else:
      # ...then do much more simplistic transformations as necessary.
      log = self.log
      if cfg.options.mangle_email_addresses == 2:
        log = re.sub(_re_rewrite_email, r'\1@...', log)
      result_log = maxlen and log[:maxlen] or log

    # In either case, populate the cache and return the results.
    self.cache[(maxlen, htmlize)] = result_log
    return result_log
# Interval length in seconds => English unit name for that interval.
_time_desc = {
         1 : 'second',
        60 : 'minute',
      3600 : 'hour',
     86400 : 'day',
    604800 : 'week',
   2628000 : 'month',
  31536000 : 'year',
  }

def get_time_text(request, interval, num):
  """Return NUM units of INTERVAL (an interval key of _time_desc, in
  seconds) as display text, honoring any i18n override found in
  REQUEST's key/value data."""
  ### some languages have even harder pluralization rules. we'll have to
  ### deal with those on demand
  if num == 0:
    return ''
  unit = _time_desc[interval]
  if num == 1:
    attr = unit + '_singular'
    fmt = '%d ' + unit
  else:
    attr = unit + '_plural'
    fmt = '%d ' + unit + 's'
  try:
    fmt = getattr(request.kv.i18n.time, attr)
  except AttributeError:
    # No i18n override configured; keep the default English format.
    pass
  return fmt % num
def little_time(request):
  """Return the display text for a near-zero age, honoring any i18n
  override in REQUEST's key/value data."""
  try:
    return request.kv.i18n.time.little_time
  except AttributeError:
    # No i18n override configured; use the English default.
    return 'very little time'
def html_time(request, secs, extended=0):
  """Return the age of timestamp SECS (seconds since epoch) as
  human-readable text, e.g. "3 weeks".  If EXTENDED, append one
  finer-grained component, e.g. "3 weeks, 2 days".

  (Portability fix: int()/sorted()/'//' replace the Python-2-only
  long(), dict.keys()+list.sort(), and integer '/', with identical
  behavior under Python 2.)"""
  secs = int(time.time()) - secs
  if secs < 2:
    return little_time(request)
  breaks = sorted(_time_desc.keys())
  # Pick the largest unit for which the age is at least two units.
  i = 0
  while i < len(breaks):
    if secs < 2 * breaks[i]:
      break
    i = i + 1
  value = breaks[i - 1]
  s = get_time_text(request, value, secs // value)

  if extended and i > 1:
    # Add the remainder expressed in the next-smaller unit.
    secs = secs % value
    value = breaks[i - 2]
    ext = get_time_text(request, value, secs // value)
    if ext:
      ### this is not i18n compatible. pass on it for now
      s = s + ', ' + ext
  return s
def common_template_data(request, revision=None, mime_type=None):
  """Return a TemplateData instance with data dictionary items
  common to most ViewVC views.

  REVISION, when given, overrides the revision otherwise gleaned from
  the query parameters; MIME_TYPE is passed through to the per-file
  href construction."""
  cfg = request.cfg

  # Initialize data dictionary members (sorted alphanumerically)
  data = TemplateData({
    'annotate_href' : None,
    'cfg' : cfg,
    'docroot' : cfg.options.docroot is None \
                and request.script_name + '/' + docroot_magic_path \
                or cfg.options.docroot,
    'download_href' : None,
    'download_text_href' : None,
    'graph_href': None,
    'home_href': request.script_name or '/',
    'kv' : request.kv,
    'lockinfo' : None,
    'log_href' : None,
    'nav_path' : nav_path(request),
    'pathtype' : None,
    'prefer_markup' : ezt.boolean(0),
    'queryform_href' : None,
    'rev' : None,
    'revision_href' : None,
    'rootname' : request.rootname \
                 and request.server.escape(request.rootname) or None,
    'rootpath' : request.rootpath,
    'roots_href' : None,
    'roottype' : request.roottype,
    'rss_href' : None,
    'tarball_href' : None,
    'up_href' : None,
    'username' : request.username,
    'view' : _view_codes[request.view_func],
    'view_href' : None,
    'vsn' : __version__,
    'where' : request.server.escape(request.where),
  })

  # Determine the revision of interest: explicit REVISION argument,
  # else the 'annotate'/'revision' (or, for Subversion, 'pathrev')
  # query parameters.
  rev = revision
  if not rev:
    rev = request.query_dict.get('annotate')
  if not rev:
    rev = request.query_dict.get('revision')
  if not rev and request.roottype == 'svn':
    rev = request.query_dict.get('pathrev')
  try:
    data['rev'] = hasattr(request.repos, '_getrev') \
                  and request.repos._getrev(rev) or rev
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid revision', '404 Not Found')

  if request.pathtype == vclib.DIR:
    data['pathtype'] = 'dir'
  elif request.pathtype == vclib.FILE:
    data['pathtype'] = 'file'

  if request.path_parts:
    dir = _path_join(request.path_parts[:-1])
    data['up_href'] = request.get_url(view_func=view_directory,
                                      where=dir, pathtype=vclib.DIR,
                                      params={}, escape=1)

  if 'roots' in cfg.options.allowed_views:
    data['roots_href'] = request.get_url(view_func=view_roots,
                                         escape=1, params={})

  if request.pathtype == vclib.FILE:
    # File views share a common set of hrefs and flags.
    fvi = get_file_view_info(request, request.where, data['rev'], mime_type)
    data['view_href'] = fvi.view_href
    data['download_href'] = fvi.download_href
    data['download_text_href'] = fvi.download_text_href
    data['annotate_href'] = fvi.annotate_href
    data['revision_href'] = fvi.revision_href
    data['prefer_markup'] = fvi.prefer_markup
    data['log_href'] = request.get_url(view_func=view_log, params={}, escape=1)
    if request.roottype == 'cvs' and cfg.options.use_cvsgraph:
      data['graph_href'] = request.get_url(view_func=view_cvsgraph,
                                           params={}, escape=1)
    # Look up this file's own directory entry to report lock holders.
    file_data = request.repos.listdir(request.path_parts[:-1],
                                      request.pathrev, {})
    def _only_this_file(item):
      return item.name == request.path_parts[-1]
    entries = filter(_only_this_file, file_data)
    if len(entries) == 1:
      request.repos.dirlogs(request.path_parts[:-1], request.pathrev,
                            entries, {})
      data['lockinfo'] = entries[0].lockinfo
  elif request.pathtype == vclib.DIR:
    data['view_href'] = request.get_url(view_func=view_directory,
                                        params={}, escape=1)
    if 'tar' in cfg.options.allowed_views:
      data['tarball_href'] = request.get_url(view_func=download_tarball,
                                             params={},
                                             escape=1)
    if request.roottype == 'svn':
      data['revision_href'] = request.get_url(view_func=view_revision,
                                              params={'revision': data['rev']},
                                              escape=1)
      data['log_href'] = request.get_url(view_func=view_log,
                                         params={}, escape=1)

  # Offer query-form and RSS links only when the commit database has
  # data for this root.
  if is_querydb_nonempty_for_root(request):
    if request.pathtype == vclib.DIR:
      params = {}
      if request.roottype == 'cvs' and request.pathrev:
        params['branch'] = request.pathrev
      data['queryform_href'] = request.get_url(view_func=view_queryform,
                                               params=params,
                                               escape=1)
      data['rss_href'] = request.get_url(view_func=view_query,
                                         params={'date': 'month',
                                                 'format': 'rss'},
                                         escape=1)
    elif request.pathtype == vclib.FILE:
      parts = _path_parts(request.where)
      where = _path_join(parts[:-1])
      data['rss_href'] = request.get_url(view_func=view_query,
                                         where=where,
                                         pathtype=request.pathtype,
                                         params={'date': 'month',
                                                 'format': 'rss',
                                                 'file': parts[-1],
                                                 'file_match': 'exact'},
                                         escape=1)
  return data
def retry_read(src, reqlen=None):
  """Read and return a chunk of up to REQLEN bytes (default CHUNK_SIZE)
  from file-ish object SRC, sleeping and retrying while SRC reports --
  via an eof() method returning None -- that more data may yet arrive.
  Returns an empty read result at true EOF.

  (Fixed: REQLEN was previously accepted but ignored; every read used
  CHUNK_SIZE.  The default is now resolved inside the function so the
  behavior for existing callers is unchanged.)"""
  if reqlen is None:
    reqlen = CHUNK_SIZE
  while 1:
    chunk = src.read(reqlen)
    if not chunk:
      # need to check for eof methods because the cStringIO file objects
      # returned by ccvs don't provide them
      if hasattr(src, 'eof') and src.eof() is None:
        time.sleep(1)
        continue
    return chunk
def copy_stream(src, dst, htmlize=0):
  """Copy all remaining data from file object SRC to DST, HTML-escaping
  each chunk when HTMLIZE is set."""
  while 1:
    chunk = retry_read(src)
    if not chunk:
      break
    if htmlize:
      dst.write(sapi.escape(chunk))
    else:
      dst.write(chunk)
class MarkupPipeWrapper:
  """An EZT callback that outputs a filepointer, plus some optional
  pre- and post- text."""

  def __init__(self, fp, pretext=None, posttext=None, htmlize=0):
    self.fp = fp
    self.pretext = pretext
    self.posttext = posttext
    self.htmlize = htmlize

  def __call__(self, ctx):
    """Write pretext, then the (optionally HTML-escaped) stream
    contents, then posttext to CTX's output, closing the stream."""
    out = ctx.fp
    if self.pretext:
      out.write(self.pretext)
    copy_stream(self.fp, out, self.htmlize)
    self.fp.close()
    if self.posttext:
      out.write(self.posttext)
# Matches URLs within already-HTML-escaped text ('&' may appear as
# '&amp;').  (Raw string literals: the previous non-raw pattern relied
# on Python passing unknown escapes like '\+' and '\?' through
# verbatim, which raises warnings on modern interpreters; the compiled
# pattern is byte-for-byte identical.)
_re_rewrite_escaped_url = re.compile(r'((http|https|ftp|file|svn|svn\+ssh)'
                                     r'(://[-a-zA-Z0-9%.~:_/]+)'
                                     r'((\?|\&amp;|\&|\&)'
                                     r'([-a-zA-Z0-9%.~:_]+)=([-a-zA-Z0-9%.~:_])+)*'
                                     r'(#([-a-zA-Z0-9%.~:_]+)?)?)')
def markup_escaped_urls(s):
  """Return a copy of S with all URL references -- which are expected
  to be already HTML-escaped -- wrapped in <a href=""></a>."""
  def _url_repl(match_obj):
    url = match_obj.group(0)
    # The href needs the real '&' back; the link text stays escaped.
    href = url.replace("&amp;", "&")
    return '<a href="%s">%s</a>' % (href, url)
  return _re_rewrite_escaped_url.sub(_url_repl, s)
def detect_encoding(text_block):
  """Return the encoding used by TEXT_BLOCK as detected by the chardet
  Python module.  (Currently, this is used only when syntax
  highlighting is not enabled/available; otherwise, Pygments does this
  work for us.)  Returns None when no determination can be made."""
  # Does the TEXT_BLOCK start with a BOM?  Check the longer UTF-32
  # BOMs first: the UTF-32LE BOM begins with the UTF-16LE BOM bytes
  # and would otherwise be misreported as 'utf-16'.
  for bom, encoding in [('\xff\xfe\0\0', 'utf-32'),
                        ('\0\0\xfe\xff', 'utf-32be'),
                        ('\xef\xbb\xbf', 'utf-8'),
                        ('\xff\xfe', 'utf-16'),
                        ('\xfe\xff', 'utf-16be'),
                        ]:
    if text_block.startswith(bom):
      return encoding

  # If no recognized BOM, see if chardet can help us.
  try:
    import chardet

    # If chardet can confidently claimed a match, we'll use its
    # findings.  (And if that match is 'ascii' -- which is a subset of
    # utf-8 -- we'll just call it 'utf-8' and score a zero transform.)
    resp = chardet.detect(text_block)
    if resp.get('confidence') == 1.0:
      encoding = resp.get('encoding')
      # Equality, not identity: 'is' on a string literal only worked
      # by interning accident.
      if encoding == "ascii":
        encoding = "utf-8"
      return encoding
  except Exception:
    # chardet unavailable or failed; fall through to "no idea".
    pass

  # By default ... we have no idea.
  return None
def transcode_text(text, encoding=None):
  """If ENCODING is provided and not 'utf-8', transcode TEXT from
  ENCODING to UTF-8.  On any conversion failure, TEXT is returned
  unchanged."""
  if encoding and encoding != 'utf-8':
    try:
      return unicode(text, encoding, 'replace').encode('utf-8', 'replace')
    except:
      pass
  return text
def markup_file_contents(request, cfg, file_lines, filename,
                         mime_type, encoding, colorize):
  """Return FILE_LINES marked up for HTML display.

  When COLORIZE is set and Pygments has a lexer for this content,
  the lines come back syntax-highlighted; otherwise they are
  HTML-escaped (transcoded to UTF-8 when a source ENCODING can be
  determined) with URL references linkified."""
  # Nothing to mark up?  So be it.
  if not file_lines:
    return []

  # Determine if we should (and can) use Pygments to highlight our
  # output.  Reasons not to include a) being told not to by the
  # configuration, b) not being able to import the Pygments modules,
  # and c) Pygments not having a lexer for our file's format.
  pygments_lexer = None
  if colorize:
    # NOTE(review): an ImportError here propagates to the caller --
    # presumably COLORIZE is only set when Pygments is known present.
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import ClassNotFound, \
                                get_lexer_by_name, \
                                get_lexer_for_mimetype, \
                                get_lexer_for_filename, \
                                guess_lexer
    # Here, 'encoding' doubles as a Pygments input-encoding directive:
    # 'guess' means basic detection, 'chardet' means use that library.
    if not encoding:
      encoding = 'guess'
      if cfg.options.detect_encoding:
        try:
          import chardet
          encoding = 'chardet'
        except (SyntaxError, ImportError):
          pass

    # First, see if there's a Pygments lexer associated with MIME_TYPE.
    if mime_type:
      try:
        pygments_lexer = get_lexer_for_mimetype(mime_type,
                                                encoding=encoding,
                                                tabsize=cfg.options.tabsize,
                                                stripnl=False)
      except ClassNotFound:
        pygments_lexer = None

    # If we've no lexer thus far, try to find one based on the FILENAME.
    if not pygments_lexer:
      try:
        pygments_lexer = get_lexer_for_filename(filename,
                                                encoding=encoding,
                                                tabsize=cfg.options.tabsize,
                                                stripnl=False)
      except ClassNotFound:
        pygments_lexer = None

    # Still no lexer?  If we've reason to believe this is a text
    # file, try to guess the lexer based on the file's content.
    if not pygments_lexer and is_text(mime_type) and file_lines:
      try:
        pygments_lexer = guess_lexer(file_lines[0],
                                     encoding=encoding,
                                     tabsize=cfg.options.tabsize,
                                     stripnl=False)
      except ClassNotFound:
        pygments_lexer = None

  # If we aren't highlighting, just return FILE_LINES, corrected for
  # encoding (if possible).
  if not pygments_lexer:
    # If allowed by configuration, try to detect the source encoding
    # for this file.  We'll assemble a block of data from the file
    # contents to do so... 1024 bytes should be enough.
    if not encoding and cfg.options.detect_encoding:
      block_size = 0
      text_block = ''
      for i in range(len(file_lines)):
        text_block = text_block + file_lines[i]
        if len(text_block) >= 1024:
          break
      encoding = detect_encoding(text_block)

    # Built output data comprised of marked-up and possibly-transcoded
    # source text lines wrapped in (possibly dummy) vclib.Annotation
    # objects.
    file_lines = transcode_text(''.join(file_lines), encoding)
    # Drop a single trailing newline so splitting doesn't produce a
    # spurious empty final line.
    if file_lines[-1] == '\n':
      file_lines = file_lines[:-1]
    file_lines = file_lines.split('\n')
    for i in range(len(file_lines)):
      line = file_lines[i]
      if cfg.options.tabsize > 0:
        line = line.expandtabs(cfg.options.tabsize)
      file_lines[i] = markup_escaped_urls(sapi.escape(line))
    return file_lines

  # If we get here, we're highlighting something.
  class PygmentsSink:
    # Accumulates Pygments output, one (assumed) line per write().
    def __init__(self):
      self.colorized_file_lines = []
    def write(self, buf):
      ### FIXME: Don't bank on write() being called once per line
      self.colorized_file_lines.append(markup_escaped_urls(buf.rstrip('\n\r')))

  ps = PygmentsSink()
  highlight(''.join(file_lines), pygments_lexer,
            HtmlFormatter(nowrap=True,
                          classprefix="pygments-",
                          encoding='utf-8'), ps)
  return ps.colorized_file_lines
def empty_blame_item(line, line_no):
  """Return a placeholder vclib.Annotation for LINE at LINE_NO carrying
  no revision/author data and no diff link."""
  item = vclib.Annotation(line, line_no, None, None, None, None)
  item.diff_href = None
  return item
def merge_blame_data(file_lines, blame_data):
  """Attach FILE_LINES text to BLAME_DATA annotations, returning a
  2-tuple (annotation list, error flag).  When BLAME_DATA is missing or
  its length disagrees with FILE_LINES, a list of empty placeholder
  annotations is returned instead (with the error flag set in the
  mismatch case)."""
  errorful = 0
  if blame_data and (len(file_lines) != len(blame_data)):
    # Line counts disagree: the blame data can't be trusted.
    errorful = 1
    blame_data = None
  if blame_data:
    for idx, text in enumerate(file_lines):
      blame_data[idx].text = text
    return blame_data, errorful
  placeholders = [empty_blame_item(text, idx + 1)
                  for idx, text in enumerate(file_lines)]
  return placeholders, errorful
def make_time_string(date, cfg):
  """Returns formatted date string in either local time or UTC.

  The passed in 'date' variable is seconds since epoch.  Returns None
  when DATE is None.  CFG selects local vs. UTC time and asctime vs.
  ISO 8601 output."""
  if date is None:
    return None
  if cfg.options.use_localtime:
    tm = time.localtime(date)
  else:
    tm = time.gmtime(date)
  if not cfg.options.iso8601_timestamps:
    # Classic asctime format plus a zone abbreviation.
    zone = cfg.options.use_localtime and time.tzname[tm[8]] or 'UTC'
    return time.asctime(tm) + ' ' + zone
  # ISO 8601: compute the numeric UTC offset suffix (or 'Z' for UTC).
  if cfg.options.use_localtime:
    if tm[8] and time.daylight:
      offset = -time.altzone
    else:
      offset = -time.timezone
    if offset < 0:
      suffix = '-%02d:%02d' % (-offset // 3600, (-offset % 3600) // 60)
    else:
      suffix = '+%02d:%02d' % (offset // 3600, (offset % 3600) // 60)
  else:
    suffix = 'Z'
  return time.strftime('%Y-%m-%dT%H:%M:%S', tm) + suffix
def make_rss_time_string(date, cfg):
  """Format DATE (seconds since epoch) as an RSS-style UTC timestamp.

  Returns None when DATE is None.  CFG is accepted for signature parity
  with make_time_string() but is not consulted.
  """
  if date is None:
    return None
  stamp = time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(date))
  return stamp + ' UTC'
def make_comma_sep_list_string(items):
  """Return the 'name' attributes of ITEMS joined by ', '."""
  return ', '.join([item.name for item in items])
def is_undisplayable(val):
  """Return 1 if VAL can't be converted to unicode text, else 0.

  Used by get_itemprops() to filter out property names and values
  that aren't displayable text.
  """
  try:
    unicode(val)
    return 0
  except Exception:
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit
    # aren't swallowed; any conversion failure still reports the
    # value as undisplayable.
    return 1
def get_itemprops(request, path_parts, rev):
  """Return a list of template items for the displayable versioned
  properties of PATH_PARTS in REV.

  Each item carries 'name', 'value' (htmlized, or None when the value
  itself isn't displayable text), and an 'undisplayable' ezt boolean.
  """
  itemprops = request.repos.itemprops(path_parts, rev)
  props = []
  # sorted() (rather than keys() + .sort()) works on both Python 2
  # lists and Python 3 dict views.
  for name in sorted(itemprops.keys()):
    # skip non-utf8 property names
    if is_undisplayable(name):
      continue
    lf = LogFormatter(request, itemprops[name])
    value = lf.get(maxlen=0, htmlize=1)
    undisplayable = is_undisplayable(value)
    if undisplayable:
      value = None
    props.append(_item(name=name, value=value,
                       undisplayable=ezt.boolean(undisplayable)))
  return props
def parse_mime_type(mime_type):
  """Parse MIME_TYPE into a ('type/subtype', {param: value}) 2-tuple.

  The type/subtype portion is lowercased; parameter names and values
  are returned as given (after whitespace stripping).
  """
  # A list comprehension (not a bare map()) keeps the result
  # subscriptable on both Python 2 and Python 3.
  mime_parts = [part.strip() for part in mime_type.split(';')]
  type_subtype = mime_parts[0].lower()
  parameters = {}
  for part in mime_parts[1:]:
    name, value = part.split('=', 1)
    parameters[name] = value
  return type_subtype, parameters
def calculate_mime_type(request, path_parts, rev):
  """Return a 2-tuple carrying the MIME content type and character
  encoding for the file represented by PATH_PARTS in REV.  Use REQUEST
  for repository access as necessary."""
  if not path_parts:
    return None, None
  mime_type = encoding = None
  # Prefer the svn:mime-type property when the repository is
  # Subversion and the configuration doesn't say to ignore it.
  if request.roottype == 'svn' \
     and (not request.cfg.options.svn_ignore_mimetype):
    try:
      itemprops = request.repos.itemprops(path_parts, rev)
      mime_type = itemprops.get('svn:mime-type')
      if mime_type:
        mime_type, parameters = parse_mime_type(mime_type)
        return mime_type, parameters.get('charset')
    except Exception:
      # Best-effort: fall through to filename-based guessing on any
      # repository access error.  (Narrowed from a bare except so
      # KeyboardInterrupt/SystemExit aren't swallowed.)
      pass
  return guess_mime(path_parts[-1]), None
def assert_viewable_filesize(cfg, filesize):
  """Raise a 403 ViewVCException when FILESIZE exceeds the configured
  cap (cfg.options.max_filesize_kbytes).

  A cap of 0 disables the check; a FILESIZE of -1 (unknown) always
  passes.
  """
  limit_kb = cfg.options.max_filesize_kbytes
  if limit_kb and filesize != -1 and filesize > (1024 * limit_kb):
    raise debug.ViewVCException('Display of files larger than %d KB '
                                'disallowed by configuration'
                                % (cfg.options.max_filesize_kbytes),
                                '403 Forbidden')
def markup_or_annotate(request, is_annotate):
  """Handle the 'markup' and 'annotate' views of a file.

  When IS_ANNOTATE is set, try to fetch per-line blame data; on
  failure, degrade gracefully to a plain markup display.  Viewable
  images are special-cased: rendered via an image whose source is the
  checkout view, rather than as text lines.
  """
  cfg = request.cfg
  path, rev = _orig_path(request, is_annotate and 'annotate' or 'revision')
  lines = fp = image_src_href = None
  annotation = 'none'
  revision = None
  mime_type, encoding = calculate_mime_type(request, path, rev)
  # Is this display blocked by 'binary_mime_types' configuration?
  if is_binary_file_mime_type(mime_type, cfg):
    raise debug.ViewVCException('Display of binary file content disabled '
                                'by configuration', '403 Forbidden')
  # Is this a viewable image type?
  if is_viewable_image(mime_type) \
     and 'co' in cfg.options.allowed_views:
    fp, revision = request.repos.openfile(path, rev, {})
    fp.close()
    if check_freshness(request, None, revision, weak=1):
      return
    if is_annotate:
      # Images can't be annotated line-by-line.
      annotation = 'binary'
    image_src_href = request.get_url(view_func=view_checkout,
                                     params={'revision': rev}, escape=1)
  # Not a viewable image.
  else:
    filesize = request.repos.filesize(path, rev)
    # If configuration disallows display of large files, try to honor
    # that request.
    assert_viewable_filesize(cfg, filesize)
    # If this was an annotation request, try to annotate this file.
    # If something goes wrong, that's okay -- we'll gracefully revert
    # to a plain markup display.
    blame_data = None
    if is_annotate:
      try:
        blame_source, revision = request.repos.annotate(path, rev, False)
        if check_freshness(request, None, revision, weak=1):
          return
        # Create BLAME_DATA list from BLAME_SOURCE, adding diff_href
        # items to each relevant "line".
        blame_data = []
        for item in blame_source:
          item.diff_href = None
          if item.prev_rev:
            item.diff_href = request.get_url(view_func=view_diff,
                                             params={'r1': item.prev_rev,
                                                     'r2': item.rev},
                                             escape=1, partial=1)
          blame_data.append(item)
        annotation = 'annotated'
      except vclib.NonTextualFileContents:
        annotation = 'binary'
      except:
        annotation = 'error'
    # Grab the file contents.
    fp, revision = request.repos.openfile(path, rev, {'cvs_oldkeywords' : 1})
    if check_freshness(request, None, revision, weak=1):
      fp.close()
      return
    # If we're limiting by filesize but couldn't pull off the cheap
    # check above, we'll try to do so line by line here (while
    # building our file_lines array).
    if cfg.options.max_filesize_kbytes and filesize == -1:
      file_lines = []
      filesize = 0
      while 1:
        line = fp.readline()
        if not line:
          break
        filesize = filesize + len(line)
        assert_viewable_filesize(cfg, filesize)
        file_lines.append(line)
    else:
      file_lines = fp.readlines()
    fp.close()
    # Try to colorize the file contents.
    colorize = cfg.options.enable_syntax_coloration
    try:
      lines = markup_file_contents(request, cfg, file_lines, path[-1],
                                   mime_type, encoding, colorize)
    except:
      # Colorization failed; retry without it before giving up.
      if colorize:
        lines = markup_file_contents(request, cfg, file_lines, path[-1],
                                     mime_type, encoding, False)
      else:
        raise debug.ViewVCException('Error displaying file contents',
                                    '500 Internal Server Error')
    # Now, try to match up the annotation data (if any) with the file
    # lines.
    lines, errorful = merge_blame_data(lines, blame_data)
    if errorful:
      annotation = 'error'
  data = common_template_data(request, revision, mime_type)
  data.merge(TemplateData({
    'mime_type' : mime_type,
    'log' : None,
    'date' : None,
    'ago' : None,
    'author' : None,
    'branches' : None,
    'tags' : None,
    'branch_points' : None,
    'changed' : None,
    'size' : None,
    'state' : None,
    'vendor_branch' : None,
    'prev' : None,
    'orig_path' : None,
    'orig_href' : None,
    'image_src_href' : image_src_href,
    'lines' : lines,
    'properties' : get_itemprops(request, path, rev),
    'annotation' : annotation,
    }))
  # Optionally fetch the item's latest log entry for display above
  # the file contents.
  if cfg.options.show_log_in_markup:
    options = {
      'svn_latest_log': 1, ### FIXME: Use of this magical value is uncool.
      'svn_cross_copies': 1,
      }
    revs = request.repos.itemlog(path, revision, vclib.SORTBY_REV,
                                 0, 1, options)
    entry = revs[-1]
    lf = LogFormatter(request, entry.log)
    data['date'] = make_time_string(entry.date, cfg)
    data['author'] = entry.author
    data['changed'] = entry.changed
    data['log'] = lf.get(maxlen=0, htmlize=1)
    data['size'] = entry.size
    if entry.date is not None:
      data['ago'] = html_time(request, entry.date, 1)
    if request.roottype == 'cvs':
      branch = entry.branch_number
      prev = entry.prev or entry.parent
      data['state'] = entry.dead and 'dead'
      data['prev'] = prev and prev.string
      data['vendor_branch'] = ezt.boolean(branch and branch[2] % 2 == 1)
      ### TODO: Should this be using prep_tags() instead?
      data['branches'] = make_comma_sep_list_string(entry.branches)
      data['tags'] = make_comma_sep_list_string(entry.tags)
      data['branch_points']= make_comma_sep_list_string(entry.branch_points)
  # If the displayed path isn't the requested one (history tracing),
  # link back to the original path's log.
  if path != request.path_parts:
    orig_path = _path_join(path)
    data['orig_path'] = orig_path
    data['orig_href'] = request.get_url(view_func=view_log,
                                        where=orig_path,
                                        pathtype=vclib.FILE,
                                        params={'pathrev': revision},
                                        escape=1)
  generate_page(request, "file", data)
def view_markup(request):
  """Render file contents as markup (the 'markup' view)."""
  allowed = request.cfg.options.allowed_views
  if 'markup' not in allowed:
    raise debug.ViewVCException('Markup view is disabled',
                                '403 Forbidden')
  if request.pathtype != vclib.FILE:
    raise debug.ViewVCException('Unsupported feature: markup view on '
                                'directory', '400 Bad Request')
  markup_or_annotate(request, 0)
def view_annotate(request):
  """Render file contents with per-line blame (the 'annotate' view)."""
  allowed = request.cfg.options.allowed_views
  if 'annotate' not in allowed:
    raise debug.ViewVCException('Annotation view is disabled',
                                '403 Forbidden')
  if request.pathtype != vclib.FILE:
    raise debug.ViewVCException('Unsupported feature: annotate view on '
                                'directory', '400 Bad Request')
  markup_or_annotate(request, 1)
def revcmp(rev1, rev2):
  """Three-way compare two dotted revision strings (e.g. '1.2.4').

  Returns -1, 0, or 1 as REV1 sorts before, equal to, or after REV2,
  comparing numerically component by component.
  """
  # Explicit lists plus (a > b) - (a < b) replaces the removed cmp()
  # builtin; identical result on Python 2, and works on Python 3.
  a = [int(part) for part in rev1.split('.')]
  b = [int(part) for part in rev2.split('.')]
  return (a > b) - (a < b)
def sort_file_data(file_data, roottype, sortdir, sortby, group_dirs):
  """Sort FILE_DATA in place per SORTBY/SORTDIR, optionally grouping
  directories ahead of files (GROUP_DIRS)."""
  # Sign multiplier: "down" reverses every comparison result.
  s = (sortdir == "down") and -1 or 1
  # CVS revision numbers aren't comparable across files, so do the
  # right thing and compare dates instead.
  if roottype == "cvs" and sortby == "rev":
    sortby = "date"
  def cmp_by_criterion(a, b, criterion):
    # Compare two entries according to a single sort criterion.
    if criterion == 'rev':
      return s * revcmp(a.rev, b.rev)
    if criterion == 'date':
      # Operands swapped: the latest date sorts first.
      return s * cmp(b.date, a.date)
    if criterion == 'log':
      return s * cmp(a.log, b.log)
    if criterion == 'author':
      return s * cmp(a.author, b.author)
    return s * cmp(a.name, b.name)
  def entry_cmp(a, b, sortby=sortby, group_dirs=group_dirs, s=s):
    if group_dirs:
      # A directory sorts "higher" than a non-directory; two
      # directories are compared normally.
      if a.kind == vclib.DIR:
        if b.kind != vclib.DIR:
          return -1
        return cmp_by_criterion(a, b, sortby)
      if b.kind == vclib.DIR:
        return 1
    # Entries lacking revision data (e.g. a requested tag not present
    # on the file) sort after those that have it.
    if a.rev is not None and b.rev is not None:
      return cmp_by_criterion(a, b, sortby)
    if a.rev is not None:
      return -1
    if b.rev is not None:
      return 1
    # Fall back to comparing names.
    return s * cmp(a.name, b.name)
  file_data.sort(entry_cmp)
def icmp(x, y):
  """Case insensitive three-way comparison of strings X and Y.

  Returns -1, 0, or 1, as a cmp-style sort comparator expects.
  """
  # (a > b) - (a < b) replaces the removed cmp() builtin; identical
  # result on Python 2, and works on Python 3.
  a = x.lower()
  b = y.lower()
  return (a > b) - (a < b)
def view_roots(request):
  """Render the root-listing ('roots') view."""
  if 'roots' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Root listing view is disabled',
                                '403 Forbidden')
  # Gather the viewable roots, sorted case-insensitively by name.
  expand_root_parents(request.cfg)
  allroots = list_roots(request)
  roots = []
  if len(allroots):
    names = allroots.keys()
    names.sort(icmp)
    for name in names:
      root_path, root_type, lastmod = allroots[name]
      dir_href = request.get_url(view_func=view_directory,
                                 where='', pathtype=vclib.DIR,
                                 params={'root': name}, escape=1)
      # Only Subversion roots get a top-level log link.
      if root_type == vclib.SVN:
        log_href = request.get_url(view_func=view_log,
                                   where='', pathtype=vclib.DIR,
                                   params={'root': name}, escape=1)
      else:
        log_href = None
      roots.append(_item(name=request.server.escape(name),
                         type=root_type,
                         path=root_path,
                         author=lastmod and lastmod.author or None,
                         ago=lastmod and lastmod.ago or None,
                         date=lastmod and lastmod.date or None,
                         log=lastmod and lastmod.log or None,
                         short_log=lastmod and lastmod.short_log or None,
                         rev=lastmod and lastmod.rev or None,
                         href=dir_href,
                         log_href=log_href))
  data = common_template_data(request)
  data.merge(TemplateData({
    'roots' : roots,
    'roots_shown' : len(roots),
  }))
  generate_page(request, "roots", data)
def view_directory(request):
  """Handle the directory-listing view.

  Fetches the directory's entries and their log data, applies optional
  regex filtering, sorting, and paging, and renders the "directory"
  template.
  """
  cfg = request.cfg
  # For Subversion repositories, the revision acts as a weak validator for
  # the directory listing (to take into account template changes or
  # revision property changes).
  if request.roottype == 'svn':
    try:
      rev = request.repos._getrev(request.pathrev)
    except vclib.InvalidRevision:
      raise debug.ViewVCException('Invalid revision', '404 Not Found')
    tree_rev = request.repos.created_rev(request.where, rev)
    if check_freshness(request, None, str(tree_rev), weak=1):
      return
  # List current directory
  options = {}
  if request.roottype == 'cvs':
    # NOTE: hideattic is only defined for CVS roots; CVS-only code
    # below relies on that.
    hideattic = int(request.query_dict.get('hideattic',
                                           cfg.options.hide_attic))
    options["cvs_subdirs"] = (cfg.options.show_subdir_lastmod and
                              cfg.options.show_logs)
  debug.t_start("listdir")
  file_data = request.repos.listdir(request.path_parts, request.pathrev,
                                    options)
  debug.t_end("listdir")
  # sort with directories first, and using the "sortby" criteria
  sortby = request.query_dict.get('sortby', cfg.options.sort_by) or 'file'
  sortdir = request.query_dict.get('sortdir', 'up')
  # when paging and sorting by filename, we can greatly improve
  # performance by "cheating" -- first, we sort (we already have the
  # names), then we just fetch dirlogs for the needed entries.
  # however, when sorting by other properties or not paging, we've no
  # choice but to fetch dirlogs for everything.
  debug.t_start("dirlogs")
  if cfg.options.dir_pagesize and sortby == 'file':
    dirlogs_first = int(request.query_dict.get('dir_pagestart', 0))
    if dirlogs_first > len(file_data):
      dirlogs_first = 0
    dirlogs_last = dirlogs_first + cfg.options.dir_pagesize
    for file in file_data:
      file.rev = None
      file.date = None
      file.log = None
      file.author = None
      file.size = None
      file.lockinfo = None
      file.dead = None
    sort_file_data(file_data, request.roottype, sortdir, sortby,
                   cfg.options.sort_group_dirs)
    # request dirlogs only for the slice of files in "this page"
    request.repos.dirlogs(request.path_parts, request.pathrev,
                          file_data[dirlogs_first:dirlogs_last], options)
  else:
    request.repos.dirlogs(request.path_parts, request.pathrev,
                          file_data, options)
    sort_file_data(file_data, request.roottype, sortdir, sortby,
                   cfg.options.sort_group_dirs)
  debug.t_end("dirlogs")
  # If a regex is specified, build a compiled form thereof for filtering
  searchstr = None
  search_re = request.query_dict.get('search', '')
  if cfg.options.use_re_search and search_re:
    searchstr = re.compile(search_re)
  # loop through entries creating rows and changing these values
  rows = [ ]
  dirs_displayed = files_displayed = 0
  num_dead = 0
  # set some values to be used inside loop
  where = request.where
  where_prefix = where and where + '/'
  debug.t_start("row-building")
  for file in file_data:
    # Start each row with every template field present but unset.
    row = _item(author=None, log=None, short_log=None, state=None, size=None,
                log_file=None, log_rev=None, graph_href=None, mime_type=None,
                date=None, ago=None, view_href=None, log_href=None,
                revision_href=None, annotate_href=None, download_href=None,
                download_text_href=None, prefer_markup=ezt.boolean(0),
                is_viewable_image=ezt.boolean(0), is_binary=ezt.boolean(0))
    if request.roottype == 'cvs' and file.absent:
      continue
    if cfg.options.hide_errorful_entries and file.errors:
      continue
    row.rev = file.rev
    row.author = file.author
    row.state = (request.roottype == 'cvs' and file.dead) and 'dead' or ''
    if file.date is not None:
      row.date = make_time_string(file.date, cfg)
      row.ago = html_time(request, file.date)
    if cfg.options.show_logs:
      debug.t_start("dirview_logformat")
      lf = LogFormatter(request, file.log)
      row.log = lf.get(maxlen=0, htmlize=1)
      row.short_log = lf.get(maxlen=cfg.options.short_log_len, htmlize=1)
      debug.t_end("dirview_logformat")
    row.lockinfo = file.lockinfo
    row.anchor = request.server.escape(file.name)
    row.name = request.server.escape(file.name)
    row.pathtype = (file.kind == vclib.FILE and 'file') or \
                   (file.kind == vclib.DIR and 'dir')
    row.errors = file.errors
    if file.kind == vclib.DIR:
      if cfg.options.hide_cvsroot \
         and is_cvsroot_path(request.roottype,
                             request.path_parts + [file.name]):
        continue
      dirs_displayed += 1
      row.view_href = request.get_url(view_func=view_directory,
                                      where=where_prefix+file.name,
                                      pathtype=vclib.DIR,
                                      params={},
                                      escape=1)
      if request.roottype == 'svn':
        row.revision_href = request.get_url(view_func=view_revision,
                                            params={'revision': file.rev},
                                            escape=1)
      if request.roottype == 'cvs' and file.rev is not None:
        row.rev = None
        if cfg.options.show_logs:
          row.log_file = file.newest_file
          row.log_rev = file.rev
      if request.roottype == 'svn':
        row.log_href = request.get_url(view_func=view_log,
                                       where=where_prefix + file.name,
                                       pathtype=vclib.DIR,
                                       params={},
                                       escape=1)
    elif file.kind == vclib.FILE:
      if searchstr is not None:
        if request.roottype == 'cvs' and (file.errors or file.dead):
          continue
        if not search_file(request.repos, request.path_parts + [file.name],
                           request.pathrev, searchstr):
          continue
      if request.roottype == 'cvs' and file.dead:
        num_dead = num_dead + 1
        if hideattic:
          continue
      files_displayed += 1
      file_where = where_prefix + file.name
      if request.roottype == 'svn':
        row.size = file.size
      row.mime_type, encoding = calculate_mime_type(request,
                                                    _path_parts(file_where),
                                                    file.rev)
      fvi = get_file_view_info(request, file_where, file.rev, row.mime_type)
      row.view_href = fvi.view_href
      row.download_href = fvi.download_href
      row.download_text_href = fvi.download_text_href
      row.annotate_href = fvi.annotate_href
      row.revision_href = fvi.revision_href
      row.prefer_markup = fvi.prefer_markup
      row.is_viewable_image = fvi.is_viewable_image
      row.is_binary = fvi.is_binary
      row.log_href = request.get_url(view_func=view_log,
                                     where=file_where,
                                     pathtype=vclib.FILE,
                                     params={},
                                     escape=1)
      if cfg.options.use_cvsgraph and request.roottype == 'cvs':
        row.graph_href = request.get_url(view_func=view_cvsgraph,
                                         where=file_where,
                                         pathtype=vclib.FILE,
                                         params={},
                                         escape=1)
    rows.append(row)
  debug.t_end("row-building")
  # Prepare the data that will be passed to the template, based on the
  # common template data.
  data = common_template_data(request)
  data.merge(TemplateData({
    'entries' : rows,
    'sortby' : sortby,
    'sortdir' : sortdir,
    'search_re' : request.server.escape(search_re),
    'dir_pagestart' : None,
    'sortby_file_href' : request.get_url(params={'sortby': 'file',
                                                 'sortdir': None},
                                         escape=1),
    'sortby_rev_href' : request.get_url(params={'sortby': 'rev',
                                                'sortdir': None},
                                        escape=1),
    'sortby_date_href' : request.get_url(params={'sortby': 'date',
                                                 'sortdir': None},
                                         escape=1),
    'sortby_author_href' : request.get_url(params={'sortby': 'author',
                                                   'sortdir': None},
                                           escape=1),
    'sortby_log_href' : request.get_url(params={'sortby': 'log',
                                                'sortdir': None},
                                        escape=1),
    'files_shown' : files_displayed,
    'dirs_shown' : dirs_displayed,
    'num_dead' : num_dead,
    'youngest_rev' : None,
    'youngest_rev_href' : None,
    'selection_form' : None,
    'attic_showing' : None,
    'show_attic_href' : None,
    'hide_attic_href' : None,
    'branch_tags': None,
    'plain_tags': None,
    'properties': get_itemprops(request, request.path_parts, request.pathrev),
    'tree_rev' : None,
    'tree_rev_href' : None,
    'dir_paging_action' : None,
    'dir_paging_hidden_values' : [],
    'search_re_action' : None,
    'search_re_hidden_values' : [],
    # Populated by paging()/paging_sws()
    'picklist' : [],
    'picklist_len' : 0,
    # Populated by pathrev_form()
    'pathrev_action' : None,
    'pathrev_hidden_values' : [],
    'pathrev_clear_action' : None,
    'pathrev_clear_hidden_values' : [],
    'pathrev' : None,
    'lastrev' : None,
  }))
  # clicking on sort column reverses sort order
  if sortdir == 'down':
    revsortdir = None # 'up'
  else:
    revsortdir = 'down'
  if sortby in ['file', 'rev', 'date', 'log', 'author']:
    data['sortby_%s_href' % sortby] = request.get_url(params={'sortdir':
                                                              revsortdir},
                                                      escape=1)
  # CVS doesn't support sorting by rev
  if request.roottype == "cvs":
    data['sortby_rev_href'] = None
  # set cvs-specific fields
  if request.roottype == 'cvs':
    plain_tags = options['cvs_tags']
    plain_tags.sort(icmp)
    plain_tags.reverse()
    data['plain_tags'] = []
    for plain_tag in plain_tags:
      data['plain_tags'].append(_item(name=plain_tag,revision=None))
    branch_tags = options['cvs_branches']
    branch_tags.sort(icmp)
    branch_tags.reverse()
    data['branch_tags'] = []
    for branch_tag in branch_tags:
      data['branch_tags'].append(_item(name=branch_tag,revision=None))
    data['attic_showing'] = ezt.boolean(not hideattic)
    data['show_attic_href'] = request.get_url(params={'hideattic': 0},
                                              escape=1)
    data['hide_attic_href'] = request.get_url(params={'hideattic': 1},
                                              escape=1)
  # set svn-specific fields
  elif request.roottype == 'svn':
    data['tree_rev'] = tree_rev
    data['tree_rev_href'] = request.get_url(view_func=view_revision,
                                            params={'revision': tree_rev},
                                            escape=1)
    data['youngest_rev'] = request.repos.get_youngest_revision()
    data['youngest_rev_href'] = request.get_url(view_func=view_revision,
                                                params={},
                                                escape=1)
  if cfg.options.dir_pagesize:
    data['dir_paging_action'], data['dir_paging_hidden_values'] = \
      request.get_form(params={'dir_pagestart': None})
  pathrev_form(request, data)
  if cfg.options.use_re_search:
    data['search_re_action'], data['search_re_hidden_values'] = \
      request.get_form(params={'search': None})
  if cfg.options.dir_pagesize:
    data['dir_pagestart'] = int(request.query_dict.get('dir_pagestart',0))
    data['entries'] = paging(data, 'entries', data['dir_pagestart'], 'name',
                             cfg.options.dir_pagesize)
  generate_page(request, "directory", data)
def paging(data, key, pagestart, local_name, pagesize):
  """Paginate data[key]; return the slice for the current page.

  Also populates data['picklist'] and data['picklist_len'] with page
  descriptors (start/end labels taken from each entry's LOCAL_NAME
  attribute) for the template's page-picker widget.
  """
  entries = data[key]
  # Create the picklist
  picklist = data['picklist'] = []
  for start in range(0, len(entries), pagesize):
    pick = _item(start=None, end=None, count=None, more=ezt.boolean(0))
    pick.start = getattr(entries[start], local_name)
    pick.count = start
    pick.page = (start / pagesize) + 1
    try:
      pick.end = getattr(entries[start + pagesize - 1], local_name)
    except IndexError:
      # Short final page: it ends at the last entry.
      pick.end = getattr(entries[-1], local_name)
    picklist.append(pick)
  data['picklist_len'] = len(picklist)
  # PAGESTART can exceed the entry count if a tag selection or search
  # shrank the list while we were on a later page; fall back to the
  # first page then.  (This check doesn't catch every such case.)
  if pagestart > len(entries):
    pagestart = 0
  pageend = pagestart + pagesize
  # Slice out this page's entries.
  return entries[pagestart:pageend]
def paging_sws(data, key, pagestart, local_name, pagesize,
               extra_pages, offset):
  """Implement sliding window-style paging.

  Builds data['picklist']/data['picklist_len'] describing the window of
  pages (at most EXTRA_PAGES beyond PAGESTART) and returns the slice of
  data[key] for the current page.  PAGESTART and OFFSET are absolute
  entry counts; data[key] holds only the entries from OFFSET onward.
  """
  # Create the picklist
  last_requested = pagestart + (extra_pages * pagesize)
  picklist = data['picklist'] = []
  for i in range(0, len(data[key]), pagesize):
    pick = _item(start=None, end=None, count=None, more=ezt.boolean(0))
    pick.start = getattr(data[key][i], local_name)
    pick.count = offset + i
    pick.page = (pick.count / pagesize) + 1
    try:
      pick.end = getattr(data[key][i+pagesize-1], local_name)
    except IndexError:
      # Short final page: it ends at the last entry.
      pick.end = getattr(data[key][-1], local_name)
    picklist.append(pick)
    if pick.count >= last_requested:
      # More pages exist beyond the sliding window; flag it and stop.
      pick.more = ezt.boolean(1)
      break
  data['picklist_len'] = len(picklist)
  first = pagestart - offset
  # FIRST can be greater than the length of data[key] if you select a
  # tag or search while on a page other than the first; reset to the
  # first page in that case.  (Previously this reset the no-longer-used
  # 'pagestart' variable, which had no effect.)
  if first > len(data[key]):
    first = 0
  pageend = first + pagesize
  # Slice
  return data[key][first:pageend]
def pathrev_form(request, data):
  """Populate DATA's sticky-revision form fields.

  Returns the item's "last changed" revision when the sticky revision
  isn't the youngest one, else None.
  """
  lastrev = None
  if request.roottype == 'svn':
    # Subversion gets a redirect-based form so bogus revision numbers
    # can be normalized server-side by redirect_pathrev().
    data['pathrev_action'], data['pathrev_hidden_values'] = \
      request.get_form(view_func=redirect_pathrev,
                       params={'pathrev': None,
                               'orig_path': request.where,
                               'orig_pathtype': request.pathtype,
                               'orig_pathrev': request.pathrev,
                               'orig_view': _view_codes.get(request.view_func)})
    if request.pathrev:
      youngest = request.repos.get_youngest_revision()
      lastrev = request.repos.last_rev(request.where, request.pathrev,
                                       youngest)[0]
      if lastrev == youngest:
        lastrev = None
  data['pathrev'] = request.pathrev
  data['lastrev'] = lastrev
  clear_action, clear_hidden = request.get_form(params={'pathrev': lastrev})
  if request.roottype != 'svn':
    # Non-Subversion roots share the simple form for setting pathrev.
    data['pathrev_action'] = clear_action
    data['pathrev_hidden_values'] = clear_hidden
  data['pathrev_clear_action'] = clear_action
  data['pathrev_clear_hidden_values'] = clear_hidden
  return lastrev
def redirect_pathrev(request):
  """Normalize a user-supplied sticky revision and redirect to it
  (Subversion only)."""
  assert request.roottype == 'svn'
  requested_rev = request.query_dict.get('pathrev') or None
  path = request.query_dict.get('orig_path', '')
  pathtype = request.query_dict.get('orig_pathtype')
  pathrev = request.query_dict.get('orig_pathrev')
  view = _views.get(request.query_dict.get('orig_view'))
  youngest = request.repos.get_youngest_revision()
  # Go out of the way to tolerate revision numbers beyond HEAD: clamp
  # them (and non-numeric input) to youngest.  None passes through.
  try:
    requested_rev = int(requested_rev)
  except ValueError:
    requested_rev = youngest
  except TypeError:
    pass
  else:
    if requested_rev > youngest:
      requested_rev = youngest
    if _repos_pathtype(request.repos, _path_parts(path), requested_rev):
      pathrev = requested_rev
    else:
      # Path doesn't exist at the requested revision; track its history.
      pathrev, path = request.repos.last_rev(path, pathrev, requested_rev)
  # An empty submission clears the sticky revision when we're at HEAD.
  if requested_rev is None and pathrev == youngest:
    pathrev = None
  request.server.redirect(request.get_url(view_func=view,
                                          where=path,
                                          pathtype=pathtype,
                                          params={'pathrev': pathrev}))
def view_log(request):
  """Handle the revision-log view for a file or (non-CVS) directory.

  Collects the item's revision history, decorates each revision with
  view/download/diff links, and renders the "log" template, optionally
  paginated via paging_sws().
  """
  cfg = request.cfg
  diff_format = request.query_dict.get('diff_format', cfg.options.diff_format)
  pathtype = request.pathtype
  if pathtype is vclib.DIR:
    if request.roottype == 'cvs':
      raise debug.ViewVCException('Unsupported feature: log view on CVS '
                                  'directory', '400 Bad Request')
    mime_type = encoding = None
  else:
    mime_type, encoding = calculate_mime_type(request,
                                              request.path_parts,
                                              request.pathrev)
  options = {}
  options['svn_show_all_dir_logs'] = 1 ### someday make this optional?
  options['svn_cross_copies'] = cfg.options.cross_copies
  # Map the requested sort order to a vclib sort constant; Subversion
  # always uses the default order.
  logsort = request.query_dict.get('logsort', cfg.options.log_sort)
  if request.roottype == "svn":
    sortby = vclib.SORTBY_DEFAULT
    logsort = None
  else:
    if logsort == 'date':
      sortby = vclib.SORTBY_DATE
    elif logsort == 'rev':
      sortby = vclib.SORTBY_REV
    else:
      sortby = vclib.SORTBY_DEFAULT
  # When paging, fetch a window of revisions around the current page
  # rather than the whole history.
  first = last = 0
  log_pagestart = None
  if cfg.options.log_pagesize:
    log_pagestart = int(request.query_dict.get('log_pagestart', 0))
    total = cfg.options.log_pagesextra * cfg.options.log_pagesize
    first = log_pagestart - min(log_pagestart, total)
    last = log_pagestart + (total + cfg.options.log_pagesize) + 1
  show_revs = request.repos.itemlog(request.path_parts, request.pathrev,
                                    sortby, first, last - first, options)
  # selected revision
  selected_rev = request.query_dict.get('r1')
  entries = [ ]
  name_printed = { }
  cvs = request.roottype == 'cvs'
  for rev in show_revs:
    entry = _item()
    entry.rev = rev.string
    entry.state = (cvs and rev.dead and 'dead')
    entry.author = rev.author
    entry.changed = rev.changed
    entry.date = make_time_string(rev.date, cfg)
    entry.ago = None
    if rev.date is not None:
      entry.ago = html_time(request, rev.date, 1)
    entry.size = rev.size
    entry.lockinfo = rev.lockinfo
    entry.branch_point = None
    entry.next_main = None
    entry.orig_path = None
    entry.copy_path = None
    lf = LogFormatter(request, rev.log or '')
    entry.log = lf.get(maxlen=0, htmlize=1)
    entry.view_href = None
    entry.download_href = None
    entry.download_text_href = None
    entry.annotate_href = None
    entry.revision_href = None
    entry.sel_for_diff_href = None
    entry.diff_to_sel_href = None
    entry.diff_to_prev_href = None
    entry.diff_to_branch_href = None
    entry.diff_to_main_href = None
    if request.roottype == 'cvs':
      prev = rev.prev or rev.parent
      entry.prev = prev and prev.string
      branch = rev.branch_number
      entry.vendor_branch = ezt.boolean(branch and branch[2] % 2 == 1)
      entry.branches = prep_tags(request, rev.branches)
      entry.tags = prep_tags(request, rev.tags)
      entry.branch_points = prep_tags(request, rev.branch_points)
      entry.tag_names = map(lambda x: x.name, rev.tags)
      # Only print branch names the first time a branch is seen.
      if branch and not name_printed.has_key(branch):
        entry.branch_names = map(lambda x: x.name, rev.branches)
        name_printed[branch] = 1
      else:
        entry.branch_names = [ ]
      if rev.parent and rev.parent is not prev and not entry.vendor_branch:
        entry.branch_point = rev.parent.string
      # if it's the last revision on a branch then diff against the
      # last revision on the higher branch (e.g. change is committed and
      # brought over to -stable)
      if not rev.next and rev.parent and rev.parent.next:
        r = rev.parent.next
        while r.next:
          r = r.next
        entry.next_main = r.string
    elif request.roottype == 'svn':
      entry.prev = rev.prev and rev.prev.string
      entry.branches = entry.tags = entry.branch_points = [ ]
      entry.tag_names = entry.branch_names = [ ]
      entry.vendor_branch = None
      if rev.filename != request.where:
        entry.orig_path = rev.filename
      entry.copy_path = rev.copy_path
      entry.copy_rev = rev.copy_rev
      if entry.orig_path:
        entry.orig_href = request.get_url(view_func=view_log,
                                          where=entry.orig_path,
                                          pathtype=vclib.FILE,
                                          params={'pathrev': rev.string},
                                          escape=1)
      if rev.copy_path:
        entry.copy_href = request.get_url(view_func=view_log,
                                          where=rev.copy_path,
                                          pathtype=vclib.FILE,
                                          params={'pathrev': rev.copy_rev},
                                          escape=1)
    # view/download links
    if pathtype is vclib.FILE:
      fvi = get_file_view_info(request, request.where, rev.string, mime_type)
      entry.view_href = fvi.view_href
      entry.download_href = fvi.download_href
      entry.download_text_href = fvi.download_text_href
      entry.annotate_href = fvi.annotate_href
      entry.revision_href = fvi.revision_href
      entry.prefer_markup = fvi.prefer_markup
    else:
      entry.revision_href = request.get_url(view_func=view_revision,
                                            params={'revision': rev.string},
                                            escape=1)
      entry.view_href = request.get_url(view_func=view_directory,
                                        where=rev.filename,
                                        pathtype=vclib.DIR,
                                        params={'pathrev': rev.string},
                                        escape=1)
    # calculate diff links
    if selected_rev != entry.rev:
      entry.sel_for_diff_href = \
        request.get_url(view_func=view_log,
                        params={'r1': entry.rev,
                                'log_pagestart': log_pagestart},
                        escape=1)
    if entry.prev is not None:
      entry.diff_to_prev_href = \
        request.get_url(view_func=view_diff,
                        params={'r1': entry.prev,
                                'r2': entry.rev,
                                'diff_format': None},
                        escape=1)
    if selected_rev and \
       selected_rev != str(entry.rev) and \
       selected_rev != str(entry.prev) and \
       selected_rev != str(entry.branch_point) and \
       selected_rev != str(entry.next_main):
      entry.diff_to_sel_href = \
        request.get_url(view_func=view_diff,
                        params={'r1': selected_rev,
                                'r2': entry.rev,
                                'diff_format': None},
                        escape=1)
    if entry.next_main:
      entry.diff_to_main_href = \
        request.get_url(view_func=view_diff,
                        params={'r1': entry.next_main,
                                'r2': entry.rev,
                                'diff_format': None},
                        escape=1)
    if entry.branch_point:
      entry.diff_to_branch_href = \
        request.get_url(view_func=view_diff,
                        params={'r1': entry.branch_point,
                                'r2': entry.rev,
                                'diff_format': None},
                        escape=1)
    # Save our escaping until the end so stuff above works
    if entry.orig_path:
      entry.orig_path = request.server.escape(entry.orig_path)
    if entry.copy_path:
      entry.copy_path = request.server.escape(entry.copy_path)
    entries.append(entry)
  diff_select_action, diff_select_hidden_values = \
    request.get_form(view_func=view_diff,
                     params={'r1': None, 'r2': None, 'tr1': None,
                             'tr2': None, 'diff_format': None})
  logsort_action, logsort_hidden_values = \
    request.get_form(params={'logsort': None})
  data = common_template_data(request)
  data.merge(TemplateData({
    'default_branch' : None,
    'mime_type' : mime_type,
    'rev_selected' : selected_rev,
    'diff_format' : diff_format,
    'logsort' : logsort,
    'human_readable' : ezt.boolean(diff_format in ('f', 'h', 'l')),
    'log_pagestart' : None,
    'log_paging_action' : None,
    'log_paging_hidden_values' : [],
    'entries': entries,
    'head_prefer_markup' : ezt.boolean(0),
    'head_view_href' : None,
    'head_download_href': None,
    'head_download_text_href': None,
    'head_annotate_href': None,
    'tag_prefer_markup' : ezt.boolean(0),
    'tag_view_href' : None,
    'tag_download_href': None,
    'tag_download_text_href': None,
    'tag_annotate_href': None,
    'diff_select_action' : diff_select_action,
    'diff_select_hidden_values' : diff_select_hidden_values,
    'logsort_action' : logsort_action,
    'logsort_hidden_values' : logsort_hidden_values,
    'tags' : [],
    'branch_tags' : [],
    'plain_tags' : [],
    # Populated by paging()/paging_sws()
    'picklist' : [],
    'picklist_len' : 0,
    # Populated by pathrev_form()
    'pathrev_action' : None,
    'pathrev_hidden_values' : [],
    'pathrev_clear_action' : None,
    'pathrev_clear_hidden_values' : [],
    'pathrev' : None,
    'lastrev' : None,
  }))
  lastrev = pathrev_form(request, data)
  # HEAD (and, for CVS sticky tags, tag-specific) view/download links.
  if pathtype is vclib.FILE:
    if not request.pathrev or lastrev is None:
      fvi = get_file_view_info(request, request.where, None, mime_type, None)
      data['head_view_href']= fvi.view_href
      data['head_download_href']= fvi.download_href
      data['head_download_text_href']= fvi.download_text_href
      data['head_annotate_href']= fvi.annotate_href
      data['head_prefer_markup']= fvi.prefer_markup
    if request.pathrev and request.roottype == 'cvs':
      fvi = get_file_view_info(request, request.where, None, mime_type)
      data['tag_view_href']= fvi.view_href
      data['tag_download_href']= fvi.download_href
      data['tag_download_text_href']= fvi.download_text_href
      data['tag_annotate_href']= fvi.annotate_href
      data['tag_prefer_markup']= fvi.prefer_markup
  else:
    data['head_view_href'] = request.get_url(view_func=view_directory,
                                             params={}, escape=1)
  # CVS tag info ('cvs_tags' is only populated for CVS roots).
  taginfo = options.get('cvs_tags', {})
  tagitems = taginfo.items()
  tagitems.sort()
  tagitems.reverse()
  main = taginfo.get('MAIN')
  if main:
    # Default branch may have multiple names so we list them
    branches = []
    for branch in main.aliases:
      # Don't list MAIN
      if branch is not main:
        branches.append(branch)
    data['default_branch'] = prep_tags(request, branches)
  for tag, rev in tagitems:
    rev_str = None
    if rev.number:
      rev_str = '.'.join(map(str, rev.number))
    if rev.co_rev:
      data['tags'].append(_item(rev=rev.co_rev.string, name=tag))
    if rev.is_branch:
      data['branch_tags'].append(_item(name=tag,revision=rev_str))
    else:
      data['plain_tags'].append(_item(name=tag,revision=rev_str))
  if cfg.options.log_pagesize:
    data['log_paging_action'], data['log_paging_hidden_values'] = \
      request.get_form(params={'log_pagestart': None,
                               'r1': selected_rev,
                               })
    data['log_pagestart'] = int(request.query_dict.get('log_pagestart',0))
    data['entries'] = paging_sws(data, 'entries', data['log_pagestart'],
                                 'rev', cfg.options.log_pagesize,
                                 cfg.options.log_pagesextra, first)
  generate_page(request, "log", data)
def view_checkout(request):
  """Stream the raw contents of the requested file revision to the client."""
  cfg = request.cfg

  # Guard clauses: the view must be allowed and the path must be a file.
  if 'co' not in cfg.options.allowed_views:
    raise debug.ViewVCException('Checkout view is disabled',
                                 '403 Forbidden')
  if request.pathtype != vclib.FILE:
    raise debug.ViewVCException('Unsupported feature: checkout view on '
                                'directory', '400 Bad Request')

  path, rev = _orig_path(request)
  fp, revision = request.repos.openfile(path, rev, {})

  # The revision number acts as a strong validator.
  if not check_freshness(request, None, revision):
    mime_type, encoding = calculate_mime_type(request, path, rev)
    # An explicit content-type query parameter wins; otherwise use the
    # calculated MIME type, defaulting to plain text.
    content_type = request.query_dict.get('content-type')
    if not content_type:
      content_type = mime_type or 'text/plain'
    copy_stream(fp, get_writeready_server_file(request, content_type,
                                               encoding))
  fp.close()
def cvsgraph_make_reqopt(request, cfgname, queryparam, optvalue):
  """Return cvsgraph option substring OPTVALUE when the user both may
  and did request the corresponding option.

  CFGNAME must appear in the configuration's list of user-overridable
  cvsgraph options, and QUERYPARAM must be present in the request with
  a true (non-zero) value.  Otherwise return the empty string, leaving
  the option at its configured default.
  """
  if cfgname in request.cfg.options.allowed_cvsgraph_useropts:
    if int(request.query_dict.get(queryparam, 0)):
      return optvalue
  return ''
def cvsgraph_normalize_gshow(request):
  """Return the effective value of the 'gshow' query parameter.

  A missing parameter means gshow=all, and an unrecognized value is
  treated as gshow=all, too.
  """
  value = request.query_dict.get('gshow', 'all')
  return value if value in ('all', 'inittagged', 'tagged') else 'all'
def cvsgraph_extraopts(request):
  """Build the cvsgraph '-O' custom-options argument string.

  Combine the user's requested graph tweaks -- each filtered against
  the set of options the configuration allows users to override -- into
  a single string suitable for passing to the cvsgraph utility.
  """
  cfg = request.cfg

  ep = '-O'

  # Simple mappings of boolean flags
  ep = ep + cvsgraph_make_reqopt(request, 'invert', 'gflip',
                                 ';upside_down=true')
  ep = ep + cvsgraph_make_reqopt(request, 'branchbox', 'gbbox',
                                 ';branch_dupbox=true')
  ep = ep + cvsgraph_make_reqopt(request, 'rotate', 'gleft',
                                 ';left_right=true')

  # Stripping is a little more complex.
  if ('show' in request.cfg.options.allowed_cvsgraph_useropts):
    gshow = cvsgraph_normalize_gshow(request)
    if gshow == 'inittagged':
      ep = ep + ';strip_untagged=true'
    elif gshow == 'tagged':
      ep = ep + ';strip_untagged=true;strip_first_rev=true'

  # And tag limitation has a user-supplied value to mess with.
  # (Use 'in' rather than the deprecated, Python-2-only dict.has_key(),
  # consistent with the 'show' check above.)
  if ('limittags' in request.cfg.options.allowed_cvsgraph_useropts) \
     and 'gmaxtag' in request.query_dict:
    ep = ep + ';rev_maxtags=' + request.query_dict['gmaxtag']

  return ep + ';'
def view_cvsgraph_image(request):
  """Stream the PNG revision graph rendered by the cvsgraph utility."""
  # this function is derived from cgi/cvsgraphmkimg.cgi
  cfg = request.cfg

  if not cfg.options.use_cvsgraph:
    raise debug.ViewVCException('Graph view is disabled', '403 Forbidden')

  # If cvsgraph can't find its supporting libraries, uncomment and set
  # accordingly.  Do the same in view_cvsgraph().
  #os.environ['LD_LIBRARY_PATH'] = '/usr/lib:/usr/local/lib:/path/to/cvsgraph'

  rcsfile = request.repos.rcsfile(request.path_parts)
  cvsgraph_args = ("-c", cfg.path(cfg.options.cvsgraph_conf),
                   "-r", request.repos.rootpath,
                   cvsgraph_extraopts(request),
                   rcsfile)
  graph_fp = popen.popen(cfg.utilities.cvsgraph or 'cvsgraph',
                         cvsgraph_args, 'rb', 0)
  copy_stream(graph_fp, get_writeready_server_file(request, 'image/png'))
  graph_fp.close()
def view_cvsgraph(request):
  """Generate the HTML page that embeds the revision graph image
  produced by view_cvsgraph_image(), plus its clickable image map."""
  cfg = request.cfg
  if not cfg.options.use_cvsgraph:
    raise debug.ViewVCException('Graph view is disabled', '403 Forbidden')
  # If cvsgraph can't find its supporting libraries, uncomment and set
  # accordingly. Do the same in view_cvsgraph_image().
  #os.environ['LD_LIBRARY_PATH'] = '/usr/lib:/usr/local/lib:/path/to/cvsgraph'
  imagesrc = request.get_url(view_func=view_cvsgraph_image, escape=1)
  mime_type = guess_mime(request.where)
  view = default_view(mime_type, cfg)
  up_where = _path_join(request.path_parts[:-1])
  # Create an image map.  cvsgraph's -i mode emits HTML map markup; the
  # -3 through -6 arguments supply URL templates for the map's links
  # (log view, revision view, diff view, and parent directory).
  rcsfile = request.repos.rcsfile(request.path_parts)
  fp = popen.popen(cfg.utilities.cvsgraph or 'cvsgraph',
                   ("-i",
                    "-c", cfg.path(cfg.options.cvsgraph_conf),
                    "-r", request.repos.rootpath,
                    "-x", "x",
                    "-3", request.get_url(view_func=view_log, params={},
                                          escape=1),
                    "-4", request.get_url(view_func=view,
                                          params={'revision': None},
                                          escape=1, partial=1),
                    "-5", request.get_url(view_func=view_diff,
                                          params={'r1': None, 'r2': None},
                                          escape=1, partial=1),
                    "-6", request.get_url(view_func=view_directory,
                                          where=up_where,
                                          pathtype=vclib.DIR,
                                          params={'pathrev': None},
                                          escape=1, partial=1),
                    cvsgraph_extraopts(request),
                    rcsfile), 'rb', 0)
  # Self-referential form target used by the graph-options form.
  graph_action, graph_hidden_values = \
    request.get_form(view_func=view_cvsgraph, params={})
  # The opt_* flags tell the template which user-overridable options to
  # render controls for; the g* values reflect the current request.
  data = common_template_data(request)
  data.merge(TemplateData({
    'imagemap' : fp,
    'imagesrc' : imagesrc,
    'graph_action' : graph_action,
    'graph_hidden_values' : graph_hidden_values,
    'opt_gflip' : ezt.boolean('invert' in cfg.options.allowed_cvsgraph_useropts),
    'opt_gbbox' : ezt.boolean('branchbox' in cfg.options.allowed_cvsgraph_useropts),
    'opt_gshow' : ezt.boolean('show' in cfg.options.allowed_cvsgraph_useropts),
    'opt_gleft' : ezt.boolean('rotate' in cfg.options.allowed_cvsgraph_useropts),
    'opt_gmaxtag' : ezt.boolean('limittags' in cfg.options.allowed_cvsgraph_useropts),
    'gflip' : ezt.boolean(int(request.query_dict.get('gflip', 0))),
    'gbbox' : ezt.boolean(int(request.query_dict.get('gbbox', 0))),
    'gleft' : ezt.boolean(int(request.query_dict.get('gleft', 0))),
    'gmaxtag' : request.query_dict.get('gmaxtag', 0),
    'gshow' : cvsgraph_normalize_gshow(request),
    }))
  generate_page(request, "graph", data)
def search_file(repos, path_parts, rev, search_re):
  """Return 1 iff the contents of the file at PATH_PARTS in REPOS as
  of revision REV matches regular expression SEARCH_RE."""
  # Read in each line of a checked-out file, and then use re.search to
  # search line.
  fp = repos.openfile(path_parts, rev, {})[0]
  matches = 0
  try:
    while 1:
      line = fp.readline()
      if not line:
        break
      if search_re.search(line):
        matches = 1
        break
  finally:
    # Always close the file -- the original code leaked the handle
    # whenever no line matched (close was only reached on a match).
    fp.close()
  return matches
def view_doc(request):
  """Serve ViewVC static content locally.

  Using this avoids the need for modifying the setup of the web server.
  The document path (request.where) is resolved under the template
  directory's "docroot"; a 404 is raised when the file is missing or
  unreadable.
  """
  cfg = request.cfg
  document = request.where
  filename = cfg.path(os.path.join(cfg.options.template_dir,
                                   "docroot", document))

  # Stat the file to get content length and last-modified date.
  # ('except X as v' replaces the Python-2-only 'except X, v' syntax;
  # it works on Python 2.6+ as well.)
  try:
    info = os.stat(filename)
  except OSError as v:
    raise debug.ViewVCException('Static file "%s" not available (%s)'
                                 % (document, str(v)), '404 Not Found')
  content_length = str(info[stat.ST_SIZE])
  last_modified = info[stat.ST_MTIME]

  # content_length + mtime makes a pretty good etag.
  if check_freshness(request, last_modified,
                     "%s-%s" % (content_length, last_modified)):
    return

  try:
    fp = open(filename, "rb")
  except IOError as v:
    raise debug.ViewVCException('Static file "%s" not available (%s)'
                                 % (document, str(v)), '404 Not Found')

  # Map the (3-character) file extension onto a MIME type; anything
  # unrecognized falls through to None, which is treated as HTML.
  mime_type = {
    'png' : 'image/png',
    'jpg' : 'image/jpeg',
    'gif' : 'image/gif',
    'css' : 'text/css',
    'txt' : 'text/plain',
    'ico' : 'image/x-icon',
    }.get(document[-3:])
  copy_stream(fp, get_writeready_server_file(request, mime_type,
                                             content_length=content_length))
  fp.close()
def rcsdiff_date_reformat(date_str, cfg):
  """Reformat an rcsdiff header timestamp into ViewVC's display format.

  Return None when DATE_STR is None, and return DATE_STR unchanged when
  it cannot be parsed as a CVS timestamp.
  """
  if date_str is None:
    return None
  try:
    parsed = vclib.ccvs.cvs_strptime(date_str)
  except ValueError:
    return date_str
  else:
    return make_time_string(calendar.timegm(parsed), cfg)
# Matches a context/unified diff file-header line ("--- PATH<TAB>DATE<TAB>REV"
# or the '+++'/'***' variants), capturing the date (group 1) and the dotted
# revision number (group 2).
_re_extract_rev = re.compile(r'^[-+*]{3} [^\t]+\t([^\t]+)\t((\d+\.)*\d+)$')
# Matches a unified-diff hunk header "@@ -N,... +M,... @@ extra", capturing
# the left start line (group 1), right start line (group 2), and trailing
# context text (group 3).
_re_extract_info = re.compile(r'@@ \-([0-9]+).*\+([0-9]+).*@@(.*)')
class DiffSource:
  """Adapt a raw unified-diff stream into a sequence of display items.

  Instances are indexed sequentially via __getitem__ (as by an ezt
  template loop) and yield _item objects of type 'header', 'context',
  'add', 'remove', or 'change', with all text already escaped for HTML
  display.  Runs of '-' lines followed by '+' lines are paired up and
  reported as 'change' rows (see _flush_row)."""
  def __init__(self, fp, cfg):
    # fp: file object producing unified-diff text.
    # cfg: configuration; consulted for tab expansion (options.tabsize)
    #      and line-breaking (options.hr_breakable) in _format_text.
    self.fp = fp
    self.cfg = cfg
    # A pushed-back input line, re-read by _get_row after a flush.
    self.save_line = None
    self.line_number = None
    self.prev_line_number = None
    # keep track of where we are during an iteration
    self.idx = -1
    self.last = None
    # these will be set once we start reading
    self.state = 'no-changes'
    self.left_col = [ ]
    self.right_col = [ ]
  def __getitem__(self, idx):
    """Return display row IDX; rows must be requested in order.

    Raises DiffSequencingError for out-of-order access and IndexError
    (the normal sequence-protocol end signal) when the diff is
    exhausted."""
    if idx == self.idx:
      return self.last
    if idx != self.idx + 1:
      raise DiffSequencingError()
    # keep calling _get_row until it gives us something. sometimes, it
    # doesn't return a row immediately because it is accumulating changes.
    # when it is out of data, _get_row will raise IndexError.
    while 1:
      item = self._get_row()
      if item:
        self.idx = idx
        self.last = item
        return item
  def _format_text(self, text):
    """Escape TEXT for HTML display, expanding tabs and optionally
    inserting break opportunities per cfg.options.hr_breakable."""
    text = text.rstrip('\r\n')
    if self.cfg.options.tabsize > 0:
      text = text.expandtabs(self.cfg.options.tabsize)
    hr_breakable = self.cfg.options.hr_breakable
    # in the code below, "\x01" will be our stand-in for "&". We don't want
    # to insert "&" because it would get escaped by sapi.escape(). Similarly,
    # we use "\x02" as a stand-in for "<br>"
    if hr_breakable > 1 and len(text) > hr_breakable:
      # hr_breakable > 1 means "force a break every hr_breakable chars".
      text = re.sub('(' + ('.' * hr_breakable) + ')', '\\1\x02', text)
    if hr_breakable:
      # make every other space "breakable"
      text = text.replace('  ', ' \x01nbsp;')
    else:
      text = text.replace(' ', '\x01nbsp;')
    text = sapi.escape(text)
    text = text.replace('\x01', '&')
    text = text.replace('\x02', '<span style="color:red">\</span><br />')
    return text
  def _get_row(self):
    """Read input and return the next display item, or None while
    accumulating a change block.  Raises IndexError at end of data."""
    # Drain any pending flush of accumulated left/right columns first.
    if self.state[:5] == 'flush':
      item = self._flush_row()
      if item:
        return item
      self.state = 'dump'
    # Prefer a pushed-back line over reading a fresh one.
    if self.save_line:
      line = self.save_line
      self.save_line = None
    else:
      line = self.fp.readline()
    if not line:
      if self.state == 'no-changes':
        self.state = 'done'
        return _item(type=_RCSDIFF_NO_CHANGES)
      # see if there are lines to flush
      if self.left_col or self.right_col:
        # move into the flushing state
        self.state = 'flush-' + self.state
        return None
      # nothing more to return
      raise IndexError
    if line[:2] == '@@':
      # Hunk header: reset columns and line counters from the hunk's
      # "-N" (left) and "+M" (right) start lines.
      self.state = 'dump'
      self.left_col = [ ]
      self.right_col = [ ]
      match = _re_extract_info.match(line)
      self.line_number = int(match.group(2)) - 1
      self.prev_line_number = int(match.group(1)) - 1
      return _item(type='header',
                   line_info_left=match.group(1),
                   line_info_right=match.group(2),
                   line_info_extra=self._format_text(match.group(3)))
    if line[0] == '\\':
      # \ No newline at end of file
      # Just skip. This code used to move to flush state, but that resulted in
      # changes being displayed as removals-and-readditions.
      return None
    diff_code = line[0]
    output = self._format_text(line[1:])
    if diff_code == '+':
      if self.state == 'dump':
        # An add with no pending removals is a plain addition.
        self.line_number = self.line_number + 1
        return _item(type='add', right=output, line_number=self.line_number)
      # Otherwise it may pair with pending '-' lines into a change.
      self.state = 'pre-change-add'
      self.right_col.append(output)
      return None
    if diff_code == '-':
      self.state = 'pre-change-remove'
      self.left_col.append(output)
      return None # early exit to avoid line in
    # A context line ends any accumulated change block.
    if self.left_col or self.right_col:
      # save the line for processing again later, and move into the
      # flushing state
      self.save_line = line
      self.state = 'flush-' + self.state
      return None
    self.line_number = self.line_number + 1
    self.prev_line_number = self.prev_line_number + 1
    return _item(type='context', left=output, right=output,
                 line_number=self.line_number)
  def _flush_row(self):
    """Emit one row from the accumulated left/right columns: 'remove'
    rows for unpaired removals, 'change' rows for paired edits."""
    if not self.left_col and not self.right_col:
      # nothing more to flush
      return None
    if self.state == 'flush-pre-change-remove':
      self.prev_line_number = self.prev_line_number + 1
      return _item(type='remove', left=self.left_col.pop(0),
                   line_number=self.prev_line_number)
    # state == flush-pre-change-add
    item = _item(type='change',
                 have_left=ezt.boolean(0),
                 have_right=ezt.boolean(0))
    if self.left_col:
      self.prev_line_number = self.prev_line_number + 1
      item.have_left = ezt.boolean(1)
      item.left = self.left_col.pop(0)
      item.line_number = self.prev_line_number
    if self.right_col:
      self.line_number = self.line_number + 1
      item.have_right = ezt.boolean(1)
      item.right = self.right_col.pop(0)
      item.line_number = self.line_number
    return item
class DiffSequencingError(Exception):
  """Raised when a DiffSource is indexed out of sequential order."""
  pass
def diff_parse_headers(fp, diff_type, path1, path2, rev1, rev2,
                       sym1=None, sym2=None):
  """Consume and rewrite the header lines of diff stream FP.

  Read lines from FP up through the second file header, rewriting the
  two file-header lines ('---'/'+++' for unified diffs, '***'/'---'
  for context diffs) to show PATH1/PATH2 and optional tag names
  SYM1/SYM2.  Return a 4-tuple (date1, date2, flag, headers) where
  flag is None on success or one of _RCSDIFF_NO_CHANGES (empty
  stream), _RCSDIFF_IS_BINARY, or _RCSDIFF_ERROR, and headers is the
  (possibly rewritten) header text consumed so far.

  Raise a ViewVCException if a revision found in the headers does not
  match the expected REV1/REV2.
  """
  date1 = date2 = log_rev1 = log_rev2 = flag = None
  header_lines = []

  # Pick the header-line prefixes appropriate to the diff format;
  # other formats carry no parseable headers.
  if diff_type == vclib.UNIFIED:
    f1 = '--- '
    f2 = '+++ '
  elif diff_type == vclib.CONTEXT:
    f1 = '*** '
    f2 = '--- '
  else:
    f1 = f2 = None

  # If we're parsing headers, then parse and tweak the diff headers,
  # collecting them in an array until we've read and handled them all.
  if f1 and f2:
    parsing = 1
    flag = _RCSDIFF_NO_CHANGES
    while parsing:
      line = fp.readline()
      if not line:
        break

      # Saw at least one line in the stream
      flag = None

      if line.startswith(f1):
        match = _re_extract_rev.match(line)
        if match:
          date1 = match.group(1)
          log_rev1 = match.group(2)
          line = '%s%s\t%s\t%s%s\n' % (f1, path1, date1, log_rev1,
                                       sym1 and ' ' + sym1 or '')
      elif line.startswith(f2):
        match = _re_extract_rev.match(line)
        if match:
          date2 = match.group(1)
          log_rev2 = match.group(2)
          line = '%s%s\t%s\t%s%s\n' % (f2, path2, date2, log_rev2,
                                       sym2 and ' ' + sym2 or '')
        # The second file header is the last header line; stop after it.
        parsing = 0
      elif line[:3] == 'Bin':
        flag = _RCSDIFF_IS_BINARY
        parsing = 0
      elif (line.find('not found') != -1 or
            line.find('illegal option') != -1):
        flag = _RCSDIFF_ERROR
        parsing = 0
      header_lines.append(line)

  # A revision mismatch between the diff output and what the caller
  # expected indicates a server-side problem.
  if (log_rev1 and log_rev1 != rev1):
    raise debug.ViewVCException('rcsdiff found revision %s, but expected '
                                'revision %s' % (log_rev1, rev1),
                                '500 Internal Server Error')
  if (log_rev2 and log_rev2 != rev2):
    raise debug.ViewVCException('rcsdiff found revision %s, but expected '
                                'revision %s' % (log_rev2, rev2),
                                '500 Internal Server Error')

  return date1, date2, flag, ''.join(header_lines)
def _get_diff_path_parts(request, query_key, rev, base_rev):
  """Return the path (as a list of components) for one side of a diff.

  Prefer an explicit path carried in the QUERY_KEY request parameter;
  otherwise, for Subversion roots, trace the history of the request's
  path from BASE_REV back to REV; otherwise just use the request's
  own path.  Raise a 400 ViewVCException when history tracing fails.
  """
  repos = request.repos
  # ('in' replaces the deprecated, Python-2-only dict.has_key().)
  if query_key in request.query_dict:
    parts = _path_parts(request.query_dict[query_key])
  elif request.roottype == 'svn':
    try:
      parts = _path_parts(repos.get_location(request.where,
                                             repos._getrev(base_rev),
                                             repos._getrev(rev)))
    except (vclib.InvalidRevision, vclib.ItemNotFound):
      # Both failure modes get the same user-facing error.
      raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
                                  'to diff', '400 Bad Request')
  else:
    parts = request.path_parts
  return parts
def setup_diff(request):
  """Decode the r1/r2 (and p1/p2, tr1/tr2) diff query arguments.

  Return a 6-tuple (p1, p2, rev1, rev2, sym1, sym2) with the two sides
  ordered so that rev1 is the older revision.
  """
  query_dict = request.query_dict

  rev1 = r1 = query_dict['r1']
  rev2 = r2 = query_dict['r2']
  sym1 = sym2 = None

  # hack on the diff revisions: 'text' means "take the revision from
  # the form's text field"; otherwise split an optional ':TAG' suffix.
  if r1 == 'text':
    rev1 = query_dict.get('tr1', None)
    if not rev1:
      raise debug.ViewVCException('Missing revision from the diff '
                                  'form text field', '400 Bad Request')
  else:
    rev1, sep, tag = r1.partition(':')
    if sep:
      sym1 = tag

  if r2 == 'text':
    rev2 = query_dict.get('tr2', None)
    if not rev2:
      raise debug.ViewVCException('Missing revision from the diff '
                                  'form text field', '400 Bad Request')
    sym2 = ''
  else:
    rev2, sep, tag = r2.partition(':')
    if sep:
      sym2 = tag

  # Normalize Subversion revision specifiers to revision numbers.
  if request.roottype == 'svn':
    try:
      rev1 = str(request.repos._getrev(rev1))
      rev2 = str(request.repos._getrev(rev2))
    except vclib.InvalidRevision:
      raise debug.ViewVCException('Invalid revision(s) passed to diff',
                                  '400 Bad Request')

  p1 = _get_diff_path_parts(request, 'p1', rev1, request.pathrev)
  p2 = _get_diff_path_parts(request, 'p2', rev2, request.pathrev)

  # Order the sides so the older revision lands on the left.
  try:
    if revcmp(rev1, rev2) > 0:
      rev1, rev2 = rev2, rev1
      sym1, sym2 = sym2, sym1
      p1, p2 = p2, p1
  except ValueError:
    raise debug.ViewVCException('Invalid revision(s) passed to diff',
                                '400 Bad Request')

  return p1, p2, rev1, rev2, sym1, sym2
def view_patch(request):
  """Generate a raw text/plain diff ("patch") between two file revisions.

  Raises ViewVCException when diff generation is disabled, either file
  is binary, the diff format is unknown, or the paths/revisions are
  invalid."""
  if 'diff' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Diff generation is disabled',
                                '403 Forbidden')
  cfg = request.cfg
  query_dict = request.query_dict
  p1, p2, rev1, rev2, sym1, sym2 = setup_diff(request)
  # Refuse to emit diffs of files configured as binary.
  mime_type1, encoding1 = calculate_mime_type(request, p1, rev1)
  mime_type2, encoding2 = calculate_mime_type(request, p2, rev2)
  if is_binary_file_mime_type(mime_type1, cfg) or \
     is_binary_file_mime_type(mime_type2, cfg):
    raise debug.ViewVCException('Display of binary file content disabled '
                                'by configuration', '403 Forbidden')
  # In the absence of a format dictation in the CGI params, we'll use
  # the configured diff format, allowing 'c' to mean 'c' and
  # anything else to mean 'u'.
  format = query_dict.get('diff_format',
                          cfg.options.diff_format == 'c' and 'c' or 'u')
  if format == 'c':
    diff_type = vclib.CONTEXT
  elif format == 'u':
    diff_type = vclib.UNIFIED
  else:
    raise debug.ViewVCException('Diff format %s not understood'
                                % format, '400 Bad Request')
  # Set some diff options. (Are there other options folks might want?
  # Maybe not. For a patch, perhaps the precise change is ideal.)
  diff_options = {}
  diff_options['funout'] = cfg.options.hr_funout
  try:
    fp = request.repos.rawdiff(p1, rev1, p2, rev2, diff_type, diff_options)
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
                                'to diff', '400 Bad Request')
  path_left = _path_join(p1)
  path_right = _path_join(p2)
  # Rewrite the diff headers to show repository paths/tags, then stream
  # the remainder of the raw diff straight through.
  date1, date2, flag, headers = diff_parse_headers(fp, diff_type,
                                                   path_left, path_right,
                                                   rev1, rev2, sym1, sym2)
  server_fp = get_writeready_server_file(request, 'text/plain')
  server_fp.write(headers)
  copy_stream(fp, server_fp)
  fp.close()
def diff_side_item(request, path_comp, rev, sym):
  '''Prepare information about left/right side of the diff. Prepare two flavors,
  for content and for property diffs.  Returns (i_content, i_prop):
  i_content carries view/download/annotate hrefs; i_prop instead carries
  the item's properties hash (there is no view for properties).'''
  # TODO: Is the slice necessary, or is limit enough?
  options = {'svn_show_all_dir_logs': 1}
  log_entry = request.repos.itemlog(path_comp, rev, vclib.SORTBY_REV,
                                    0, 1, options)[-1]
  # Human-friendly age string, only when the log entry carries a date.
  ago = log_entry.date is not None \
        and html_time(request, log_entry.date, 1) or None
  path_joined = _path_join(path_comp)
  lf = LogFormatter(request, log_entry.log)
  # Item for property diff: no hrefs, there's no view
  # to download/annotate property
  i_prop = _item(log_entry=log_entry,
                 date=make_time_string(log_entry.date, request.cfg),
                 author=log_entry.author,
                 log = lf.get(maxlen=0, htmlize=1),
                 size=log_entry.size,
                 ago=ago,
                 path=path_joined,
                 path_comp=path_comp,
                 rev=rev,
                 tag=sym,
                 view_href=None,
                 download_href=None,
                 download_text_href=None,
                 annotate_href=None,
                 revision_href=None,
                 prefer_markup=ezt.boolean(0))
  # Content diff item is based on property diff, with URIs added
  fvi = get_file_view_info(request, path_joined, rev)
  i_content = copy.copy(i_prop)
  i_content.view_href = fvi.view_href
  i_content.download_href = fvi.download_href
  i_content.download_text_href = fvi.download_text_href
  i_content.annotate_href = fvi.annotate_href
  i_content.revision_href = fvi.revision_href
  i_content.prefer_markup = fvi.prefer_markup
  # Property diff item has properties hash, naturally. Content item doesn't.
  i_content.properties = None
  i_prop.properties = request.repos.itemprops(path_comp, rev)
  return i_content, i_prop
class DiffDescription:
  """Accumulate renderable descriptions of content and property diffs.

  Interprets the request's 'diff_format' parameter ('c', 's', 'l', 'f',
  'h', or 'u'), decides whether diffs are produced by the idiff module
  (intra-line highlighting) or by parsing raw diff program output, and
  collects the resulting change items in self.changes for the template
  layer to render."""

  def __init__(self, request):
    cfg = request.cfg
    query_dict = request.query_dict

    self.diff_format = query_dict.get('diff_format', cfg.options.diff_format)
    self.human_readable = 0
    self.hide_legend = 0
    self.line_differ = None
    self.fp_differ = None
    self.request = request
    self.context = -1
    self.changes = []

    # Map the one-letter format code onto a vclib diff type plus
    # presentation flags (context size, colorization, legend).
    if self.diff_format == 'c':
      self.diff_type = vclib.CONTEXT
      self.hide_legend = 1
    elif self.diff_format == 's':
      self.diff_type = vclib.SIDE_BY_SIDE
      self.hide_legend = 1
    elif self.diff_format == 'l':
      self.diff_type = vclib.UNIFIED
      self.context = 15
      self.human_readable = 1
    elif self.diff_format == 'f':
      self.diff_type = vclib.UNIFIED
      self.context = None
      self.human_readable = 1
    elif self.diff_format == 'h':
      self.diff_type = vclib.UNIFIED
      self.human_readable = 1
    elif self.diff_format == 'u':
      self.diff_type = vclib.UNIFIED
      self.hide_legend = 1
    else:
      raise debug.ViewVCException('Diff format %s not understood'
                                  % self.diff_format, '400 Bad Request')

    # Determine whether idiff is available and whether it could be used.
    # idiff only supports side-by-side (conditionally) and unified formats,
    # and is only used if intra-line diffs are requested.
    if (cfg.options.hr_intraline and idiff
        and ((self.human_readable and idiff.sidebyside)
             or (not self.human_readable and self.diff_type == vclib.UNIFIED))):
      # Override hiding legend for unified format. It is not marked 'human
      # readable', and it is displayed differently depending on whether
      # hr_intraline is disabled (displayed as raw diff) or enabled
      # (displayed as colored). What a royal mess... Issue #301 should
      # at some time address it; at that time, human_readable and hide_legend
      # controls should both be merged into one, 'is_colored' or something.
      self.hide_legend = 0
      if self.human_readable:
        self.line_differ = self._line_idiff_sidebyside
        self.diff_block_format = 'sidebyside-2'
      else:
        self.line_differ = self._line_idiff_unified
        self.diff_block_format = 'unified'
    else:
      if self.human_readable:
        self.diff_block_format = 'sidebyside-1'
        self.fp_differ = self._fp_vclib_hr
      else:
        self.diff_block_format = 'raw'
        self.fp_differ = self._fp_vclib_raw

  def anchor(self, anchor_name):
    """Append a named-anchor item to the accumulated changes."""
    self.changes.append(_item(diff_block_format='anchor', anchor=anchor_name))

  def get_content_diff(self, left, right):
    """Diff the file contents of the LEFT and RIGHT diff-side items."""
    cfg = self.request.cfg
    diff_options = {}
    if self.context != -1:
      diff_options['context'] = self.context
    if self.human_readable or self.diff_format == 'u':
      diff_options['funout'] = cfg.options.hr_funout
    if self.human_readable:
      diff_options['ignore_white'] = cfg.options.hr_ignore_white
      diff_options['ignore_keyword_subst'] = \
        cfg.options.hr_ignore_keyword_subst
    self._get_diff(left, right, self._content_lines, self._content_fp,
                   diff_options, None)

  def get_prop_diff(self, left, right):
    """Diff each changed, displayable property of LEFT and RIGHT."""
    diff_options = {}
    if self.context != -1:
      diff_options['context'] = self.context
    if self.human_readable:
      cfg = self.request.cfg
      diff_options['ignore_white'] = cfg.options.hr_ignore_white
    # list() the key sets so this also works where dict.keys() returns
    # a view object rather than a (concatenatable) list.
    for name in self._uniq(list(left.properties.keys())
                           + list(right.properties.keys())):
      # Skip non-utf8 property names
      if is_undisplayable(name):
        continue
      val_left = left.properties.get(name, '')
      val_right = right.properties.get(name, '')
      # Skip non-changed properties
      if val_left == val_right:
        continue
      # Check for binary properties
      if is_undisplayable(val_left) or is_undisplayable(val_right):
        self.changes.append(_item(left=left,
                                  right=right,
                                  diff_block_format=self.diff_block_format,
                                  changes=[ _item(type=_RCSDIFF_IS_BINARY) ],
                                  propname=name))
        continue
      self._get_diff(left, right, self._prop_lines, self._prop_fp,
                     diff_options, name)

  def _get_diff(self, left, right, get_lines, get_fp, diff_options, propname):
    """Run one diff (content, or property PROPNAME) and record the
    resulting change item."""
    if self.fp_differ is not None:
      fp = get_fp(left, right, propname, diff_options)
      changes = self.fp_differ(left, right, fp, propname)
    else:
      lines_left = get_lines(left, propname)
      lines_right = get_lines(right, propname)
      changes = self.line_differ(lines_left, lines_right, diff_options)
    self.changes.append(_item(left=left,
                              right=right,
                              changes=changes,
                              diff_block_format=self.diff_block_format,
                              propname=propname))

  def _line_idiff_sidebyside(self, lines_left, lines_right, diff_options):
    """Intra-line, side-by-side diff of two line lists."""
    return idiff.sidebyside(lines_left, lines_right,
                            diff_options.get("context", 5))

  def _line_idiff_unified(self, lines_left, lines_right, diff_options):
    """Intra-line, unified diff of two line lists."""
    return idiff.unified(lines_left, lines_right,
                         diff_options.get("context", 2))

  def _fp_vclib_hr(self, left, right, fp, propname):
    """Parse raw diff stream FP into a human-readable DiffSource."""
    date1, date2, flag, headers = \
           diff_parse_headers(fp, self.diff_type,
                              self._property_path(left, propname),
                              self._property_path(right, propname),
                              left.rev, right.rev, left.tag, right.tag)
    if flag is not None:
      return [ _item(type=flag) ]
    else:
      return DiffSource(fp, self.request.cfg)

  def _fp_vclib_raw(self, left, right, fp, propname):
    """Wrap raw diff stream FP for verbatim display."""
    date1, date2, flag, headers = \
           diff_parse_headers(fp, self.diff_type,
                              self._property_path(left, propname),
                              self._property_path(right, propname),
                              left.rev, right.rev, left.tag, right.tag)
    if flag is not None:
      return _item(type=flag)
    else:
      return _item(type='raw', raw=MarkupPipeWrapper(fp,
                   self.request.server.escape(headers), None, 1))

  def _content_lines(self, side, propname):
    """Return the list of content lines for one diff side."""
    f = self.request.repos.openfile(side.path_comp, side.rev, {})[0]
    try:
      lines = f.readlines()
    finally:
      f.close()
    return lines

  def _content_fp(self, left, right, propname, diff_options):
    """Return a raw content-diff stream from the repository."""
    return self.request.repos.rawdiff(left.path_comp, left.rev,
                                      right.path_comp, right.rev,
                                      self.diff_type, diff_options)

  def _prop_lines(self, side, propname):
    """Return the value of property PROPNAME on one side, as lines."""
    val = side.properties.get(propname, '')
    return val.splitlines()

  def _prop_fp(self, left, right, propname, diff_options):
    """Return a raw diff stream comparing PROPNAME's two values."""
    fn_left = self._temp_file(left.properties.get(propname))
    fn_right = self._temp_file(right.properties.get(propname))
    diff_args = vclib._diff_args(self.diff_type, diff_options)
    info_left = self._property_path(left, propname), \
                left.log_entry.date, left.rev
    info_right = self._property_path(right, propname), \
                 right.log_entry.date, right.rev
    return vclib._diff_fp(fn_left, fn_right, info_left, info_right,
                          self.request.cfg.utilities.diff or 'diff', diff_args)

  def _temp_file(self, val):
    '''Create a temporary file with content from val'''
    # Use mkstemp() rather than the race-prone mktemp(): mktemp() only
    # returns a name, which another process could claim before we open
    # it; mkstemp() atomically creates the file with owner-only access.
    fd, fn = tempfile.mkstemp()
    fp = os.fdopen(fd, "wb")
    try:
      if val:
        fp.write(val)
    finally:
      fp.close()
    return fn

  def _uniq(self, lst):
    '''Determine unique set of list elements'''
    return sorted(set(lst))

  def _property_path(self, side, propname):
    '''Return path to be displayed in raw diff - possibly augmented with
    property name'''
    if propname is None:
      return side.path
    else:
      return "%s:property(%s)" % (side.path, propname)
def view_diff(request):
  """Generate the HTML diff view comparing two revisions of a file,
  covering both content and property differences.

  Raises ViewVCException when diffs are disabled, a side is binary,
  or the paths/revisions are invalid."""
  if 'diff' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Diff generation is disabled',
                                '403 Forbidden')
  cfg = request.cfg
  p1, p2, rev1, rev2, sym1, sym2 = setup_diff(request)
  # Refuse to display diffs of files configured as binary.
  mime_type1, encoding1 = calculate_mime_type(request, p1, rev1)
  mime_type2, encoding2 = calculate_mime_type(request, p2, rev2)
  if is_binary_file_mime_type(mime_type1, cfg) or \
     is_binary_file_mime_type(mime_type2, cfg):
    raise debug.ViewVCException('Display of binary file content disabled '
                                'by configuration', '403 Forbidden')
  # since templates are in use and subversion allows changes to the dates,
  # we can't provide a strong etag
  if check_freshness(request, None, '%s-%s' % (rev1, rev2), weak=1):
    return
  left_side_content, left_side_prop = diff_side_item(request, p1, rev1, sym1)
  right_side_content, right_side_prop = diff_side_item(request, p2, rev2, sym2)
  desc = DiffDescription(request)
  try:
    if request.pathtype == vclib.FILE:
      # Get file content diff
      desc.anchor("content")
      desc.get_content_diff(left_side_content, right_side_content)
    # Get property list and diff each property
    desc.anchor("properties")
    desc.get_prop_diff(left_side_prop, right_side_prop)
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
                                'to diff', '400 Bad Request')
  # Self-referential form target for switching diff formats.
  no_format_params = request.query_dict.copy()
  no_format_params['diff_format'] = None
  diff_format_action, diff_format_hidden_values = \
    request.get_form(params=no_format_params)
  data = common_template_data(request)
  data.merge(TemplateData({
    'diffs' : desc.changes,
    'diff_format' : desc.diff_format,
    'hide_legend' : ezt.boolean(desc.hide_legend),
    'patch_href' : request.get_url(view_func=view_patch,
                                   params=no_format_params,
                                   escape=1),
    'diff_format_action' : diff_format_action,
    'diff_format_hidden_values' : diff_format_hidden_values,
    }))
  generate_page(request, "diff", data)
def generate_tarball_header(out, name, size=0, mode=None, mtime=0,
                            uid=0, gid=0, typeflag=None, linkname='',
                            uname='viewvc', gname='viewvc',
                            devmajor=1, devminor=0, prefix=None,
                            magic='ustar', version='00', chksum=None):
  """Write one 512-byte POSIX ustar header block for an archive member
  to OUT, preceded if needed by GNU '@LongLink' extension entries for
  names or link targets 100 characters or longer.

  When MODE/TYPEFLAG are not supplied they are derived from NAME (a
  trailing '/' marks a directory) and LINKNAME (non-empty marks a
  symbolic link); when CHKSUM is not supplied it is computed per the
  tar spec."""
  if not mode:
    if name[-1:] == '/':
      mode = 0755
    else:
      mode = 0644
  if not typeflag:
    if linkname:
      typeflag = '2' # symbolic link
    elif name[-1:] == '/':
      typeflag = '5' # directory
    else:
      typeflag = '0' # regular file
  if not prefix:
    prefix = ''
  # generate a GNU tar extension header for a long name.
  if len(name) >= 100:
    generate_tarball_header(out, '././@LongLink', len(name),
                            0, 0, 0, 0, 'L')
    out.write(name)
    out.write('\0' * (511 - ((len(name) + 511) % 512)))
  # generate a GNU tar extension header for a long symlink name.
  if len(linkname) >= 100:
    generate_tarball_header(out, '././@LongLink', len(linkname),
                            0, 0, 0, 0, 'K')
    out.write(linkname)
    out.write('\0' * (511 - ((len(linkname) + 511) % 512)))
  # First header section: name through mtime (numeric fields as
  # NUL-padded octal strings).
  block1 = struct.pack('100s 8s 8s 8s 12s 12s',
    name,
    '%07o' % mode,
    '%07o' % uid,
    '%07o' % gid,
    '%011o' % size,
    '%011o' % mtime)
  # Second header section: type flag through the ustar prefix field.
  block2 = struct.pack('c 100s 6s 2s 32s 32s 8s 8s 155s',
    typeflag,
    linkname,
    magic,
    version,
    uname,
    gname,
    '%07o' % devmajor,
    '%07o' % devminor,
    prefix)
  if not chksum:
    # Per the tar spec, the checksum is the byte sum of the header with
    # the checksum field itself treated as eight spaces.
    dummy_chksum = '        '
    block = block1 + dummy_chksum + block2
    chksum = 0
    for i in range(len(block)):
      chksum = chksum + ord(block[i])
  block = block1 + struct.pack('8s', '%07o' % chksum) + block2
  # Pad the header out to a full 512-byte block.
  block = block + '\0' * (512 - len(block))
  out.write(block)
def generate_tarball(out, request, reldir, stack, dir_mtime=None):
  """Recursively write the directory RELDIR (relative to the request's
  path) and everything beneath it into the tar stream OUT.

  STACK holds directory names whose headers have not yet been emitted:
  for CVS, directory headers are written lazily, only once a versioned
  file is actually found beneath them.  DIR_MTIME, when given, dictates
  the directory's timestamp (used for Subversion subdirs)."""
  # get directory info from repository
  rep_path = request.path_parts + reldir
  entries = request.repos.listdir(rep_path, request.pathrev, {})
  request.repos.dirlogs(rep_path, request.pathrev, entries, {})
  entries.sort(lambda a, b: cmp(a.name, b.name))
  # figure out corresponding path in tar file. everything gets put underneath
  # a single top level directory named after the repository directory being
  # tarred
  if request.path_parts:
    tar_dir = request.path_parts[-1] + '/'
  else:
    # Don't handle context as a directory in the tar ball.
    root_path_parts = _path_parts(request.rootname)
    tar_dir = root_path_parts[-1] + '/'
  if reldir:
    tar_dir = tar_dir + _path_join(reldir) + '/'
  cvs = request.roottype == 'cvs'
  # If our caller doesn't dictate a datestamp to use for the current
  # directory, its datestamps will be the youngest of the datestamps
  # of versioned items in that subdirectory. We'll be ignoring dead
  # or busted items and, in CVS, subdirs.
  if dir_mtime is None:
    dir_mtime = 0
    for file in entries:
      if cvs and (file.kind != vclib.FILE or file.rev is None or file.dead):
        continue
      if (file.date is not None) and (file.date > dir_mtime):
        dir_mtime = file.date
  # Push current directory onto the stack.
  stack.append(tar_dir)
  # If this is Subversion, we generate a header for this directory
  # regardless of its contents. For CVS it will only get into the
  # tarball if it has files underneath it, which we determine later.
  if not cvs:
    generate_tarball_header(out, tar_dir, mtime=dir_mtime)
  # Run through the files in this directory, skipping busted and
  # unauthorized ones.
  for file in entries:
    if file.kind != vclib.FILE:
      continue
    if cvs and (file.rev is None or file.dead):
      continue
    # If we get here, we've seen at least one valid file in the
    # current directory. For CVS, we need to make sure there are
    # directory parents to contain it, so we flush the stack.
    if cvs:
      for dir in stack:
        generate_tarball_header(out, dir, mtime=dir_mtime)
      del stack[:]
    # Calculate the mode for the file. Sure, we could look directly
    # at the ,v file in CVS, but that's a layering violation we'd like
    # to avoid as much as possible.
    if request.repos.isexecutable(rep_path + [file.name], request.pathrev):
      mode = 0755
    else:
      mode = 0644
    # Is this thing a symlink?
    #
    ### FIXME: A better solution would be to have vclib returning
    ### symlinks with a new vclib.SYMLINK path type.
    symlink_target = None
    if hasattr(request.repos, 'get_symlink_target'):
      symlink_target = request.repos.get_symlink_target(rep_path + [file.name],
                                                        request.pathrev)
    # If the object is a symlink, generate the appropriate header.
    # Otherwise, we're dealing with a regular file.
    if symlink_target:
      generate_tarball_header(out, tar_dir + file.name, 0, mode,
                              file.date is not None and file.date or 0,
                              typeflag='2', linkname=symlink_target)
    else:
      filesize = request.repos.filesize(rep_path + [file.name], request.pathrev)
      if filesize == -1:
        # Bummer. We have to calculate the filesize manually.
        fp = request.repos.openfile(rep_path + [file.name], request.pathrev, {})[0]
        filesize = 0
        while 1:
          chunk = retry_read(fp)
          if not chunk:
            break
          filesize = filesize + len(chunk)
        fp.close()
      # Write the tarball header...
      generate_tarball_header(out, tar_dir + file.name, filesize, mode,
                              file.date is not None and file.date or 0)
      # ...the file's contents ...
      fp = request.repos.openfile(rep_path + [file.name], request.pathrev, {})[0]
      while 1:
        chunk = retry_read(fp)
        if not chunk:
          break
        out.write(chunk)
      fp.close()
      # ... and then add the block padding.
      out.write('\0' * (511 - (filesize + 511) % 512))
  # Recurse into subdirectories, skipping busted and unauthorized (or
  # configured-to-be-hidden) ones.
  for file in entries:
    if file.errors or file.kind != vclib.DIR:
      continue
    if request.cfg.options.hide_cvsroot \
       and is_cvsroot_path(request.roottype, rep_path + [file.name]):
      continue
    mtime = request.roottype == 'svn' and file.date or None
    generate_tarball(out, request, reldir + [file.name], stack, mtime)
  # Pop the current directory from the stack.
  del stack[-1:]
def download_tarball(request):
  """Implement the 'tar' view: stream a gzipped tarball of the
  requested directory tree to the client (or, when debug.TARFILE_PATH
  is set, write it to that local file for debugging)."""
  cfg = request.cfg

  if 'tar' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Tarball generation is disabled',
                                '403 Forbidden')

  # If debugging, we just need to open up the specified tar path for
  # writing. Otherwise, we get a writeable server output stream --
  # disabling any default compression thereupon -- and wrap that in
  # our own gzip stream wrapper.
  if debug.TARFILE_PATH:
    fp = open(debug.TARFILE_PATH, 'w')
  else:
    tarfile = request.rootname
    if request.path_parts:
      # Name the tarball after the deepest requested path component.
      tarfile = "%s-%s" % (tarfile, request.path_parts[-1])
    request.server.addheader('Content-Disposition',
                             'attachment; filename="%s.tar.gz"' % (tarfile))
    server_fp = get_writeready_server_file(request, 'application/x-gzip',
                                           allow_compress=False)
    request.server.flush()
    fp = gzip.GzipFile('', 'wb', 9, server_fp)

  ### FIXME: For Subversion repositories, we can get the real mtime of the
  ### top-level directory here.
  generate_tarball(fp, request, [], [])

  # The tar format requires two trailing 512-byte blocks of zeroes.
  fp.write('\0' * 1024)
  fp.close()
  if debug.TARFILE_PATH:
    request.server.header('')
    print """
<html>
<body>
<p>Tarball '%s' successfully generated!</p>
</body>
</html>""" % (debug.TARFILE_PATH)
def view_revision(request):
  """Implement the 'revision' view: display the metadata (author,
  date, log, revision properties) and changed-path list for a single
  Subversion revision.  Raises for non-Subversion roots."""
  if request.roottype != "svn":
    raise debug.ViewVCException("Revision view not supported for CVS "
                                "repositories at this time.",
                                "400 Bad Request")

  cfg = request.cfg
  query_dict = request.query_dict
  try:
    rev = request.repos._getrev(query_dict.get('revision'))
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid revision', '404 Not Found')
  youngest_rev = request.repos.get_youngest_revision()

  # The revision number acts as a weak validator (but we tell browsers
  # not to cache the youngest revision).
  if rev != youngest_rev and check_freshness(request, None, str(rev), weak=1):
    return

  # Fetch the revision information.
  date, author, msg, revprops, changes = request.repos.revinfo(rev)
  date_str = make_time_string(date, cfg)

  # Fix up the revprops list (rather like get_itemprops()).
  propnames = revprops.keys()
  propnames.sort()
  props = []
  for name in propnames:
    # skip non-utf8 property names
    if is_undisplayable(name):
      continue
    lf = LogFormatter(request, revprops[name])
    value = lf.get(maxlen=0, htmlize=1)
    # note non-utf8 property values
    undisplayable = is_undisplayable(value)
    if undisplayable:
      value = None
    props.append(_item(name=name, value=value,
                       undisplayable=ezt.boolean(undisplayable)))

  # Sort the changes list by path.
  def changes_sort_by_path(a, b):
    return cmp(a.path_parts, b.path_parts)
  changes.sort(changes_sort_by_path)

  # Handle limit_changes parameter
  cfg_limit_changes = cfg.options.limit_changes
  limit_changes = int(query_dict.get('limit_changes', cfg_limit_changes))
  more_changes = None
  more_changes_href = None
  first_changes = None
  first_changes_href = None
  num_changes = len(changes)
  if limit_changes and len(changes) > limit_changes:
    # A user-requested limit truncates the list and offers a "show all" link.
    more_changes = len(changes) - limit_changes
    params = query_dict.copy()
    params['limit_changes'] = 0
    more_changes_href = request.get_url(params=params, escape=1)
    changes = changes[:limit_changes]
  elif cfg_limit_changes and len(changes) > cfg_limit_changes:
    # Over the configured limit but explicitly unlimited by the user:
    # offer a link back to the limited view.
    first_changes = cfg_limit_changes
    params = query_dict.copy()
    params['limit_changes'] = None
    first_changes_href = request.get_url(params=params, escape=1)

  # Add the hrefs, types, and prev info
  for change in changes:
    change.view_href = change.diff_href = change.type = change.log_href = None

    # If the path is newly added, don't claim text or property
    # modifications.
    if (change.action == vclib.ADDED or change.action == vclib.REPLACED) \
       and not change.copied:
      change.text_changed = 0
      change.props_changed = 0

    # Calculate the view link URLs (for which we must have a pathtype).
    if change.pathtype:
      view_func = None
      if change.pathtype is vclib.FILE \
         and 'markup' in cfg.options.allowed_views:
        view_func = view_markup
      elif change.pathtype is vclib.DIR:
        view_func = view_directory
      path = _path_join(change.path_parts)
      base_path = _path_join(change.base_path_parts)
      # Deleted paths no longer exist in REV; link to the predecessor.
      if change.action == vclib.DELETED:
        link_rev = str(change.base_rev)
        link_where = base_path
      else:
        link_rev = str(rev)
        link_where = path

      change.view_href = request.get_url(view_func=view_func,
                                         where=link_where,
                                         pathtype=change.pathtype,
                                         params={'pathrev' : link_rev},
                                         escape=1)
      change.log_href = request.get_url(view_func=view_log,
                                        where=link_where,
                                        pathtype=change.pathtype,
                                        params={'pathrev' : link_rev},
                                        escape=1)
      if (change.pathtype is vclib.FILE and change.text_changed) \
         or change.props_changed:
        change.diff_href = request.get_url(view_func=view_diff,
                                           where=path,
                                           pathtype=change.pathtype,
                                           params={'pathrev' : str(rev),
                                                   'r1' : str(rev),
                                                   'r2' : str(change.base_rev),
                                                   },
                                           escape=1)

    # use same variable names as the log template
    change.path = _path_join(change.path_parts)
    change.copy_path = _path_join(change.base_path_parts)
    change.copy_rev = change.base_rev
    change.text_mods = ezt.boolean(change.text_changed)
    change.prop_mods = ezt.boolean(change.props_changed)
    change.is_copy = ezt.boolean(change.copied)
    change.pathtype = (change.pathtype == vclib.FILE and 'file') \
                      or (change.pathtype == vclib.DIR and 'dir') \
                      or None
    # Drop the attributes the template doesn't use (renamed above).
    del change.path_parts
    del change.base_path_parts
    del change.base_rev
    del change.text_changed
    del change.props_changed
    del change.copied

  prev_rev_href = next_rev_href = None
  if rev > 0:
    prev_rev_href = request.get_url(view_func=view_revision,
                                    where=None,
                                    pathtype=None,
                                    params={'revision': str(rev - 1)},
                                    escape=1)
  if rev < request.repos.get_youngest_revision():
    next_rev_href = request.get_url(view_func=view_revision,
                                    where=None,
                                    pathtype=None,
                                    params={'revision': str(rev + 1)},
                                    escape=1)
  jump_rev_action, jump_rev_hidden_values = \
    request.get_form(params={'revision': None})

  lf = LogFormatter(request, msg)
  data = common_template_data(request)
  data.merge(TemplateData({
    'rev' : str(rev),
    'author' : author,
    'date' : date_str,
    'log' : lf.get(maxlen=0, htmlize=1),
    'properties' : props,
    'ago' : date is not None and html_time(request, date, 1) or None,
    'changes' : changes,
    'prev_href' : prev_rev_href,
    'next_href' : next_rev_href,
    'num_changes' : num_changes,
    'limit_changes': limit_changes,
    'more_changes': more_changes,
    'more_changes_href': more_changes_href,
    'first_changes': first_changes,
    'first_changes_href': first_changes_href,
    'jump_rev_action' : jump_rev_action,
    'jump_rev_hidden_values' : jump_rev_hidden_values,
    'revision_href' : request.get_url(view_func=view_revision,
                                      where=None,
                                      pathtype=None,
                                      params={'revision': str(rev)},
                                      escape=1),
  }))
  if rev == youngest_rev:
    request.server.addheader("Cache-control", "no-store")
  generate_page(request, "revision", data)
def is_query_supported(request):
  """Returns true if querying is supported for the given path."""
  cfg = request.cfg
  queryable_roottypes = ['cvs', 'svn']
  return (cfg.cvsdb.enabled
          and request.pathtype == vclib.DIR
          and request.roottype in queryable_roottypes)
def is_querydb_nonempty_for_root(request):
  """Return 1 iff commits database integration is supported *and* the
  current root is found in that database.  Only does this check if
  check_database is set to 1."""
  cfg = request.cfg
  # Querying only applies to CVS and Subversion roots, and only when
  # the commits database is enabled at all.
  if not (cfg.cvsdb.enabled and request.roottype in ('cvs', 'svn')):
    return 0
  if not cfg.cvsdb.check_database_for_root:
    # Trust the configuration without hitting the database.
    return 1
  global cvsdb
  import cvsdb
  db = cvsdb.ConnectDatabaseReadOnly(cfg)
  repos_root, repos_dir = cvsdb.FindRepository(db, request.rootpath)
  return repos_root and 1 or 0
def validate_query_args(request):
  """Perform additional validation of query form arguments, beyond what
  the CGI param validation loop in Request.run_viewvc() offers.  Raise
  debug.ViewVCException ('400 Bad Request') on any illegal value."""
  legal_match_values = ('exact', 'like', 'glob', 'regex', 'notregex')
  for arg_base in ('branch', 'file', 'comment', 'who'):
    # First, make sure the XXX_match argument carries a legal value.
    arg_match = arg_base + '_match'
    arg_match_value = request.query_dict.get(arg_match, 'exact')
    if arg_match_value not in legal_match_values:
      raise debug.ViewVCException(
        'An illegal value was provided for the "%s" parameter.'
        % (arg_match),
        '400 Bad Request')

    # When the match type promises a regular expression, verify that
    # the base argument actually compiles as one.
    if arg_match_value not in ('regex', 'notregex'):
      continue
    arg_base_value = request.query_dict.get(arg_base)
    if not arg_base_value:
      continue
    try:
      re.compile(arg_base_value)
    except:
      raise debug.ViewVCException(
        'An illegal value was provided for the "%s" parameter.'
        % (arg_base),
        '400 Bad Request')
def view_queryform(request):
  """Implement the 'queryform' view: render the commit-database query
  form, pre-filled with any values carried in the current request."""
  if not is_query_supported(request):
    raise debug.ViewVCException('Can not query project root "%s" at "%s".'
                                % (request.rootname, request.where),
                                '403 Forbidden')

  # Do some more precise input validation.
  validate_query_args(request)

  query_action, query_hidden_values = \
    request.get_form(view_func=view_query, params={'limit_changes': None})
  limit_changes = \
    int(request.query_dict.get('limit_changes',
                               request.cfg.options.limit_changes))

  def escaped_query_dict_get(itemname, itemdefault=''):
    # Fetch a query argument, HTML-escaped for safe template insertion.
    return request.server.escape(request.query_dict.get(itemname, itemdefault))

  data = common_template_data(request)
  data.merge(TemplateData({
    'branch' : escaped_query_dict_get('branch', ''),
    'branch_match' : escaped_query_dict_get('branch_match', 'exact'),
    'dir' : escaped_query_dict_get('dir', ''),
    'file' : escaped_query_dict_get('file', ''),
    'file_match' : escaped_query_dict_get('file_match', 'exact'),
    'who' : escaped_query_dict_get('who', ''),
    'who_match' : escaped_query_dict_get('who_match', 'exact'),
    'comment' : escaped_query_dict_get('comment', ''),
    'comment_match' : escaped_query_dict_get('comment_match', 'exact'),
    'querysort' : escaped_query_dict_get('querysort', 'date'),
    'date' : escaped_query_dict_get('date', 'hours'),
    'hours' : escaped_query_dict_get('hours', '2'),
    'mindate' : escaped_query_dict_get('mindate', ''),
    'maxdate' : escaped_query_dict_get('maxdate', ''),
    'query_action' : query_action,
    'query_hidden_values' : query_hidden_values,
    'limit_changes' : limit_changes,
    'dir_href' : request.get_url(view_func=view_directory, params={},
                                 escape=1),
  }))
  generate_page(request, "query_form", data)
def parse_date(datestr):
  """Parse a date string from the query form.

  DATESTR has the form 'YYYY-MM-DD[ HH:MM[:SS]]'.  Return the
  corresponding "seconds since epoch" value, interpreting the date as
  UTC.  Return None if DATESTR is malformed or contains out-of-range
  components (e.g. month 13).
  """
  # Both pattern halves are raw strings; the original second half was a
  # plain string whose '\ ' and '\d' escapes worked only by accident.
  match = re.match(r'^(\d\d\d\d)-(\d\d)-(\d\d)(?:\ +'
                   r'(\d\d):(\d\d)(?::(\d\d))?)?$', datestr)
  if not match:
    return None

  year = int(match.group(1))
  month = int(match.group(2))
  day = int(match.group(3))
  # Optional time groups default to midnight.
  hour = int(match.group(4) or 0)
  minute = int(match.group(5) or 0)
  second = int(match.group(6) or 0)

  # Reject clearly out-of-range components instead of silently feeding
  # them to calendar.timegm(), which would yield a bogus timestamp.
  if not (1 <= month <= 12 and 1 <= day <= 31
          and hour <= 23 and minute <= 59 and second <= 59):
    return None

  # return a "seconds since epoch" value assuming date given in UTC
  tm = (year, month, day, hour, minute, second, 0, 0, 0)
  return calendar.timegm(tm)
def english_query(request):
  """Generate an English sentence describing the current query."""
  cfg = request.cfg
  qd = request.query_dict
  escape = request.server.escape
  parts = ['Checkins ']

  dir = qd.get('dir', '')
  if dir:
    noun = (',' in dir) and 'subdirectories' or 'subdirectory'
    parts.append('to %s <em>%s</em> ' % (noun, escape(dir)))
  file = qd.get('file', '')
  if file:
    if len(parts) != 1:
      # A directory clause precedes us; join with a conjunction.
      parts.append('and ')
    parts.append('to file <em>%s</em> ' % escape(file))
  who = qd.get('who', '')
  branch = qd.get('branch', '')
  if branch:
    parts.append('on branch <em>%s</em> ' % escape(branch))
  else:
    parts.append('on all branches ')
  comment = qd.get('comment', '')
  if comment:
    parts.append('with comment <i>%s</i> ' % escape(comment))
  if who:
    parts.append('by <em>%s</em> ' % escape(who))

  # Describe the date constraint.
  date = qd.get('date', 'hours')
  if date == 'hours':
    parts.append('in the last %s hours' % escape(qd.get('hours', '2')))
  elif date == 'day':
    parts.append('in the last day')
  elif date == 'week':
    parts.append('in the last week')
  elif date == 'month':
    parts.append('in the last month')
  elif date == 'all':
    parts.append('since the beginning of time')
  elif date == 'explicit':
    mindate = qd.get('mindate', '')
    maxdate = qd.get('maxdate', '')
    if mindate and maxdate:
      w1, w2 = 'between', 'and'
    else:
      w1, w2 = 'since', 'before'
    if mindate:
      mindate = make_time_string(parse_date(mindate), cfg)
      parts.append('%s <em>%s</em> ' % (w1, mindate))
    if maxdate:
      maxdate = make_time_string(parse_date(maxdate), cfg)
      parts.append('%s <em>%s</em> ' % (w2, maxdate))
  return ''.join(parts)
def prev_rev(rev):
  """Return a string naming the revision that precedes dotted CVS
  revision REV (e.g. '1.5' -> '1.4')."""
  components = rev.split('.')
  # Step the final component back by one.
  components[-1] = str(int(components[-1]) - 1)
  # A trailing '.0' means we walked off the start of a branch; strip
  # the branch numbering and fall back to the branch point.
  if len(components) > 2 and components[-1] == '0':
    components = components[:-2]
  return '.'.join(components)
def build_commit(request, files, max_files, dir_strip, format):
  """Return a commit object build from the information in FILES, or
  None if no allowed files are present in the set.  DIR_STRIP is the
  path prefix to remove from the commit object's set of files.  If
  MAX_FILES is non-zero, it is used to limit the number of files
  returned in the commit object.  FORMAT is the requested output
  format of the query request."""
  cfg = request.cfg

  # All files in a grouping share commit metadata; read it from the first.
  author = files[0].GetAuthor()
  date = files[0].GetTime()
  desc = files[0].GetDescription()
  commit_rev = files[0].GetRevision()
  len_strip = len(dir_strip)
  commit_files = []
  num_allowed = 0
  plus_count = 0
  minus_count = 0
  found_unreadable = 0

  for f in files:
    dirname = f.GetDirectory()
    filename = f.GetFile()
    if dir_strip:
      # The database stores paths relative to the repository parent;
      # DIR_STRIP must be a prefix of every one of them.
      assert dirname[:len_strip] == dir_strip
      assert len(dirname) == len_strip or dirname[len(dir_strip)] == '/'
      dirname = dirname[len_strip+1:]
    where = dirname and ("%s/%s" % (dirname, filename)) or filename
    rev = f.GetRevision()
    rev_prev = prev_rev(rev)
    commit_time = f.GetTime()
    if commit_time:
      commit_time = make_time_string(commit_time, cfg)
    change_type = f.GetTypeString()

    # In CVS, we can actually look at deleted revisions; in Subversion
    # we can't -- we'll look at the previous revision instead.
    exam_rev = rev
    if request.roottype == 'svn' and change_type == 'Remove':
      exam_rev = rev_prev

    # Check path access (since the commits database logic bypasses the
    # vclib layer and, thus, the vcauth stuff that layer uses).
    path_parts = _path_parts(where)
    if path_parts:
      # Skip files in CVSROOT if asked to hide such.
      if cfg.options.hide_cvsroot \
         and is_cvsroot_path(request.roottype, path_parts):
        found_unreadable = 1
        continue

      # We have to do a rare authz check here because this data comes
      # from the CVSdb, not from the vclib providers.
      #
      # WARNING: The Subversion CVSdb integration logic is weak, weak,
      # weak.  It has no ability to track copies, so complex
      # situations like a copied directory with a deleted subfile (all
      # in the same revision) are very ... difficult.  We've no choice
      # but to omit as unauthorized paths the authorization logic
      # can't find.
      try:
        readable = vclib.check_path_access(request.repos, path_parts,
                                           None, exam_rev)
      except vclib.ItemNotFound:
        readable = 0
      if not readable:
        found_unreadable = 1
        continue

    if request.roottype == 'svn':
      params = { 'pathrev': exam_rev }
    else:
      params = { 'revision': exam_rev, 'pathrev': f.GetBranch() or None }

    dir_href = request.get_url(view_func=view_directory,
                               where=dirname, pathtype=vclib.DIR,
                               params=params, escape=1)
    log_href = request.get_url(view_func=view_log,
                               where=where, pathtype=vclib.FILE,
                               params=params, escape=1)
    diff_href = view_href = download_href = None
    if 'markup' in cfg.options.allowed_views:
      view_href = request.get_url(view_func=view_markup,
                                  where=where, pathtype=vclib.FILE,
                                  params=params, escape=1)
    if 'co' in cfg.options.allowed_views:
      download_href = request.get_url(view_func=view_checkout,
                                      where=where, pathtype=vclib.FILE,
                                      params=params, escape=1)
    if change_type == 'Change':
      # Only plain modifications get a diff link (against the previous rev).
      diff_href_params = params.copy()
      diff_href_params.update({
        'r1': rev_prev,
        'r2': rev,
        'diff_format': None
        })
      diff_href = request.get_url(view_func=view_diff,
                                  where=where, pathtype=vclib.FILE,
                                  params=diff_href_params, escape=1)
    mime_type, encoding = calculate_mime_type(request, path_parts, exam_rev)
    prefer_markup = ezt.boolean(default_view(mime_type, cfg) == view_markup)

    # Update plus/minus line change count.
    plus = int(f.GetPlusCount())
    minus = int(f.GetMinusCount())
    plus_count = plus_count + plus
    minus_count = minus_count + minus

    num_allowed = num_allowed + 1
    # NOTE: files beyond MAX_FILES still count toward the plus/minus
    # totals above; they are merely omitted from the returned list.
    if max_files and num_allowed > max_files:
      continue

    commit_files.append(_item(date=commit_time,
                              dir=request.server.escape(dirname),
                              file=request.server.escape(filename),
                              author=request.server.escape(f.GetAuthor()),
                              rev=rev,
                              branch=f.GetBranch(),
                              plus=plus,
                              minus=minus,
                              type=change_type,
                              dir_href=dir_href,
                              log_href=log_href,
                              view_href=view_href,
                              download_href=download_href,
                              prefer_markup=prefer_markup,
                              diff_href=diff_href))

  # No files survived authz checks?  Let's just pretend this
  # little commit didn't happen, shall we?
  if not len(commit_files):
    return None

  commit = _item(num_files=len(commit_files), files=commit_files,
                 plus=plus_count, minus=minus_count)
  commit.limited_files = ezt.boolean(num_allowed > len(commit_files))

  # We'll mask log messages in commits which contain unreadable paths,
  # but even that is kinda iffy.  If a person searches for
  # '/some/hidden/path' across log messages, then gets a response set
  # that shows commits lacking log message, said person can reasonably
  # assume that the log messages contained the hidden path, and that
  # this is likely because they are referencing a real path in the
  # repository -- a path the user isn't supposed to even know about.
  if found_unreadable:
    commit.log = None
    commit.short_log = None
  else:
    lf = LogFormatter(request, desc)
    htmlize = (format != 'rss')
    commit.log = lf.get(maxlen=0, htmlize=htmlize)
    commit.short_log = lf.get(maxlen=cfg.options.short_log_len, htmlize=htmlize)
  commit.author = request.server.escape(author)
  commit.rss_date = make_rss_time_string(date, request.cfg)
  if request.roottype == 'svn':
    commit.rev = commit_rev
    commit.rss_url = '%s://%s%s' % \
      (request.server.getenv("HTTPS") == "on" and "https" or "http",
       request.server.getenv("HTTP_HOST"),
       request.get_url(view_func=view_revision,
                       params={'revision': commit.rev},
                       escape=1))
  else:
    commit.rev = None
    commit.rss_url = None
  return commit
def query_backout(request, commits):
  """Emit, as plain text, a shell script that backs out the changes
  described by COMMITS (one 'cvs update' or 'svn merge' line per file)."""
  server_fp = get_writeready_server_file(request, 'text/plain')
  if not commits:
    server_fp.write("""\
# No changes were selected by the query.
# There is nothing to back out.
""")
    return
  server_fp.write("""\
# This page can be saved as a shell script and executed.
# It should be run at the top of your work area. It will update
# your working copy to back out the changes selected by the
# query.
""")
  for commit in commits:
    for fileinfo in commit.files:
      # Pick the version-control command appropriate to the root type;
      # any other root type produces no output for this file.
      if request.roottype == 'cvs':
        template = 'cvs update -j %s -j %s %s/%s\n'
      elif request.roottype == 'svn':
        template = 'svn merge -r %s:%s %s/%s\n'
      else:
        continue
      server_fp.write(template % (fileinfo.rev, prev_rev(fileinfo.rev),
                                  fileinfo.dir, fileinfo.file))
def view_query(request):
  """Implement the 'query' view: run a commits-database query built
  from the request's form arguments and render the results (HTML, RSS,
  or a 'backout' shell script, per the 'format' argument)."""
  if not is_query_supported(request):
    raise debug.ViewVCException('Can not query project root "%s" at "%s".'
                                % (request.rootname, request.where),
                                '403 Forbidden')

  cfg = request.cfg

  # Do some more precise input validation.
  validate_query_args(request)

  # get form data
  branch = request.query_dict.get('branch', '')
  branch_match = request.query_dict.get('branch_match', 'exact')
  dir = request.query_dict.get('dir', '')
  file = request.query_dict.get('file', '')
  file_match = request.query_dict.get('file_match', 'exact')
  who = request.query_dict.get('who', '')
  who_match = request.query_dict.get('who_match', 'exact')
  comment = request.query_dict.get('comment', '')
  comment_match = request.query_dict.get('comment_match', 'exact')
  querysort = request.query_dict.get('querysort', 'date')
  date = request.query_dict.get('date', 'hours')
  hours = request.query_dict.get('hours', '2')
  mindate = request.query_dict.get('mindate', '')
  maxdate = request.query_dict.get('maxdate', '')
  format = request.query_dict.get('format')
  limit_changes = int(request.query_dict.get('limit_changes',
                                             cfg.options.limit_changes))

  match_types = { 'exact':1, 'like':1, 'glob':1, 'regex':1, 'notregex':1 }
  sort_types = { 'date':1, 'author':1, 'file':1 }
  date_types = { 'hours':1, 'day':1, 'week':1, 'month':1,
                 'all':1, 'explicit':1 }

  # parse various fields, validating or converting them
  if not match_types.has_key(branch_match): branch_match = 'exact'
  if not match_types.has_key(file_match): file_match = 'exact'
  if not match_types.has_key(who_match): who_match = 'exact'
  if not match_types.has_key(comment_match): comment_match = 'exact'
  if not sort_types.has_key(querysort): querysort = 'date'
  if not date_types.has_key(date): date = 'hours'
  mindate = parse_date(mindate)
  maxdate = parse_date(maxdate)

  global cvsdb
  import cvsdb

  db = cvsdb.ConnectDatabaseReadOnly(cfg)
  repos_root, repos_dir = cvsdb.FindRepository(db, request.rootpath)
  if not repos_root:
    raise debug.ViewVCException(
      "The root '%s' was not found in the commit database "
      % request.rootname)

  # create the database query from the form data
  query = cvsdb.CreateCheckinQuery()
  query.SetRepository(repos_root)
  # treat "HEAD" specially ...
  if branch_match == 'exact' and branch == 'HEAD':
    query.SetBranch('')
  elif branch:
    query.SetBranch(branch, branch_match)
  if dir:
    # Multiple subdirectories may be supplied, comma-separated; match
    # each both exactly and as a prefix ('like' with a trailing '/%').
    for subdir in dir.split(','):
      path = (_path_join(repos_dir + request.path_parts
                         + _path_parts(subdir.strip())))
      query.SetDirectory(path, 'exact')
      query.SetDirectory('%s/%%' % cvsdb.EscapeLike(path), 'like')
  else:
    where = _path_join(repos_dir + request.path_parts)
    if where: # if we are in a subdirectory ...
      query.SetDirectory(where, 'exact')
      query.SetDirectory('%s/%%' % cvsdb.EscapeLike(where), 'like')
  if file:
    query.SetFile(file, file_match)
  if who:
    query.SetAuthor(who, who_match)
  if comment:
    query.SetComment(comment, comment_match)
  query.SetSortMethod(querysort)
  if date == 'hours':
    query.SetFromDateHoursAgo(int(hours))
  elif date == 'day':
    query.SetFromDateDaysAgo(1)
  elif date == 'week':
    query.SetFromDateDaysAgo(7)
  elif date == 'month':
    query.SetFromDateDaysAgo(31)
  elif date == 'all':
    pass
  elif date == 'explicit':
    if mindate is not None:
      query.SetFromDateObject(mindate)
    if maxdate is not None:
      query.SetToDateObject(maxdate)

  # Set the admin-defined (via configuration) row limits.  This is to avoid
  # slamming the database server with a monster query.
  if format == 'rss':
    query.SetLimit(cfg.cvsdb.rss_row_limit)
  else:
    query.SetLimit(cfg.cvsdb.row_limit)

  # run the query
  db.RunQuery(query)
  commit_list = query.GetCommitList()
  row_limit_reached = query.GetLimitReached()

  # gather commits
  commits = []
  plus_count = 0
  minus_count = 0
  mod_time = -1
  if commit_list:
    files = []
    limited_files = 0
    current_desc = commit_list[0].GetDescriptionID()
    current_rev = commit_list[0].GetRevision()
    dir_strip = _path_join(repos_dir)

    for commit in commit_list:
      commit_desc = commit.GetDescriptionID()
      commit_rev = commit.GetRevision()

      # base modification time on the newest commit
      if commit.GetTime() > mod_time:
        mod_time = commit.GetTime()

      # For CVS, group commits with the same commit message.
      # For Subversion, group them only if they have the same revision number
      if request.roottype == 'cvs':
        if current_desc == commit_desc:
          files.append(commit)
          continue
      else:
        if current_rev == commit_rev:
          files.append(commit)
          continue

      # append this grouping
      commit_item = build_commit(request, files, limit_changes,
                                 dir_strip, format)
      if commit_item:
        # update running plus/minus totals
        plus_count = plus_count + commit_item.plus
        minus_count = minus_count + commit_item.minus
        commits.append(commit_item)

      # Start a fresh grouping keyed by this commit's message/revision.
      files = [ commit ]
      limited_files = 0
      current_desc = commit_desc
      current_rev = commit_rev

    # we need to tack on our last commit grouping, if any
    commit_item = build_commit(request, files, limit_changes,
                               dir_strip, format)
    if commit_item:
      # update running plus/minus totals
      plus_count = plus_count + commit_item.plus
      minus_count = minus_count + commit_item.minus
      commits.append(commit_item)

  # only show the branch column if we are querying all branches
  # or doing a non-exact branch match on a CVS repository.
  show_branch = ezt.boolean(request.roottype == 'cvs' and
                            (branch == '' or branch_match != 'exact'))

  # backout link
  params = request.query_dict.copy()
  params['format'] = 'backout'
  backout_href = request.get_url(params=params,
                                 escape=1)

  # link to zero limit_changes value
  params = request.query_dict.copy()
  params['limit_changes'] = 0
  limit_changes_href = request.get_url(params=params, escape=1)

  # if we got any results, use the newest commit as the modification time
  if mod_time >= 0:
    if check_freshness(request, mod_time):
      return

  if format == 'backout':
    query_backout(request, commits)
    return

  data = common_template_data(request)
  data.merge(TemplateData({
    'sql': request.server.escape(db.CreateSQLQueryString(query)),
    'english_query': english_query(request),
    'queryform_href': request.get_url(view_func=view_queryform, escape=1),
    'backout_href': backout_href,
    'plus_count': plus_count,
    'minus_count': minus_count,
    'show_branch': show_branch,
    'querysort': querysort,
    'commits': commits,
    'row_limit_reached' : ezt.boolean(row_limit_reached),
    'limit_changes': limit_changes,
    'limit_changes_href': limit_changes_href,
    'rss_link_href': request.get_url(view_func=view_query,
                                     params={'date': 'month'},
                                     escape=1,
                                     prefix=1),
  }))
  if format == 'rss':
    generate_page(request, "rss", data, "application/rss+xml")
  else:
    generate_page(request, "query_results", data)
# Map each 'view' URL parameter value to the function implementing it.
_views = {
  'annotate': view_annotate,
  'co': view_checkout,
  'diff': view_diff,
  'dir': view_directory,
  'graph': view_cvsgraph,
  'graphimg': view_cvsgraph_image,
  'log': view_log,
  'markup': view_markup,
  'patch': view_patch,
  'query': view_query,
  'queryform': view_queryform,
  'revision': view_revision,
  'roots': view_roots,
  'tar': download_tarball,
  'redirect_pathrev': redirect_pathrev,
}

# Reverse mapping of _views: view function -> its URL code string.
_view_codes = {}
for code, view in _views.items():
  _view_codes[view] = code
def list_roots(request):
  """Return a dictionary mapping each viewable root name to a list
  [ROOTPATH, ROOTTYPE, LASTMOD], where LASTMOD is an _item describing
  the root's youngest revision (Subversion roots only, and only when
  show_roots_lastmod is enabled) or None."""
  cfg = request.cfg
  allroots = { }

  # Add the viewable Subversion roots
  for root in cfg.general.svn_roots.keys():
    auth = setup_authorizer(cfg, request.username, root)
    try:
      repos = vclib.svn.SubversionRepository(root, cfg.general.svn_roots[root],
                                             auth, cfg.utilities,
                                             cfg.options.svn_config_dir)
      lastmod = None
      if cfg.options.show_roots_lastmod:
        try:
          repos.open()
          youngest_rev = repos.youngest
          date, author, msg, revprops, changes = repos.revinfo(youngest_rev)
          date_str = make_time_string(date, cfg)
          ago = html_time(request, date)
          lf = LogFormatter(request, msg)
          log = lf.get(maxlen=0, htmlize=1)
          short_log = lf.get(maxlen=cfg.options.short_log_len, htmlize=1)
          lastmod = _item(ago=ago, author=author, date=date_str, log=log,
                          short_log=short_log, rev=str(youngest_rev))
        except:
          # Best-effort: any failure reading revision info just means
          # we show the root without last-modified details.
          lastmod = None
    except vclib.ReposNotFound:
      continue
    allroots[root] = [cfg.general.svn_roots[root], 'svn', lastmod]

  # Add the viewable CVS roots
  for root in cfg.general.cvs_roots.keys():
    auth = setup_authorizer(cfg, request.username, root)
    try:
      # Constructed only to verify the root exists and is accessible.
      vclib.ccvs.CVSRepository(root, cfg.general.cvs_roots[root], auth,
                               cfg.utilities, cfg.options.use_rcsparse)
    except vclib.ReposNotFound:
      continue
    allroots[root] = [cfg.general.cvs_roots[root], 'cvs', None]

  return allroots
def _parse_root_parent(pp):
"""Parse a single root parent "directory [= context] : repo_type" string
and return as tuple."""
pos = pp.rfind(':')
if pos > 0:
repo_type = pp[pos+1:].strip()
pp = pp[:pos].strip()
else:
repo_type = None
pos = pp.rfind('=')
if pos > 0:
context = _path_parts(pp[pos+1:].strip())
pp = pp[:pos].strip()
else:
context = None
path = os.path.normpath(pp)
return path,context,repo_type
def expand_root_parents(cfg):
  """Expand the configured root parents into individual roots, adding
  each discovered root to cfg.general.cvs_roots or .svn_roots.  Raises
  debug.ViewVCException on a missing or unrecognized repository type."""

  # Each item in root_parents is a "directory [= context ] : repo_type" string.
  for pp in cfg.general.root_parents:
    path,context,repo_type = _parse_root_parent(pp)

    if repo_type == 'cvs':
      roots = vclib.ccvs.expand_root_parent(path)
      # Honor the hide_cvsroot option for parent-derived roots, too.
      if cfg.options.hide_cvsroot and roots.has_key('CVSROOT'):
        del roots['CVSROOT']
      if context:
        # Prefix each root name with its configured context path.
        fullroots = {}
        for root, rootpath in roots.iteritems():
          fullroots[_path_join(context + [root])] = rootpath
        cfg.general.cvs_roots.update(fullroots)
      else:
        cfg.general.cvs_roots.update(roots)
    elif repo_type == 'svn':
      roots = vclib.svn.expand_root_parent(path)
      if context:
        fullroots = {}
        for root, rootpath in roots.iteritems():
          fullroots[_path_join(context + [root])] = rootpath
        cfg.general.svn_roots.update(fullroots)
      else:
        cfg.general.svn_roots.update(roots)
    elif repo_type == None:
      raise debug.ViewVCException(
        'The path "%s" in "root_parents" does not include a '
        'repository type. Expected "cvs" or "svn".' % (pp))
    else:
      raise debug.ViewVCException(
        'The path "%s" in "root_parents" has an unrecognized '
        'repository type ("%s"). Expected "cvs" or "svn".'
        % (pp, repo_type))
def find_root_in_parents(cfg, path_parts, roottype):
  """Search the configured root parents for a root of ROOTTYPE named by
  PATH_PARTS.  Return a 3-tuple (FULLROOT, ROOTPATH, REMAIN), where
  FULLROOT is the matched root name (including any context prefix),
  ROOTPATH its filesystem location, and REMAIN the unconsumed trailing
  path components.  Return (None, None, None) when nothing matches."""
  # Easy out: caller wants rootname "CVSROOT", and we're hiding those.
  if path_parts[-1] == 'CVSROOT' and cfg.options.hide_cvsroot:
    # BUGFIX: this used to 'return None' (a single value), which made
    # callers that unpack three values raise TypeError.
    return None, None, None
  for pp in cfg.general.root_parents:
    path,context,repo_type = _parse_root_parent(pp)

    # Only consider parents of the requested repository type.
    if repo_type != roottype:
      continue
    if context != None:
      # A context prefix must match the leading path components.
      if not _path_starts_with(path_parts, context):
        continue
      rootidx = len(context)
    else:
      rootidx = 0
    # Need at least one component left over to serve as the root name.
    if len(path_parts) <= rootidx:
      continue

    rootname = path_parts[rootidx]
    fullroot = _path_join(path_parts[0:rootidx+1])
    remain = path_parts[rootidx+1:]

    rootpath = None
    if roottype == 'cvs':
      rootpath = vclib.ccvs.find_root_in_parent(path, rootname)
    elif roottype == 'svn':
      rootpath = vclib.svn.find_root_in_parent(path, rootname)

    if rootpath is not None:
      return fullroot, rootpath, remain
  return None, None, None
def locate_root_from_path(cfg, path_parts):
  """Return a 4-tuple ROOTTYPE, ROOTPATH, ROOTNAME, REMAIN for path_parts.
  Checks the explicitly configured CVS and Subversion roots first, then
  falls back to searching the configured root parents (caching any hit
  back into the root dictionaries).  Returns (None, None, None, None)
  when no root matches."""
  for rootname, rootpath in cfg.general.cvs_roots.iteritems():
    pp = _path_parts(rootname)
    if _path_starts_with(path_parts, pp):
      return 'cvs', rootpath, rootname, path_parts[len(pp):]
  for rootname, rootpath in cfg.general.svn_roots.iteritems():
    pp = _path_parts(rootname)
    if _path_starts_with(path_parts, pp):
      return 'svn', rootpath, rootname, path_parts[len(pp):]
  rootname, path_in_parent, remain = \
    find_root_in_parents(cfg, path_parts, 'cvs')
  if path_in_parent:
    # Cache the discovered root so later lookups hit the fast path.
    cfg.general.cvs_roots[rootname] = path_in_parent
    return 'cvs', path_in_parent, rootname, remain
  rootname, path_in_parent, remain = \
    find_root_in_parents(cfg, path_parts, 'svn')
  if path_in_parent:
    cfg.general.svn_roots[rootname] = path_in_parent
    return 'svn', path_in_parent, rootname, remain
  return None, None, None, None
def locate_root(cfg, rootname):
  """Return a 2-tuple ROOTTYPE, ROOTPATH for configured ROOTNAME."""
  # First try a direct match against the configured root dictionaries.
  if rootname in cfg.general.cvs_roots:
    return 'cvs', cfg.general.cvs_roots[rootname]
  if rootname in cfg.general.svn_roots:
    return 'svn', cfg.general.svn_roots[rootname]

  # No direct hit; try deriving the root from the path itself.
  path_parts = _path_parts(rootname)
  roottype, rootpath, rootname_dupl, remain = \
    locate_root_from_path(cfg, path_parts)
  if roottype != None:
    # A path-derived match must name exactly the requested root,
    # with no leftover path components.
    if rootname_dupl != rootname:
      raise debug.ViewVCException(
        'Found root name "%s" doesn\'t match "%s"' \
        % (rootname_dupl, rootname),
        '500 Internal Server Error')
    if len(remain) > 0:
      raise debug.ViewVCException(
        'Have remaining path "%s"' \
        % (remain),
        '500 Internal Server Error')
  return roottype, rootpath
def load_config(pathname=None, server=None):
  """Load the ViewVC configuration file.  SERVER is the server object
  that will be using this configuration.  Consult the environment for
  the variable VIEWVC_CONF_PATHNAME and VIEWCVS_CONF_PATHNAME (its
  legacy name) and, if set, use its value as the path of the
  configuration file; otherwise, use PATHNAME (if provided).  Failing
  all else, use a hardcoded default configuration path.

  Returns the populated config.Config object.
  """
  debug.t_start('load-config')

  # See if the environment contains overrides to the configuration
  # path.  If we have a SERVER object, consult its environment; use
  # the OS environment otherwise.
  env_get = server and server.getenv or os.environ.get
  env_pathname = (env_get("VIEWVC_CONF_PATHNAME")
                  or env_get("VIEWCVS_CONF_PATHNAME"))

  # Try to find the configuration pathname by searching these ordered
  # locations: the environment, the passed-in PATHNAME, the hard-coded
  # default.
  pathname = (env_pathname
              or pathname
              or os.path.join(os.path.dirname(os.path.dirname(__file__)),
                              "viewvc.conf"))

  # Load the configuration!
  cfg = config.Config()
  cfg.set_defaults()
  cfg.load_config(pathname, env_get("HTTP_HOST"))

  # Apply the stacktrace configuration immediately.
  sys.tracebacklimit = cfg.options.stacktraces and 1000 or 0

  # Load mime types file(s), but reverse the order -- our
  # configuration uses a most-to-least preferred approach, but the
  # 'mimetypes' package wants things the other way around.
  if cfg.general.mime_types_files:
    files = cfg.general.mime_types_files[:]
    files.reverse()
    # Resolve each entry relative to the configuration file's directory.
    files = map(lambda x, y=pathname: os.path.join(os.path.dirname(y), x), files)
    mimetypes.init(files)

  debug.t_end('load-config')
  return cfg
def view_error(server, cfg):
  """Render the current exception as an error page on SERVER.

  Prefers the configured "error" template; falls back to the plain
  exception printer when no configuration is available or templating
  itself fails.
  """
  exc_dict = debug.GetExceptionData()
  status = exc_dict['status']
  # HTML-escape the user-visible parts of the exception data.
  if exc_dict['msg']:
    exc_dict['msg'] = server.escape(exc_dict['msg'])
  if exc_dict['stacktrace']:
    exc_dict['stacktrace'] = server.escape(exc_dict['stacktrace'])

  # Use the configured error template if possible.
  try:
    if cfg and not server.headerSent:
      server.header(status=status)
      template = get_view_template(cfg, "error")
      template.generate(server.file(), exc_dict)
      return
  except:
    # Deliberately swallow any templating failure so the fallback
    # printer below always gets a chance to run.
    pass

  # Fallback to the old exception printer if no configuration is
  # available, or if something went wrong.
  debug.PrintException(server, exc_dict)
def main(server, cfg):
  """Top-level ViewVC request entry point (Python 2).

  Builds a Request from SERVER and CFG, runs it, and routes any
  failure through view_error().  Timing data and child-process info
  are dumped on the way out regardless of success.
  """
  try:
    debug.t_start('main')
    try:
      # build a Request object, which contains info about the HTTP request
      request = Request(server, cfg)
      request.run_viewvc()
    except SystemExit, e:
      # A deliberate exit (e.g. output already completed) is not an error.
      return
    except:
      view_error(server, cfg)
  finally:
    debug.t_end('main')
    debug.t_dump(server.file())
    debug.DumpChildren(server)
| 36.850374 | 95 | 0.616441 |
420189969bf4563b8cf581da5b36870eda316cbb | 238 | py | Python | topCoder/srms/300s/srm361/div2/search_box.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-09-30T19:53:08.000Z | 2020-09-30T19:53:08.000Z | topCoder/srms/300s/srm361/div2/search_box.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | null | null | null | topCoder/srms/300s/srm361/div2/search_box.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-10-15T09:10:57.000Z | 2020-10-15T09:10:57.000Z | import re
class SearchBox:
    """TopCoder SRM 361 div2 "SearchBox": locate a literal search term in text."""

    def find(self, text, search, wholeWord, start):
        """Return the first index >= start where search occurs in text, else -1.

        Args:
            text: The string to search within.
            search: The literal term to look for (regex metacharacters in
                it are matched literally).
            wholeWord: 'Y' to require the match to stand alone as a word
                (regex word boundaries on both sides); anything else for
                a plain substring match.
            start: Index at which to begin searching.
        """
        # Escape the term so characters like '.', '*', '(' are matched
        # literally instead of being interpreted as a regular expression.
        pattern = re.escape(search)
        if wholeWord == 'Y':
            pattern = r'\b%s\b' % pattern
        m = re.compile(pattern).search(text, start)
        return m.start() if m else -1
06be715ed0522ef932cb7f6bcb55a88df528fb92 | 6,631 | py | Python | tests/px_file_test.py | walles/px | e513e51de56d581b8ea1483acebf24547caec86d | [
"MIT"
] | 149 | 2016-03-27T20:39:37.000Z | 2022-03-01T07:53:42.000Z | tests/px_file_test.py | walles/px | e513e51de56d581b8ea1483acebf24547caec86d | [
"MIT"
] | 85 | 2016-06-06T17:33:54.000Z | 2022-02-14T06:06:58.000Z | tests/px_file_test.py | walles/px | e513e51de56d581b8ea1483acebf24547caec86d | [
"MIT"
] | 9 | 2016-05-05T11:22:13.000Z | 2021-03-04T12:03:59.000Z | import re
import sys
from px import px_file
if sys.version_info.major >= 3:
# For mypy PEP-484 static typing validation
from typing import List # NOQA
def test_lsof_to_files():
    """Parse a synthetic `lsof -F` null-delimited stream into PxFile objects.

    Builds two process records (pids 123 and 456) holding five file
    entries between them, then checks every parsed PxFile field,
    including a name containing an embedded newline and a revoked fd
    with no type field.
    """
    lsof = ""
    lsof += "\0".join(["p123", "\n"])
    lsof += "\0".join(["fcwd", "a ", "tDIR", "n/", "\n"])
    lsof += "\0".join(["f5", "ar", "tREG", "ncontains\nnewline", "\n"])
    lsof += "\0".join(["f6", "aw", "tREG", "d0x42", "n/somefile", "\n"])
    lsof += "\0".join(["p456", "\n"])
    lsof += "\0".join(["f7", "au", "tREG", "n/someotherfile", "\n"])
    lsof += "\0".join(["f7", "a ", "n(revoked)", "\n"])

    files = px_file.lsof_to_files(lsof)
    assert len(files) == 5

    # fd "cwd": no access mode, no device.
    assert files[0].pid == 123
    assert files[0].access is None
    assert files[0].device is None
    assert files[0].device_number() is None
    assert files[0].type == "DIR"
    assert files[0].name == "/"
    assert str(files[0]) == "[DIR] /"

    # Read-only fd whose name contains a newline.
    assert files[1].pid == 123
    assert files[1].access == "r"
    assert files[1].device is None
    assert files[1].device_number() is None
    assert files[1].type == "REG"
    assert files[1].name == "contains\nnewline"
    assert str(files[1]) == "contains\nnewline"

    # Write-only fd with a device number.
    assert files[2].pid == 123
    assert files[2].access == "w"
    assert files[2].device == "0x42"
    assert files[2].device_number() == 0x42
    assert files[2].type == "REG"
    assert files[2].name == "/somefile"
    assert str(files[2]) == "/somefile"

    # "u" (update) access maps to "rw".
    assert files[3].pid == 456
    assert files[3].access == "rw"
    assert files[3].device is None
    assert files[3].device_number() is None
    assert files[3].type == "REG"
    assert files[3].name == "/someotherfile"
    assert str(files[3]) == "/someotherfile"

    # Revoked fd: missing type field becomes "??".
    assert files[4].pid == 456
    assert files[4].access is None
    assert files[4].device is None
    assert files[4].device_number() is None
    assert files[4].type == "??"
    assert files[4].name == "(revoked)"
    assert str(files[4]) == "[??] (revoked)"
def test_get_all():
    """Smoke-test px_file.get_all() against the live system's open files."""
    files = px_file.get_all()

    # As non-root I get 6000 on my system, 100 should be fine anywhere. And if
    # not, we'll just have to document our finding and lower this value
    assert len(files) > 100

    # At least one entry should be a process current-working-directory fd.
    cwd_count = 0
    for file in files:
        if file.fdtype == "cwd":
            cwd_count += 1
    assert cwd_count > 0
def lsof_to_file(shard_array):
    # type: (List[str]) -> px_file.PxFile
    """Build a single PxFile from one null-delimited lsof record (test helper)."""
    return px_file.lsof_to_files("\0".join(shard_array + ["\n"]))[0]
def test_listen_name():
    """A socket name without '->' renders with a '(LISTEN)' suffix, IPv4 and IPv6."""
    file = lsof_to_file(["p123", "f6", "au", "tIPv4", "d0x42", "nlocalhost:63342"])
    assert file.name == "localhost:63342"
    assert str(file) == "[IPv4] localhost:63342 (LISTEN)"

    file = lsof_to_file(["p123", "f6", "au", "tIPv6", "d0x42", "nlocalhost:63342"])
    assert file.name == "localhost:63342"
    assert str(file) == "[IPv6] localhost:63342 (LISTEN)"
def test_setability():
    """Two PxFiles parsed from identical records must hash/compare equal."""
    # Can files be stored in sets?
    a = lsof_to_file(["p123", "f6", "aw", "tREG", "d0x42", "n/somefile"])
    b = lsof_to_file(["p123", "f6", "aw", "tREG", "d0x42", "n/somefile"])
    s = set([a, b])
    assert len(s) == 1
def test_local_endpoint():
    """get_endpoints()[0] yields the local side of a socket name, or None.

    Covers connected sockets ("local->remote"), listening sockets, the
    unmatchable "*" wildcard address, and a non-socket regular file.
    """
    local_endpoint = lsof_to_file(
        ["p123", "f6", "au", "tIPv4", "d0x42", "nlocalhost:postgres->localhost:33331"]
    ).get_endpoints()[0]
    assert local_endpoint == "localhost:postgres"

    local_endpoint = lsof_to_file(
        ["p123", "f6", "au", "tIPv6", "d0x42", "nlocalhost:39252->localhost:39252"]
    ).get_endpoints()[0]
    assert local_endpoint == "localhost:39252"

    assert (
        lsof_to_file(
            ["p123", "f6", "au", "tIPv6", "d0x42", "nlocalhost:19091"]
        ).get_endpoints()[0]
        == "localhost:19091"
    )
    assert (
        lsof_to_file(
            ["p123", "f6", "au", "tIPv4", "d0x42", "nlocalhost:ipp (LISTEN)"]
        ).get_endpoints()[0]
        == "localhost:ipp"
    )

    # We can't match against endpoint address "*"
    assert (
        lsof_to_file(
            ["p123", "f6", "au", "tIPv6", "d0x42", "n*:57919"]
        ).get_endpoints()[0]
        is None
    )
    assert (
        lsof_to_file(
            ["p123", "f6", "au", "tIPv4", "d0x42", "n*:57919"]
        ).get_endpoints()[0]
        is None
    )
    assert (
        lsof_to_file(["p123", "f6", "au", "tIPv4", "d0x42", "n*:*"]).get_endpoints()[0]
        is None
    )
    # Non-socket files have no endpoints at all.
    assert (
        lsof_to_file(
            ["p123", "f6", "aw", "tREG", "d0x42", "n/somefile"]
        ).get_endpoints()[0]
        is None
    )
def test_remote_endpoint():
    """get_endpoints()[1] yields the remote side of a socket name, or None.

    Only connected sockets ("local->remote") have a remote endpoint;
    listening sockets, wildcard addresses, and plain files yield None.
    """
    remote_endpoint = lsof_to_file(
        ["p123", "f6", "au", "tIPv4", "d0x42", "nlocalhost:postgresql->localhost:3331"]
    ).get_endpoints()[1]
    assert remote_endpoint == "localhost:3331"

    remote_endpoint = lsof_to_file(
        ["p123", "f6", "au", "tIPv4", "d0x42", "nlocalhost:postgresql->otherhost:3331"]
    ).get_endpoints()[1]
    assert remote_endpoint == "otherhost:3331"

    # Listening socket: no remote peer.
    assert (
        lsof_to_file(
            ["p123", "f6", "au", "tIPv6", "d0x42", "nlocalhost:19091"]
        ).get_endpoints()[1]
        is None
    )
    assert (
        lsof_to_file(
            ["p123", "f6", "au", "tIPv6", "d0x42", "n*:57919"]
        ).get_endpoints()[1]
        is None
    )
    assert (
        lsof_to_file(
            ["p123", "f6", "au", "tIPv4", "d0x42", "n*:57919"]
        ).get_endpoints()[1]
        is None
    )
    assert (
        lsof_to_file(["p123", "f6", "au", "tIPv4", "d0x42", "n*:*"]).get_endpoints()[1]
        is None
    )
    # Non-socket files have no endpoints at all.
    assert (
        lsof_to_file(
            ["p123", "f6", "aw", "tREG", "d0x42", "n/somefile"]
        ).get_endpoints()[1]
        is None
    )
def test_str_resolve():
    """str(PxFile) resolves numeric socket addresses to host names.

    NOTE(review): performs live DNS lookups, so results depend on the
    resolver; the assertions allow for known alternate names.
    """
    # FIXME: This will break if Google changes the name of 8.8.8.8
    test_me = px_file.PxFile(pid=0, filetype="IPv4")
    test_me.name = "127.0.0.1:51786->8.8.8.8:https"
    assert str(test_me) in [
        "[IPv4] localhost:51786->google-public-dns-a.google.com:https",
        "[IPv4] localhost:51786->dns.google:https",
    ]

    test_me = px_file.PxFile(pid=0, filetype="IPv4")
    test_me.name = "127.0.0.1:17600"
    assert str(test_me) == "[IPv4] localhost:17600 (LISTEN)"

    # IPv6 loopback may resolve to "localhost" or stay numeric.
    test_me = px_file.PxFile(pid=0, filetype="IPv6")
    test_me.name = "[::1]:17600"
    match = re.match(r"^\[IPv6\] (.*):17600 \(LISTEN\)$", str(test_me))
    assert match
    resolution = match.group(1)
    assert resolution == "[::1]" or "localhost" in resolution

    # Unresolvable garbage is passed through untouched.
    test_me = px_file.PxFile(pid=0, filetype="IPv4")
    test_me.name = "this:is:garbage:17600"
    assert str(test_me) == "[IPv4] this:is:garbage:17600 (LISTEN)"
| 30.140909 | 87 | 0.566732 |
4fa4de8d3241676cedaed966bc044b5f1e367a82 | 282 | py | Python | compiler/example_configs/big_config_scn4m_subm.py | xinjie0831/OpenRAM | 76e2ab88fe4097ffa51e0387ba72165bcda49e68 | [
"BSD-3-Clause"
] | null | null | null | compiler/example_configs/big_config_scn4m_subm.py | xinjie0831/OpenRAM | 76e2ab88fe4097ffa51e0387ba72165bcda49e68 | [
"BSD-3-Clause"
] | null | null | null | compiler/example_configs/big_config_scn4m_subm.py | xinjie0831/OpenRAM | 76e2ab88fe4097ffa51e0387ba72165bcda49e68 | [
"BSD-3-Clause"
] | null | null | null | word_size = 32
# SRAM organization: number of words (word width is set above).
num_words = 128

# Target technology: SCMOS 0.35um (MOSIS SCN4M_SUBM).
tech_name = "scn4m_subm"

# Characterization corners: process / supply voltage / temperature.
process_corners = ["TT"]
supply_voltages = [ 5.0 ]
temperatures = [ 25 ]

# Output directory and generated-macro name.
output_path = "temp"
output_name = "sram_{0}_{1}_{2}".format(word_size,num_words,tech_name)

# Verification tool selection (DRC / LVS / parasitic extraction).
drc_name = "magic"
lvs_name = "netgen"
pex_name = "magic"
| 18.8 | 70 | 0.705674 |
c7f1915bfdf7eeb482a2c4b3a10895716bfe1c27 | 58 | py | Python | Client/game/client/log/logger.py | wuyueqpwoa/BloodyBlock | 74d5a8a623c3b0c01cafb4c6a4d2d89c10efb9c4 | [
"Apache-2.0"
] | null | null | null | Client/game/client/log/logger.py | wuyueqpwoa/BloodyBlock | 74d5a8a623c3b0c01cafb4c6a4d2d89c10efb9c4 | [
"Apache-2.0"
] | null | null | null | Client/game/client/log/logger.py | wuyueqpwoa/BloodyBlock | 74d5a8a623c3b0c01cafb4c6a4d2d89c10efb9c4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""
日志工具
"""
def log(*args):
    """Print all positional arguments (Python 2 print statement)."""
    print args
| 6.444444 | 15 | 0.568966 |
1fe7510e2a66710d5838d56e44f296723ff1eea8 | 10,170 | py | Python | src/utils.py | h1w/voice-welcome | 08d12358146a112fe304be4c31dcdd41bb0ea396 | [
"BSD-3-Clause"
] | null | null | null | src/utils.py | h1w/voice-welcome | 08d12358146a112fe304be4c31dcdd41bb0ea396 | [
"BSD-3-Clause"
] | null | null | null | src/utils.py | h1w/voice-welcome | 08d12358146a112fe304be4c31dcdd41bb0ea396 | [
"BSD-3-Clause"
] | null | null | null | import wave
def ConvertPcmToWav(name, output_name, channels=1, sampwidth=2, framerate=44100):
    """Wrap a headerless PCM file in a WAV container.

    Args:
        name: Path of the raw PCM input file.
        output_name: Path of the WAV file to create (overwritten if present).
        channels: Number of interleaved channels (default 1 = mono, as the
            original hard-coded value).
        sampwidth: Bytes per sample (default 2, i.e. 16-bit).
        framerate: Sample rate in Hz (default 44100).
    """
    with open(name, 'rb') as pcmfile:
        pcmdata = pcmfile.read()
    with wave.open(output_name, 'wb') as wavfile:
        # params = (nchannels, sampwidth, framerate, nframes, comptype,
        # compname); the wave module recomputes nframes as data is written,
        # so the placeholder value here does not matter.
        wavfile.setparams((channels, sampwidth, framerate, 1, 'NONE', 'NONE'))
        wavfile.writeframes(pcmdata)
def _russian_plural(n, one, few, many):
    """Pick the Russian plural form of a noun for the integer n.

    Russian declension: 11-14 always take the "many" form; otherwise the
    last digit decides (1 -> "one", 2-4 -> "few", everything else -> "many").
    """
    if 11 <= n % 100 <= 14:
        return many
    if n % 10 == 1:
        return one
    if 2 <= n % 10 <= 4:
        return few
    return many


def localtime_support_func(correct_time):
    """Format a local time [hours, minutes, seconds] as a Russian sentence.

    Args:
        correct_time: Sequence of three int-convertible values
            (hours, minutes, seconds).

    Returns:
        e.g. "Местное время сейчас 10 часов, 25 минут и 30 секунд".
        When seconds == 0 the short form is used:
        "Местное время сейчас ровно 10 часов и 25 минут".

    Replaces a large copy-pasted if/elif cascade that mis-declined some
    values (its first seconds branch also matched %10 == 1, producing
    "21 секунд" instead of "21 секунда") and silently returned None for
    hours 0, 22 and 23 (its hour test used `<= 4` instead of `%10 <= 4`).
    """
    hours = int(correct_time[0])
    minutes = int(correct_time[1])
    seconds = int(correct_time[2])
    hour_word = _russian_plural(hours, "час", "часа", "часов")
    minute_word = _russian_plural(minutes, "минута", "минуты", "минут")
    if seconds == 0:
        # Exactly on the minute: "ровно" (sharp) short form, no seconds.
        return "Местное время сейчас ровно {} {} и {} {}".format(
            hours, hour_word, minutes, minute_word)
    second_word = _russian_plural(seconds, "секунда", "секунды", "секунд")
    return "Местное время сейчас {} {}, {} {} и {} {}".format(
        hours, hour_word, minutes, minute_word, seconds, second_word)
1ce3f6f5bf2ba78f724dafa28a1526c3251ecc4b | 518 | py | Python | filenamescrambleint.py | voussoir/cmd | 9ecfc43751c42d4cdd288b8a1b28ba3a7fa6c650 | [
"BSD-3-Clause"
] | 6 | 2020-01-30T13:36:53.000Z | 2022-02-05T08:14:56.000Z | filenamescrambleint.py | voussoir/cmd | 9ecfc43751c42d4cdd288b8a1b28ba3a7fa6c650 | [
"BSD-3-Clause"
] | null | null | null | filenamescrambleint.py | voussoir/cmd | 9ecfc43751c42d4cdd288b8a1b28ba3a7fa6c650 | [
"BSD-3-Clause"
] | 1 | 2020-01-30T13:36:33.000Z | 2020-01-30T13:36:33.000Z | '''
Drag a file on top of this .py file, and it will have its
filename scrambled into a combination of 12 digits.
'''
import os
import random
import string
import sys
from voussoirkit import pathclass
argv = sys.argv[1:]
for path in pathclass.glob_many(argv):
newname = [random.choice(string.digits) for x in range(12)]
newname = ''.join(newname) + path.dot_extension
newname = path.parent.with_child(newname)
os.rename(path, newname)
print('%s -> %s' % (path.absolute_path, newname.basename))
| 24.666667 | 63 | 0.714286 |
060807268963c0d6f1e6fd4a8ec3ef36d43da8b1 | 1,723 | py | Python | pendulum_experiments/exp_common.py | numahha/wmopo | 1557dab2e8168c1f2e53ffbc435b4000680f1d28 | [
"MIT"
] | 1 | 2022-01-01T10:45:53.000Z | 2022-01-01T10:45:53.000Z | pendulum_experiments/exp_common.py | numahha/wmopo | 1557dab2e8168c1f2e53ffbc435b4000680f1d28 | [
"MIT"
] | 1 | 2022-03-03T17:03:35.000Z | 2022-03-03T17:03:35.000Z | pendulum_experiments/exp_common.py | numahha/wmopo | 1557dab2e8168c1f2e53ffbc435b4000680f1d28 | [
"MIT"
] | null | null | null | from regression import DynamicsRegression
from nll_estimation import NLLRegression
import torch
import numpy as np
from env_def import EnvDef
def default_c_hat(sa):
    """Default penalty term: zero model-uncertainty cost for any state-action.

    Used when the M-step correction is disabled; ignores `sa` and always
    returns 0.0.
    """
    return 0.
class ExpCommon():
    """Shared experiment setup: offline data, dynamics model, and env wiring."""

    def __init__(self,m_step_flag=False,
                 hidden_unit_num=8,
                 B_dash=1):
        # Offline dataset: (observation, action) pairs and the matching
        # observation deltas, read from CSV files in the working directory.
        self.obac_data = np.loadtxt('np_obac.csv',delimiter=',')
        self.diff_ob_data = np.loadtxt('np_diff_ob.csv',delimiter=',')

        # construct model
        self.dynamics_model = DynamicsRegression(self.obac_data, self.diff_ob_data, hidden_unit_num=hidden_unit_num, B_dash=B_dash)

        # Environment definition supplies discount, termination, reward, reset.
        envdef = EnvDef()
        self.gamma = envdef.gamma
        self.termination = envdef.termination
        self.reward_fn = envdef.reward_fn
        self.reset = envdef.reset
        self.env_name = envdef.env_name

        if m_step_flag is False:
            # No M-step correction: penalty c_hat is identically zero.
            self.c_hat=default_c_hat
        else:
            # M-step: fit an NLL model to precomputed unweighted NLL values
            # and use its prediction as the penalty c_hat(sa).
            self.nllmodel=NLLRegression(self.obac_data, np.loadtxt('temp_unweighted_nll.csv',delimiter=','))
            self.nllmodel.train_model()
            self.c_hat=self.nllmodel.pred

    def custom_reward_for_optimization(self, sa):
        """Penalized reward: reward_fn(sa) - (1 - gamma) * b_hat * c_hat(sa).

        NOTE: requires self.b_hat, which is only set in wrap().
        """
        return self.reward_fn(sa) - (1.-self.gamma)*self.b_hat*self.c_hat(sa)
        #return - (1. - np.exp(-1.*(sa[0]**2)))

    def reset2(self):
        """Indirection so the wrapped env's reset stays bound to this object."""
        return self.reset()

    def wrap(self,local_envfn):
        """Inject learned dynamics, reset, and penalized reward into local_envfn."""
        self.dynamics_model.load_model()
        local_envfn.one_step = self.dynamics_model.sim_next_ob
        self.b_hat=self.dynamics_model.get_b_hat()
        local_envfn.reset = self.reset2
        print("(1.-gamma)*b_hat =",(1.-self.gamma)*self.b_hat)
        local_envfn.reward = self.custom_reward_for_optimization
| 31.327273 | 131 | 0.654092 |
7aa451f26240bf0b95300e8cca22b12ec9f4b923 | 1,454 | py | Python | sustainableCityManagement/main_project/ML_models/bikes_usage_prediction.py | Josh-repository/Dashboard-CityManager- | 6287881be9fb2c6274a755ce5d75ad355346468a | [
"RSA-MD"
] | null | null | null | sustainableCityManagement/main_project/ML_models/bikes_usage_prediction.py | Josh-repository/Dashboard-CityManager- | 6287881be9fb2c6274a755ce5d75ad355346468a | [
"RSA-MD"
] | null | null | null | sustainableCityManagement/main_project/ML_models/bikes_usage_prediction.py | Josh-repository/Dashboard-CityManager- | 6287881be9fb2c6274a755ce5d75ad355346468a | [
"RSA-MD"
] | 1 | 2021-05-13T16:33:18.000Z | 2021-05-13T16:33:18.000Z | import numpy as np
import math
import sys
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from sklearn.linear_model import LinearRegression
from ..Config.config_handler import read_config
config_vals = read_config("Bike_API")
# Time Series Prediction algorithm to predict the bike usage for days ahead
def predict_bikes_usage(arrayOfUsagePerDay, predictDays=1, previous_days_to_consider=config_vals["days_to_consider_for_prediction"]):
    """Predict future bike usage with an autoregressive linear model.

    Trains a LinearRegression on sliding windows of
    `previous_days_to_consider` consecutive days predicting the next day,
    then rolls the model forward `predictDays` steps, feeding each
    prediction back in as training data.  Returns only the FIRST
    predicted day, rounded up with math.ceil.

    NOTE(review): mutates the caller's `arrayOfUsagePerDay` list by
    appending the rolled-forward predictions, and the extra predictions
    beyond the first are discarded — confirm callers expect both.
    """
    X = []
    y = []
    # Build sliding-window training pairs: previous N days -> next day.
    for i in range(len(arrayOfUsagePerDay)-previous_days_to_consider):
        train_part = arrayOfUsagePerDay[i:i+previous_days_to_consider]
        test_part = arrayOfUsagePerDay[i+previous_days_to_consider]
        X.append(train_part)
        y.append(test_part)
    results = []
    for i in range(predictDays):
        # Refit each step on data extended with our own prior predictions.
        reg = LinearRegression().fit(X, y)
        to_predict = arrayOfUsagePerDay[len(
            arrayOfUsagePerDay)-previous_days_to_consider:len(arrayOfUsagePerDay)]
        y_pred = reg.predict([to_predict])
        # adding prediction to the list of values (needed to create the to_predict)
        arrayOfUsagePerDay.append(y_pred[0])
        # adding train data point (needed for training)
        X.append(to_predict)
        y.append(y_pred) # adding test data point (needed for training)
        results.append(y_pred) # adding prediction to results
    return math.ceil(results[0])
| 38.263158 | 133 | 0.738652 |
9199bf112c71d122f3b11bdca0c0825727b72a5f | 10,262 | py | Python | pyAnVIL/anvil/fhir/client.py | mmtmn/client-apis | 215adae0b7f401b4bf62e7bd79b6a8adfe69cf4f | [
"Apache-2.0"
] | 1 | 2022-01-12T21:50:44.000Z | 2022-01-12T21:50:44.000Z | pyAnVIL/anvil/fhir/client.py | mmtmn/client-apis | 215adae0b7f401b4bf62e7bd79b6a8adfe69cf4f | [
"Apache-2.0"
] | null | null | null | pyAnVIL/anvil/fhir/client.py | mmtmn/client-apis | 215adae0b7f401b4bf62e7bd79b6a8adfe69cf4f | [
"Apache-2.0"
] | null | null | null | """Instances of this class handle authorizing and talking to Google Healthcare API FHIR Service."""
import logging
import threading
from urllib.parse import urlparse
from fhirclient import client
from fhirclient.models.meta import Meta
from fhirclient.models.bundle import Bundle
from anvil.fhir.smart_auth import GoogleFHIRAuth
logger = logging.getLogger(__name__)
class FHIRClient(client.FHIRClient):
    """Instances of this class handle authorizing and talking to Google Healthcare API FHIR Service.

    Parameters:
        See https://github.com/smart-on-fhir/client-py/blob/master/fhirclient/client.py#L19

    Returns:
        Instance of client, with injected authorization method

    Examples: ::

        from anvil.fhir.client import FHIRClient
        settings = {
            'app_id': 'my_web_app',
            'api_base': 'https://healthcare.googleapis.com/v1/projects/gcp-testing-308520/locations/us-east4/datasets/testset/fhirStores/fhirstore/fhir'
        }
        smart = FHIRClient(settings=settings)
        assert smart.ready, "server should be ready"
        # search for all ResearchStudy
        import fhirclient.models.researchstudy as rs
        [s.title for s in rs.ResearchStudy.where(struct={}).perform_resources(smart.server)]
        >>>
        ['1000g-high-coverage-2019', 'my NCPI research study example']

    """

    def __init__(self, *args, **kwargs):
        """Pass args to super, adds GoogleFHIRAuth authenticator, prepares connection."""
        super(FHIRClient, self).__init__(*args, **kwargs)
        # fhirclient 4.x is required for the perform_resources API used above.
        client_major_version = int(client.__version__.split('.')[0])
        assert client_major_version >= 4, f"requires version >= 4.0.0 current version {client.__version__} `pip install -e git+https://github.com/smart-on-fhir/client-py#egg=fhirclient`"
        # Inject Google-credential-based bearer-token auth and transparently
        # re-authorize on HTTP 401 responses.
        self.server.auth = GoogleFHIRAuth()
        self.server.session.hooks['response'].append(self.server.auth.handle_401)
        self.prepare()
        assert self.ready, "server should be ready"
class DispatchingFHIRClient(client.FHIRClient):
"""Instances of this class handle authorizing and talking to Google Healthcare API FHIR Service.
Parameters:
See https://github.com/smart-on-fhir/client-py/blob/master/fhirclient/client.py#L19
:param settings.api_bases: The servers against which to perform the search **settings.api_base ignored**
:param access_token: Optional access token, if none provided `gcloud auth print-access-token` is used
Returns:
Instance of client, with injected authorization method
Examples: ::
from anvil.fhir.client import DispatchingFHIRClient
from fhirclient.models.researchstudy import ResearchStudy
from collections import defaultdict
from pprint import pprint
settings = {
'app_id': 'my_web_app',
'api_bases': [
'https://healthcare.googleapis.com/v1beta1/projects/fhir-test-11-329119/locations/us-west2/datasets/anvil-test/fhirStores/public/fhir',
'https://healthcare.googleapis.com/v1beta1/projects/fhir-test-11-329119/locations/us-west2/datasets/anvil-test/fhirStores/pending/fhir',
]
}
smart = DispatchingFHIRClient(settings=settings)
# search for all ResearchStudy, index by source
studies = defaultdict(list)
for s in ResearchStudy.where(struct={'_count':'1000'}).perform_resources(smart.server):
studies[s.meta.source].append(s)
pprint({k: len(v) for k,v in studies.items()})
>>>
{'https://healthcare.googleapis.com/v1beta1/projects/fhir-test-11-329119/locations/us-west2/datasets/anvil-test/fhirStores/pending/fhir/': 259,
'https://healthcare.googleapis.com/v1beta1/projects/fhir-test-11-329119/locations/us-west2/datasets/anvil-test/fhirStores/public/fhir/': 393}
"""
    def __init__(self, *args, **kwargs):
        """Pass args to super, patches `perform` to our dispatching version."""
        # use the first entry as 'our' server
        _settings = dict(kwargs['settings'])
        api_base = _settings['api_bases'].pop()
        _settings['api_base'] = api_base
        kwargs['settings'] = _settings
        # grab a token if passed
        access_token = None
        if 'access_token' in kwargs:
            access_token = kwargs['access_token']
            del kwargs['access_token']
        # normal setup with our authenticator
        super(DispatchingFHIRClient, self).__init__(*args, **kwargs)
        # fhirclient >= 4 is required for the auth/hook wiring below
        client_major_version = int(client.__version__.split('.')[0])
        assert client_major_version >= 4, f"requires version >= 4.0.0 current version {client.__version__} `pip install -e git+https://github.com/smart-on-fhir/client-py#egg=fhirclient`"
        self.server.auth = GoogleFHIRAuth(access_token=access_token)
        # re-authenticate transparently when a request comes back 401
        self.server.session.hooks['response'].append(self.server.auth.handle_401)
        self.prepare()
        assert self.ready, "server should be ready"
        # set up an array of FHIRClients, including this instance, in self._clients
        # re-use authenticator
        self._clients = [self]
        self._api_bases = _settings['api_bases']
        for api_base in self._api_bases:
            __settings = dict(_settings)
            __settings['api_base'] = api_base
            _client = client.FHIRClient(settings=__settings)
            _client.server.auth = self.server.auth
            _client.server.session.hooks['response'].append(self.server.auth.handle_401)
            _client.prepare()
            self._clients.append(_client)
        # monkey patch search perform if we haven't already
        from fhirclient.models.fhirsearch import FHIRSearch
        if not hasattr(FHIRSearch, '_anvil_patch'):
            # `_anvil_patch` marks the class so the patch is applied at most once
            # per process, even if several DispatchingFHIRClient are constructed.
            FHIRSearch._anvil_patch = True
            logger.debug("******** Needs patching ********")
            original_perform = FHIRSearch.perform
            # capture the constructing instance; the patched perform fans out to
            # *this* client's server list (note: later instances are not seen)
            me = self
            def _perform(self, server):
                """Dispatch query to api_bases."""
                def _worker(self, server, _results):
                    """Dispatches request to underlying class, return an entry indexed by base uri.
                    Sets bundle.meta.source
                    See https://www.hl7.org/fhir/resource-definitions.html#Meta.source
                    :param server: The server against which to perform the search
                    :_results: Result of operation added to this array
                    """
                    logger.debug(f"worker starting {server.base_uri}")
                    result = original_perform(self, server)
                    logger.debug(f"worker got {result}")
                    while result:
                        # add source to meta if it doesn't already exist
                        if not result.meta:
                            result.meta = Meta()
                        if not result.meta.source:
                            result.meta.source = server.base_uri
                        _results.append(result)
                        # follow `next` link for pagination
                        if hasattr(result, 'link'):
                            _next = next((lnk.as_json() for lnk in result.link if lnk.relation == 'next'), None)
                            result = None
                            if _next:
                                logger.debug(f"has next {_next}")
                                # request_json takes a full path & query (not host)
                                parts = urlparse(_next['url'])
                                assert len(parts.query) > 0, parts
                                path = f"{parts.path}?{parts.query}"
                                logger.debug(f"attempting next {path}")
                                res = server.request_json(path)
                                result = Bundle(res)
                                result.origin_server = server
                        else:
                            result = None
                    logger.debug(f"worker done {result}")
                logger.debug("starting threads")
                workers = []
                # shared across threads; list.append is atomic under the GIL
                results = []
                for _client in me._clients:
                    workers.append(
                        threading.Thread(target=_worker, args=(self, _client.server, results, ))
                    )
                # Start workers.
                for w in workers:
                    w.start()
                # Wait for workers to quit.
                logger.debug("waiting for results.")
                for w in workers:
                    w.join()
                logger.debug(f"all workers done. {len(results)}")
                return results
            # monkey patch
            FHIRSearch.perform = _perform
            # since perform returns an array, patch _perform_resources as well.
            def _perform_resources(self, server):
                """Perform the search by calling `perform`, then extracts all Bundle entries and returns a list of Resource instances.
                Sets resource.meta.source
                See https://www.hl7.org/fhir/resource-definitions.html#Meta.source
                :param server: The server against which to perform the search
                :returns: A list of Resource instances
                """
                # flatten into an array of resources
                bundles = self.perform(server)
                resources = []
                if bundles is not None:
                    if not isinstance(bundles, list):
                        bundles = [bundles]
                    for bundle in bundles:
                        if bundle.entry:
                            for entry in bundle.entry:
                                # propagate the bundle's origin onto each resource
                                if not entry.resource.meta:
                                    entry.resource.meta = Meta()
                                if not entry.resource.meta.source:
                                    entry.resource.meta.source = bundle.meta.source
                                resources.append(entry.resource)
                logger.debug("_perform_resources done.")
                return resources
            FHIRSearch.perform_resources = _perform_resources
| 45.608889 | 186 | 0.58663 |
172b837c0985e9196af3263683a12018484ee4d2 | 22 | py | Python | hlfbt/serial_console/__init__.py | hlfbt/serial-console | f9c770ea841c8ac1283b84f5883326363d4db1a8 | [
"MIT"
] | null | null | null | hlfbt/serial_console/__init__.py | hlfbt/serial-console | f9c770ea841c8ac1283b84f5883326363d4db1a8 | [
"MIT"
] | null | null | null | hlfbt/serial_console/__init__.py | hlfbt/serial-console | f9c770ea841c8ac1283b84f5883326363d4db1a8 | [
"MIT"
] | null | null | null | from . import console
| 11 | 21 | 0.772727 |
c18704645a55a0b457b7cef7bee1171b3d206c10 | 721 | py | Python | Semana4/Aula6-Hadoop/q8Reducer.py | cglsoft/DataScience-FDSII | 18c1de0f6cb1471aee88f9a547c242fbbc61fa19 | [
"Apache-2.0"
] | null | null | null | Semana4/Aula6-Hadoop/q8Reducer.py | cglsoft/DataScience-FDSII | 18c1de0f6cb1471aee88f9a547c242fbbc61fa19 | [
"Apache-2.0"
] | null | null | null | Semana4/Aula6-Hadoop/q8Reducer.py | cglsoft/DataScience-FDSII | 18c1de0f6cb1471aee88f9a547c242fbbc61fa19 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys
numberOfOcorr = 0
totalPath = 0
mostPopular = None
oldKey = None
for line in sys.stdin:
data_mapped = line.strip().split("\t")
if len(data_mapped) != 2:
# Something has gone wrong. Skip this line.
continue
thisKey, thisPath = data_mapped
if oldKey and oldKey != thisKey:
if totalPath > numberOfOcorr:
numberOfOcorr = totalPath
mostPopular = oldKey
print( " Popular : {0} - Occurrences : {1}".format(oldKey,totalPath))
totalPath = 0
oldKey = thisKey
totalPath += 1
print("The most popular file is ",mostPopular,"\t","The number of occurrences is ",float(numberOfOcorr)) | 24.862069 | 104 | 0.613037 |
23b29f73096843a64339adea3a80950609f9a28b | 1,683 | py | Python | pyscf/lo/test/test_nao.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | 1 | 2018-05-02T19:55:30.000Z | 2018-05-02T19:55:30.000Z | pyscf/lo/test/test_nao.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | null | null | null | pyscf/lo/test/test_nao.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | 1 | 2018-12-06T03:10:50.000Z | 2018-12-06T03:10:50.000Z | #!/usr/bin/env python
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import scf
from pyscf.lo import nao
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = '''
O 0. 0. 0
1 0. -0.757 0.587
1 0. 0.757 0.587'''
mol.basis = 'cc-pvdz'
mol.build()
mf = scf.RHF(mol)
mf.conv_tol = 1e-14
mf.scf()
mol1 = mol.copy()
mol1.cart = True
mf1 = scf.RHF(mol1).set(conv_tol=1e-14).run()
class KnowValues(unittest.TestCase):
    """Regression checks for pre-NAO and NAO orbital construction."""
    def test_pre_nao(self):
        """Pre-NAO coefficients keep their reference norms (spherical and cartesian)."""
        coeff = nao.prenao(mol, mf.make_rdm1())
        self.assertAlmostEqual(numpy.linalg.norm(coeff), 5.7742626195362039, 9)
        self.assertAlmostEqual(abs(coeff).sum(), 33.214804163888289, 6)
        coeff = nao.prenao(mol1, mf1.make_rdm1())
        self.assertAlmostEqual(numpy.linalg.norm(coeff), 5.5434134741828105, 9)
        self.assertAlmostEqual(abs(coeff).sum(), 31.999905597187052, 6)
    def test_nao(self):
        """NAOs are orthonormal w.r.t. the AO overlap and keep reference norms."""
        coeff = nao.nao(mol, mf)
        ovlp = mf.get_ovlp()
        orthonormality = reduce(numpy.dot, (coeff.T, ovlp, coeff))
        self.assertTrue(numpy.allclose(orthonormality, numpy.eye(ovlp.shape[0])))
        self.assertAlmostEqual(numpy.linalg.norm(coeff), 8.982385484322208, 9)
        self.assertAlmostEqual(abs(coeff).sum(), 90.443872916389637, 6)
        coeff = nao.nao(mol1, mf1)
        ovlp = mf1.get_ovlp()
        orthonormality = reduce(numpy.dot, (coeff.T, ovlp, coeff))
        self.assertTrue(numpy.allclose(orthonormality, numpy.eye(ovlp.shape[0])))
        self.assertAlmostEqual(numpy.linalg.norm(coeff), 9.4629575662640129, 9)
        self.assertAlmostEqual(abs(coeff).sum(), 100.24554485355642, 6)
if __name__ == "__main__":
print("Test orth")
unittest.main()
| 28.525424 | 75 | 0.607249 |
ba22fa61c99c4656a3ea833772fc6faa29ea4c89 | 7,234 | py | Python | examples/pybullet/gym/pybullet_envs/gym_locomotion_envs.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 9,136 | 2015-01-02T00:41:45.000Z | 2022-03-31T15:30:02.000Z | examples/pybullet/gym/pybullet_envs/gym_locomotion_envs.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,424 | 2015-01-05T08:55:58.000Z | 2022-03-30T19:34:55.000Z | examples/pybullet/gym/pybullet_envs/gym_locomotion_envs.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,921 | 2015-01-02T10:19:30.000Z | 2022-03-31T02:48:42.000Z | from .scene_stadium import SinglePlayerStadiumScene
from .env_bases import MJCFBaseBulletEnv
import numpy as np
import pybullet
from robot_locomotors import Hopper, Walker2D, HalfCheetah, Ant, Humanoid, HumanoidFlagrun, HumanoidFlagrunHarder
class WalkerBaseBulletEnv(MJCFBaseBulletEnv):
  """Shared Gym environment logic for the walking robots defined below.

  A subclass supplies a robot (Hopper, Walker2D, Ant, Humanoid, ...); this
  class wires it into a stadium scene and implements the shaped locomotion
  reward in `step` (alive bonus + progress - energy/joint/collision costs).
  """

  def __init__(self, robot, render=False):
    # print("WalkerBase::__init__ start")
    self.camera_x = 0
    self.walk_target_x = 1e3  # kilometer away
    self.walk_target_y = 0
    # cached pybullet saved-state id; -1 means "not saved yet" (see reset())
    self.stateId = -1
    MJCFBaseBulletEnv.__init__(self, robot, render)

  def create_single_player_scene(self, bullet_client):
    """Build the stadium scene this robot walks in (4 substeps per frame)."""
    self.stadium_scene = SinglePlayerStadiumScene(bullet_client,
                                                  gravity=9.8,
                                                  timestep=0.0165 / 4,
                                                  frame_skip=4)
    return self.stadium_scene

  def reset(self):
    """Reset the simulation, re-add the robot, and cache a saved state for fast resets."""
    if (self.stateId >= 0):
      #print("restoreState self.stateId:",self.stateId)
      self._p.restoreState(self.stateId)
    r = MJCFBaseBulletEnv.reset(self)
    # disable rendering while the robot is (re)built, then turn it back on
    self._p.configureDebugVisualizer(pybullet.COV_ENABLE_RENDERING, 0)
    self.parts, self.jdict, self.ordered_joints, self.robot_body = self.robot.addToScene(
        self._p, self.stadium_scene.ground_plane_mjcf)
    # (body id, link index) pairs considered "ground" for foot-contact checks
    self.ground_ids = set([(self.parts[f].bodies[self.parts[f].bodyIndex],
                            self.parts[f].bodyPartIndex) for f in self.foot_ground_object_names])
    self._p.configureDebugVisualizer(pybullet.COV_ENABLE_RENDERING, 1)
    if (self.stateId < 0):
      self.stateId = self._p.saveState()
      #print("saving state self.stateId:",self.stateId)
    return r

  def _isDone(self):
    # episode ends when the alive bonus goes negative (robot fell)
    return self._alive < 0

  def move_robot(self, init_x, init_y, init_z):
    "Used by multiplayer stadium to move sideways, to another running lane."
    self.cpp_robot.query_position()
    pose = self.cpp_robot.root_part.pose()
    pose.move_xyz(
        init_x, init_y, init_z
    )  # Works because robot loads around (0,0,0), and some robots have z != 0 that is left intact
    self.cpp_robot.set_pose(pose)

  # Reward-shaping coefficients (class-level so subclasses can rescale them).
  electricity_cost = -2.0  # cost for using motors -- this parameter should be carefully tuned against reward for making progress, other values less improtant
  stall_torque_cost = -0.1  # cost for running electric current through a motor even at zero rotational speed, small
  foot_collision_cost = -1.0  # touches another leg, or other objects, that cost makes robot avoid smashing feet into itself
  foot_ground_object_names = set(["floor"])  # to distinguish ground and other objects
  joints_at_limit_cost = -0.1  # discourage stuck joints

  def step(self, a):
    """Apply action `a`, advance the scene, and return (state, reward, done, info)."""
    if not self.scene.multiplayer:  # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
      self.robot.apply_action(a)
      self.scene.global_step()
    state = self.robot.calc_state()  # also calculates self.joints_at_limit
    self._alive = float(
        self.robot.alive_bonus(
            state[0] + self.robot.initial_z,
            self.robot.body_rpy[1]))  # state[0] is body height above ground, body_rpy[1] is pitch
    done = self._isDone()
    if not np.isfinite(state).all():
      print("~INF~", state)
      done = True
    # progress = change in potential toward the walk target since last step
    potential_old = self.potential
    self.potential = self.robot.calc_potential()
    progress = float(self.potential - potential_old)
    feet_collision_cost = 0.0
    for i, f in enumerate(
        self.robot.feet
    ):  # TODO: Maybe calculating feet contacts could be done within the robot code
      contact_ids = set((x[2], x[4]) for x in f.contact_list())
      #print("CONTACT OF '%d' WITH %d" % (contact_ids, ",".join(contact_names)) )
      if (self.ground_ids & contact_ids):
        #see Issue 63: https://github.com/openai/roboschool/issues/63
        #feet_collision_cost += self.foot_collision_cost
        self.robot.feet_contact[i] = 1.0
      else:
        self.robot.feet_contact[i] = 0.0
    electricity_cost = self.electricity_cost * float(np.abs(a * self.robot.joint_speeds).mean(
    ))  # let's assume we have DC motor with controller, and reverse current braking
    electricity_cost += self.stall_torque_cost * float(np.square(a).mean())
    joints_at_limit_cost = float(self.joints_at_limit_cost * self.robot.joints_at_limit)
    debugmode = 0
    if (debugmode):
      print("alive=")
      print(self._alive)
      print("progress")
      print(progress)
      print("electricity_cost")
      print(electricity_cost)
      print("joints_at_limit_cost")
      print(joints_at_limit_cost)
      print("feet_collision_cost")
      print(feet_collision_cost)
    # reward is the plain sum of these components
    self.rewards = [
        self._alive, progress, electricity_cost, joints_at_limit_cost, feet_collision_cost
    ]
    if (debugmode):
      print("rewards=")
      print(self.rewards)
      print("sum rewards")
      print(sum(self.rewards))
    self.HUD(state, a, done)
    self.reward += sum(self.rewards)
    return state, sum(self.rewards), bool(done), {}

  def camera_adjust(self):
    # follow the robot along x while looking at its body
    x, y, z = self.robot.body_real_xyz
    self.camera_x = x
    self.camera.move_and_look_at(self.camera_x, y , 1.4, x, y, 1.0)
class HopperBulletEnv(WalkerBaseBulletEnv):
  """Locomotion task driving the single-legged Hopper robot."""

  def __init__(self, render=False):
    robot = Hopper()
    self.robot = robot
    super(HopperBulletEnv, self).__init__(robot, render)
class Walker2DBulletEnv(WalkerBaseBulletEnv):
  """Locomotion task driving the bipedal Walker2D robot."""

  def __init__(self, render=False):
    robot = Walker2D()
    self.robot = robot
    super(Walker2DBulletEnv, self).__init__(robot, render)
class HalfCheetahBulletEnv(WalkerBaseBulletEnv):
  """Locomotion task driving the HalfCheetah robot; episodes never end early."""

  def __init__(self, render=False):
    robot = HalfCheetah()
    self.robot = robot
    super(HalfCheetahBulletEnv, self).__init__(robot, render)

  def _isDone(self):
    # HalfCheetah is never terminated by the alive check.
    return False
class AntBulletEnv(WalkerBaseBulletEnv):
  """Locomotion task driving the quadruped Ant robot."""

  def __init__(self, render=False):
    robot = Ant()
    self.robot = robot
    super(AntBulletEnv, self).__init__(robot, render)
class HumanoidBulletEnv(WalkerBaseBulletEnv):
  """Locomotion task for the Humanoid; energy penalties are scaled up 4.25x."""

  def __init__(self, robot=None, render=False):
    # A subclass may inject its own robot; otherwise use the stock Humanoid.
    self.robot = Humanoid() if robot is None else robot
    super(HumanoidBulletEnv, self).__init__(self.robot, render)
    self.electricity_cost = 4.25 * WalkerBaseBulletEnv.electricity_cost
    self.stall_torque_cost = 4.25 * WalkerBaseBulletEnv.stall_torque_cost
class HumanoidFlagrunBulletEnv(HumanoidBulletEnv):
  """Humanoid variant whose walk target (flag) moves around the stadium."""
  random_yaw = True

  def __init__(self, render=False):
    robot = HumanoidFlagrun()
    self.robot = robot
    super(HumanoidFlagrunBulletEnv, self).__init__(robot, render)

  def create_single_player_scene(self, bullet_client):
    scene = super(HumanoidFlagrunBulletEnv, self).create_single_player_scene(bullet_client)
    # don't pin the origin to the running-strip start line for flag-run
    scene.zero_at_running_strip_start_line = False
    return scene
class HumanoidFlagrunHarderBulletEnv(HumanoidBulletEnv):
  """Harder flag-run variant: the humanoid may start leaning or fallen over."""
  random_lean = True  # can fall on start

  def __init__(self, render=False):
    robot = HumanoidFlagrunHarder()
    self.robot = robot
    # NOTE(review): this scales the class-level cost, but HumanoidBulletEnv's
    # __init__ below re-assigns self.electricity_cost afterwards — kept in the
    # original order to preserve behavior exactly.
    self.electricity_cost /= 4  # don't care that much about electricity, just stand up!
    super(HumanoidFlagrunHarderBulletEnv, self).__init__(robot, render)

  def create_single_player_scene(self, bullet_client):
    scene = super(HumanoidFlagrunHarderBulletEnv, self).create_single_player_scene(bullet_client)
    # don't pin the origin to the running-strip start line for flag-run
    scene.zero_at_running_strip_start_line = False
    return scene
| 35.635468 | 166 | 0.700581 |
da9a615070cbdc91da3f3619727658907d744338 | 4,977 | py | Python | angr/knowledge_plugins/functions/soot_function.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 6,132 | 2015-08-06T23:24:47.000Z | 2022-03-31T21:49:34.000Z | angr/knowledge_plugins/functions/soot_function.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 2,272 | 2015-08-10T08:40:07.000Z | 2022-03-31T23:46:44.000Z | angr/knowledge_plugins/functions/soot_function.py | Kyle-Kyle/angr | 345b2131a7a67e3a6ffc7d9fd475146a3e12f837 | [
"BSD-2-Clause"
] | 1,155 | 2015-08-06T23:37:39.000Z | 2022-03-31T05:54:11.000Z |
import os
import networkx
from collections import defaultdict
from .function import Function
class SootFunction(Function):
    """
    A representation of a function and various information about it.
    """
    def __init__(self, function_manager, addr, name=None, syscall=None):
        """
        Function constructor for Soot
        :param addr:            The address of the function.
        :param name:            (Optional) The name of the function.
        :param syscall:         (Optional) Whether this function is a syscall or not.
        """
        self.transition_graph = networkx.DiGraph()
        self._local_transition_graph = None
        # The Shimple CFG is already normalized.
        self.normalized = True
        # block nodes at whose ends the function returns
        self._ret_sites = set()
        # block nodes at whose ends the function jumps out to another function (jumps outside)
        self._jumpout_sites = set()
        # block nodes at whose ends the function calls out to another non-returning function
        self._callout_sites = set()
        # block nodes that ends the function by returning out to another function (returns outside). This is rare.
        self._retout_sites = set()
        # block nodes (basic block nodes) at whose ends the function terminates
        # in theory, if everything works fine, endpoints == ret_sites | jumpout_sites | callout_sites
        self._endpoints = defaultdict(set)
        self._call_sites = {}
        self.addr = addr
        self._function_manager = function_manager
        self.is_syscall = syscall
        # keep a local alias `project` for the checks below
        self._project = project = self._function_manager._kb._project
        self.is_plt = False
        self.is_simprocedure = False
        if project.is_hooked(addr):
            self.is_simprocedure = True
        # derive the owning binary's name: prefer the hooking SimProcedure's
        # library, fall back to the binary file's basename
        binary_name = None
        if self.is_simprocedure:
            hooker = project.hooked_by(addr)
            if hooker is not None:
                binary_name = hooker.library_name
        if binary_name is None and self.binary is not None:
            binary_name = os.path.basename(self.binary.binary)
        # Soot addresses carry the method identity; their repr is the name
        self._name = addr.__repr__()
        self.binary_name = binary_name
        # Stack offsets of those arguments passed in stack variables
        self._argument_stack_variables = []
        # These properties are set by VariableManager
        self.bp_on_stack = False
        self.retaddr_on_stack = False
        self.sp_delta = 0
        # Calling convention
        self.calling_convention = None
        # Function prototype
        self.prototype = None
        # Whether this function returns or not. `None` means it's not determined yet
        self._returning = None
        self.alignment = None
        # Determine returning status for SimProcedures and Syscalls
        hooker = None
        if self.is_simprocedure:
            hooker = project.hooked_by(addr)
        if hooker and hasattr(hooker, 'NO_RET'):
            # `returning` — presumably a property on the Function base class
            # that records _returning; confirm against base implementation
            self.returning = not hooker.NO_RET
        self.prepared_registers = set()
        self.prepared_stack_variables = set()
        self.registers_read_afterwards = set()
        # startpoint can always be None if this CFGNode is a syscall node
        self.startpoint = None
        self._addr_to_block_node = {}  # map addresses to nodes
        self._block_sizes = {}  # map addresses to block sizes
        self._block_cache = {}  # a cache of real, hard data Block objects
        self._local_blocks = {}  # a dict of all blocks inside the function
        self._local_block_addrs = set()  # a set of addresses of all blocks inside the function
        self.info = { }  # storing special information, like $gp values for MIPS32
        self.tags = tuple()  # store function tags. can be set manually by performing CodeTagging analysis.
    def normalize(self):
        # The Shimple CFG is already normalized.
        pass
    def _register_nodes(self, is_local, *nodes):
        """Add *nodes* to the transition graph, recording sizes, the startpoint,
        and (when is_local) membership in this function's local block sets.
        Note: BlockNode / AngrValueError are imported at the bottom of this
        module to avoid circular imports.
        """
        if not isinstance(is_local, bool):
            raise AngrValueError('_register_nodes(): the "is_local" parameter must be a bool')
        for node in nodes:
            self.transition_graph.add_node(node)
            node._graph = self.transition_graph
            if node.addr not in self or self._block_sizes[node.addr] == 0:
                self._block_sizes[node.addr] = node.size
            if node.addr == self.addr.addr:
                # prefer a non-hook node as the function's startpoint
                if self.startpoint is None or not self.startpoint.is_hook:
                    self.startpoint = node
            if is_local:
                self._local_blocks[node.addr] = node
                self._local_block_addrs.add(node.addr)
            # add BlockNodes to the addr_to_block_node cache if not already there
            if isinstance(node, BlockNode):
                if node.addr not in self._addr_to_block_node:
                    self._addr_to_block_node[node.addr] = node
from ...codenode import BlockNode
from ...errors import AngrValueError
| 37.421053 | 114 | 0.640547 |
4a32e134f90ceef3fb5744f8eef75133126dd46c | 9,781 | py | Python | alf/algorithms/predictive_representation_learner_test.py | 1nF0rmed/alf | 84bf56379d5fb552fb43365c5a77d8edc46d06c3 | [
"Apache-2.0"
] | null | null | null | alf/algorithms/predictive_representation_learner_test.py | 1nF0rmed/alf | 84bf56379d5fb552fb43365c5a77d8edc46d06c3 | [
"Apache-2.0"
] | null | null | null | alf/algorithms/predictive_representation_learner_test.py | 1nF0rmed/alf | 84bf56379d5fb552fb43365c5a77d8edc46d06c3 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pprint
import torch
import alf
import alf.data_structures as ds
from alf.algorithms.predictive_representation_learner import (
PredictiveRepresentationLearner, PredictiveRepresentationLearnerInfo,
SimpleDecoder)
from alf.experience_replayers.replay_buffer import ReplayBuffer, BatchInfo
from alf.networks import EncodingNetwork, LSTMEncodingNetwork
from alf.utils import common, dist_utils
class PredictiveRepresentationLearnerTest(alf.test.TestCase):
    def test_preprocess_experience(self):
        """
        The following summarizes how the data is generated:

        .. code-block:: python

            # position:   01234567890123
            step_type0 = 'FMMMLFMMLFMMMM'
            step_type1 = 'FMMMMMLFMMMMLF'
            reward = position if train_reward_function and td_steps!=-1
                     else position * (step_type == LAST)
            action = t + 1 for env 0
                     t for env 1
        """
        num_unroll_steps = 4
        batch_size = 2
        obs_dim = 3
        observation_spec = alf.TensorSpec([obs_dim])
        action_spec = alf.BoundedTensorSpec((1, ),
                                            minimum=0,
                                            maximum=1,
                                            dtype=torch.float32)
        reward_spec = alf.TensorSpec(())
        time_step_spec = ds.time_step_spec(observation_spec, action_spec,
                                           reward_spec)
        # learner under test: LSTM encoder/dynamics with a reward decoder
        repr_learner = PredictiveRepresentationLearner(
            observation_spec,
            action_spec,
            num_unroll_steps=num_unroll_steps,
            decoder_ctor=partial(
                SimpleDecoder,
                target_field='reward',
                decoder_net_ctor=partial(
                    EncodingNetwork, fc_layer_params=(4, ))),
            encoding_net_ctor=LSTMEncodingNetwork,
            dynamics_net_ctor=LSTMEncodingNetwork)
        time_step = common.zero_tensor_from_nested_spec(
            time_step_spec, batch_size)
        state = repr_learner.get_initial_predict_state(batch_size)
        # one dummy rollout step to derive the experience spec
        alg_step = repr_learner.rollout_step(time_step, state)
        alg_step = alg_step._replace(output=torch.tensor([[1.], [0.]]))
        alg_step_spec = dist_utils.extract_spec(alg_step)
        experience = ds.make_experience(time_step, alg_step, state)
        experience_spec = ds.make_experience(time_step_spec, alg_step_spec,
                                             repr_learner.train_state_spec)
        replay_buffer = ReplayBuffer(
            data_spec=experience_spec,
            num_environments=batch_size,
            max_length=16,
            keep_episodic_info=True)
        # Fill the buffer with 14 steps for each of the two environments,
        # following the episode layouts described in the docstring above.
        #             01234567890123
        step_type0 = 'FMMMLFMMLFMMMM'
        step_type1 = 'FMMMMMLFMMMMLF'
        for i in range(len(step_type0)):
            step_type = [step_type0[i], step_type1[i]]
            step_type = [
                ds.StepType.MID if c == 'M' else
                (ds.StepType.FIRST if c == 'F' else ds.StepType.LAST)
                for c in step_type
            ]
            step_type = torch.tensor(step_type, dtype=torch.int32)
            # NOTE(review): duplicated assignment kept verbatim from original
            reward = reward = torch.full([batch_size], float(i))
            time_step = time_step._replace(
                discount=(step_type != ds.StepType.LAST).to(torch.float32),
                step_type=step_type,
                observation=torch.tensor([[i, i + 1, i], [i + 1, i, i]],
                                         dtype=torch.float32),
                reward=reward,
                env_id=torch.arange(batch_size, dtype=torch.int32))
            alg_step = repr_learner.rollout_step(time_step, state)
            alg_step = alg_step._replace(output=i + torch.tensor([[1.], [0.]]))
            experience = ds.make_experience(time_step, alg_step, state)
            replay_buffer.add_batch(experience)
            state = alg_step.state
        # Read back every (env, position) pair and run preprocess_experience,
        # which unrolls actions/targets num_unroll_steps ahead with masking.
        env_ids = torch.tensor([0] * 14 + [1] * 14, dtype=torch.int64)
        positions = torch.tensor(
            list(range(14)) + list(range(14)), dtype=torch.int64)
        experience = replay_buffer.get_field(None,
                                             env_ids.unsqueeze(-1).cpu(),
                                             positions.unsqueeze(-1).cpu())
        experience = experience._replace(
            replay_buffer=replay_buffer,
            batch_info=BatchInfo(env_ids=env_ids, positions=positions),
            rollout_info_field='rollout_info')
        processed_experience = repr_learner.preprocess_experience(experience)
        pprint.pprint(processed_experience.rollout_info)
        # Expected unrolled (action, mask, target) per (env, position); rows
        # past an episode boundary repeat the last value and are masked out.
        # yapf: disable
        expected = PredictiveRepresentationLearnerInfo(
            action=torch.tensor(
                [[[ 1.,  2.,  3.,  4.,  5.]],
                 [[ 2.,  3.,  4.,  5.,  5.]],
                 [[ 3.,  4.,  5.,  5.,  5.]],
                 [[ 4.,  5.,  5.,  5.,  5.]],
                 [[ 5.,  5.,  5.,  5.,  5.]],
                 [[ 6.,  7.,  8.,  9.,  9.]],
                 [[ 7.,  8.,  9.,  9.,  9.]],
                 [[ 8.,  9.,  9.,  9.,  9.]],
                 [[ 9.,  9.,  9.,  9.,  9.]],
                 [[10., 11., 12., 13., 14.]],
                 [[11., 12., 13., 14., 14.]],
                 [[12., 13., 14., 14., 14.]],
                 [[13., 14., 14., 14., 14.]],
                 [[14., 14., 14., 14., 14.]],
                 [[ 0.,  1.,  2.,  3.,  4.]],
                 [[ 1.,  2.,  3.,  4.,  5.]],
                 [[ 2.,  3.,  4.,  5.,  6.]],
                 [[ 3.,  4.,  5.,  6.,  6.]],
                 [[ 4.,  5.,  6.,  6.,  6.]],
                 [[ 5.,  6.,  6.,  6.,  6.]],
                 [[ 6.,  6.,  6.,  6.,  6.]],
                 [[ 7.,  8.,  9., 10., 11.]],
                 [[ 8.,  9., 10., 11., 12.]],
                 [[ 9., 10., 11., 12., 12.]],
                 [[10., 11., 12., 12., 12.]],
                 [[11., 12., 12., 12., 12.]],
                 [[12., 12., 12., 12., 12.]],
                 [[13., 13., 13., 13., 13.]]]).unsqueeze(-1),
            mask=torch.tensor(
                [[[ True,  True,  True,  True,  True]],
                 [[ True,  True,  True,  True, False]],
                 [[ True,  True,  True, False, False]],
                 [[ True,  True, False, False, False]],
                 [[ True, False, False, False, False]],
                 [[ True,  True,  True,  True, False]],
                 [[ True,  True,  True, False, False]],
                 [[ True,  True, False, False, False]],
                 [[ True, False, False, False, False]],
                 [[ True,  True,  True,  True,  True]],
                 [[ True,  True,  True,  True, False]],
                 [[ True,  True,  True, False, False]],
                 [[ True,  True, False, False, False]],
                 [[ True, False, False, False, False]],
                 [[ True,  True,  True,  True,  True]],
                 [[ True,  True,  True,  True,  True]],
                 [[ True,  True,  True,  True,  True]],
                 [[ True,  True,  True,  True, False]],
                 [[ True,  True,  True, False, False]],
                 [[ True,  True, False, False, False]],
                 [[ True, False, False, False, False]],
                 [[ True,  True,  True,  True,  True]],
                 [[ True,  True,  True,  True,  True]],
                 [[ True,  True,  True,  True, False]],
                 [[ True,  True,  True, False, False]],
                 [[ True,  True, False, False, False]],
                 [[ True, False, False, False, False]],
                 [[ True, False, False, False, False]]]),
            target=torch.tensor(
                [[[ 0.,  1.,  2.,  3.,  4.]],
                 [[ 1.,  2.,  3.,  4.,  4.]],
                 [[ 2.,  3.,  4.,  4.,  4.]],
                 [[ 3.,  4.,  4.,  4.,  4.]],
                 [[ 4.,  4.,  4.,  4.,  4.]],
                 [[ 5.,  6.,  7.,  8.,  8.]],
                 [[ 6.,  7.,  8.,  8.,  8.]],
                 [[ 7.,  8.,  8.,  8.,  8.]],
                 [[ 8.,  8.,  8.,  8.,  8.]],
                 [[ 9., 10., 11., 12., 13.]],
                 [[10., 11., 12., 13., 13.]],
                 [[11., 12., 13., 13., 13.]],
                 [[12., 13., 13., 13., 13.]],
                 [[13., 13., 13., 13., 13.]],
                 [[ 0.,  1.,  2.,  3.,  4.]],
                 [[ 1.,  2.,  3.,  4.,  5.]],
                 [[ 2.,  3.,  4.,  5.,  6.]],
                 [[ 3.,  4.,  5.,  6.,  6.]],
                 [[ 4.,  5.,  6.,  6.,  6.]],
                 [[ 5.,  6.,  6.,  6.,  6.]],
                 [[ 6.,  6.,  6.,  6.,  6.]],
                 [[ 7.,  8.,  9., 10., 11.]],
                 [[ 8.,  9., 10., 11., 12.]],
                 [[ 9., 10., 11., 12., 12.]],
                 [[10., 11., 12., 12., 12.]],
                 [[11., 12., 12., 12., 12.]],
                 [[12., 12., 12., 12., 12.]],
                 [[13., 13., 13., 13., 13.]]]))
        # yapf: enable
        alf.nest.map_structure(lambda x, y: self.assertEqual(x, y),
                               processed_experience.rollout_info, expected)
# Allow running this test file directly (outside the alf test runner).
if __name__ == '__main__':
    alf.test.main()
| 44.257919 | 80 | 0.458644 |
acdb01d3d3f5f913d3e7525c8e210687055cc9e9 | 20,145 | py | Python | python/pyspark/sql/functions.py | varadharajan/spark | a71cbbdea581573192a59bf8472861c463c40fcb | [
"Apache-2.0",
"MIT"
] | null | null | null | python/pyspark/sql/functions.py | varadharajan/spark | a71cbbdea581573192a59bf8472861c463c40fcb | [
"Apache-2.0",
"MIT"
] | null | null | null | python/pyspark/sql/functions.py | varadharajan/spark | a71cbbdea581573192a59bf8472861c463c40fcb | [
"Apache-2.0",
"MIT"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import math
import sys
if sys.version < "3":
from itertools import imap as map
from pyspark import SparkContext
from pyspark.rdd import _prepare_for_python_RDD, ignore_unicode_prefix
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import since
from pyspark.sql.types import StringType
from pyspark.sql.column import Column, _to_java_column, _to_seq
# Hand-written public API; the generated wrappers below extend this list.
__all__ = [
    'array',
    'approxCountDistinct',
    'coalesce',
    'countDistinct',
    'explode',
    'monotonicallyIncreasingId',
    'rand',
    'randn',
    'sparkPartitionId',
    'struct',
    'udf',
    'when']
# Window functions defined elsewhere in this module.
__all__ += ['lag', 'lead', 'ntile']
def _create_function(name, doc=""):
""" Create a function for aggregator by name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# users might write ints for simplicity. This would throw an error on the JVM side.
jc = getattr(sc._jvm.functions, name)(col1._jc if isinstance(col1, Column) else float(col1),
col2._jc if isinstance(col2, Column) else float(col2))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_window_function(name, doc=''):
    """Build a zero-argument window-function wrapper for the JVM function *name*."""
    def wrapper():
        sc = SparkContext._active_spark_context
        return Column(getattr(sc._jvm.functions, name)())
    wrapper.__name__ = name
    wrapper.__doc__ = 'Window function: ' + doc
    return wrapper
# Docstrings for the simple 1.3-era column/aggregate functions generated below
# by `_create_function`; each key becomes a module-level function name.
_functions = {
    'lit': 'Creates a :class:`Column` of literal value.',
    'col': 'Returns a :class:`Column` based on the given column name.',
    'column': 'Returns a :class:`Column` based on the given column name.',
    'asc': 'Returns a sort expression based on the ascending order of the given column name.',
    'desc': 'Returns a sort expression based on the descending order of the given column name.',
    'upper': 'Converts a string expression to upper case.',
    # BUG FIX: this docstring previously read "upper case" (copy-paste error).
    'lower': 'Converts a string expression to lower case.',
    'sqrt': 'Computes the square root of the specified float value.',
    'abs': 'Computes the absolute value.',
    'max': 'Aggregate function: returns the maximum value of the expression in a group.',
    'min': 'Aggregate function: returns the minimum value of the expression in a group.',
    'first': 'Aggregate function: returns the first value in a group.',
    'last': 'Aggregate function: returns the last value in a group.',
    'count': 'Aggregate function: returns the number of items in a group.',
    'sum': 'Aggregate function: returns the sum of all values in the expression.',
    'avg': 'Aggregate function: returns the average of the values in a group.',
    'mean': 'Aggregate function: returns the average of the values in a group.',
    'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
# Docstrings for functions added in Spark 1.4, generated below by
# `_create_function`.  BUG FIX: the 'acos' and 'asin' entries concatenated
# their string pieces without a separating space ("range0.0", "range-pi/2").
_functions_1_4 = {
    # unary math functions
    'acos': 'Computes the cosine inverse of the given value; the returned angle is in the range ' +
            '0.0 through pi.',
    'asin': 'Computes the sine inverse of the given value; the returned angle is in the range ' +
            '-pi/2 through pi/2.',
    'atan': 'Computes the tangent inverse of the given value.',
    'cbrt': 'Computes the cube-root of the given value.',
    'ceil': 'Computes the ceiling of the given value.',
    'cos': 'Computes the cosine of the given value.',
    'cosh': 'Computes the hyperbolic cosine of the given value.',
    'exp': 'Computes the exponential of the given value.',
    'expm1': 'Computes the exponential of the given value minus one.',
    'floor': 'Computes the floor of the given value.',
    'log': 'Computes the natural logarithm of the given value.',
    'log10': 'Computes the logarithm of the given value in Base 10.',
    'log1p': 'Computes the natural logarithm of the given value plus one.',
    'rint': 'Returns the double value that is closest in value to the argument and' +
            ' is equal to a mathematical integer.',
    'signum': 'Computes the signum of the given value.',
    'sin': 'Computes the sine of the given value.',
    'sinh': 'Computes the hyperbolic sine of the given value.',
    'tan': 'Computes the tangent of the given value.',
    'tanh': 'Computes the hyperbolic tangent of the given value.',
    'toDegrees': 'Converts an angle measured in radians to an approximately equivalent angle ' +
                 'measured in degrees.',
    'toRadians': 'Converts an angle measured in degrees to an approximately equivalent angle ' +
                 'measured in radians.',
    'bitwiseNOT': 'Computes bitwise not.',
}
# math functions that take two arguments as input
# BUG FIX: the 'atan2' entry concatenated "to" + "polar" without a space.
_binary_mathfunctions = {
    'atan2': 'Returns the angle theta from the conversion of rectangular coordinates (x, y) to ' +
             'polar coordinates (r, theta).',
    'hypot': 'Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.',
    'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
'rowNumber':
"""returns a sequential number starting at 1 within a window partition.
This is equivalent to the ROW_NUMBER function in SQL.""",
'denseRank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and denseRank is that denseRank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using denseRank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and denseRank is that denseRank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using denseRank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third.
This is equivalent to the RANK function in SQL.""",
'cumeDist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.
This is equivalent to the CUME_DIST function in SQL.""",
'percentRank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.
This is equivalent to the PERCENT_RANK function in SQL.""",
}
# Generate the module-level wrappers from the docstring tables above and tag
# each one with the Spark version it appeared in via `since`.
for _name, _doc in _functions.items():
    globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_1_4.items():
    globals()[_name] = since(1.4)(_create_function(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
    globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
    globals()[_name] = since(1.4)(_create_window_function(_name, _doc))
del _name, _doc
# Export all generated wrappers as part of the public API.
__all__ += _functions.keys()
__all__ += _functions_1_4.keys()
__all__ += _binary_mathfunctions.keys()
__all__ += _window_functions.keys()
__all__.sort()
@since(1.4)
def array(*cols):
    """Creates a new array column.

    :param cols: list of column names (string) or list of :class:`Column` expressions that have
        the same data type.

    >>> df.select(array('age', 'age').alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    >>> df.select(array([df.age, df.age]).alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    """
    ctx = SparkContext._active_spark_context
    # Convenience: allow array([c1, c2]) in addition to array(c1, c2).
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.array(_to_seq(ctx, cols, _to_java_column)))
@since(1.3)
def approxCountDistinct(col, rsd=None):
    """Returns a new :class:`Column` for approximate distinct count of ``col``.

    >>> df.agg(approxCountDistinct(df.age).alias('c')).collect()
    [Row(c=2)]
    """
    ctx = SparkContext._active_spark_context
    java_col = _to_java_column(col)
    # Forward the relative standard deviation only when the caller supplied one.
    if rsd is None:
        return Column(ctx._jvm.functions.approxCountDistinct(java_col))
    return Column(ctx._jvm.functions.approxCountDistinct(java_col, rsd))
@since(1.4)
def coalesce(*cols):
    """Returns the first column that is not null.

    >>> cDf = sqlContext.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
    >>> cDf.show()
    +----+----+
    |   a|   b|
    +----+----+
    |null|null|
    |   1|null|
    |null|   2|
    +----+----+

    >>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
    +-------------+
    |Coalesce(a,b)|
    +-------------+
    |         null|
    |            1|
    |            2|
    +-------------+

    >>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
    +----+----+---------------+
    |   a|   b|Coalesce(a,0.0)|
    +----+----+---------------+
    |null|null|            0.0|
    |   1|null|            1.0|
    |null|   2|            0.0|
    +----+----+---------------+
    """
    ctx = SparkContext._active_spark_context
    # Convert the Python columns to a JVM Seq and delegate to functions.coalesce.
    return Column(ctx._jvm.functions.coalesce(_to_seq(ctx, cols, _to_java_column)))
@since(1.3)
def countDistinct(col, *cols):
    """Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.

    >>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
    [Row(c=2)]

    >>> df.agg(countDistinct("age", "name").alias('c')).collect()
    [Row(c=2)]
    """
    ctx = SparkContext._active_spark_context
    # The first column is passed separately; the rest go over as a JVM Seq.
    remaining = _to_seq(ctx, cols, _to_java_column)
    return Column(ctx._jvm.functions.countDistinct(_to_java_column(col), remaining))
@since(1.4)
def explode(col):
    """Returns a new row for each element in the given array or map.

    >>> from pyspark.sql import Row
    >>> eDF = sqlContext.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
    [Row(anInt=1), Row(anInt=2), Row(anInt=3)]

    >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
    +---+-----+
    |key|value|
    +---+-----+
    |  a|    b|
    +---+-----+
    """
    ctx = SparkContext._active_spark_context
    # One output row per array element (or per map entry) of the input column.
    return Column(ctx._jvm.functions.explode(_to_java_column(col)))
@since(1.4)
def monotonicallyIncreasingId():
    """A column that generates monotonically increasing 64-bit integers.

    The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
    The current implementation puts the partition ID in the upper 31 bits, and the record number
    within each partition in the lower 33 bits. The assumption is that the data frame has
    less than 1 billion partitions, and each partition has less than 8 billion records.

    As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
    This expression would return the following IDs:
    0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.

    >>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
    >>> df0.select(monotonicallyIncreasingId().alias('id')).collect()
    [Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.monotonicallyIncreasingId())
@since(1.4)
def rand(seed=None):
    """Generates a random column with i.i.d. samples from U[0.0, 1.0].

    :param seed: optional integer seed for the random generator. ``None``
        lets Spark pick a seed; any integer, including 0, is honored.
    """
    sc = SparkContext._active_spark_context
    # Bug fix: use an explicit None check so that seed=0 is forwarded to the
    # JVM instead of being silently discarded by the old truthiness test.
    if seed is not None:
        jc = sc._jvm.functions.rand(seed)
    else:
        jc = sc._jvm.functions.rand()
    return Column(jc)
@since(1.4)
def randn(seed=None):
    """Generates a column with i.i.d. samples from the standard normal distribution.

    :param seed: optional integer seed for the random generator. ``None``
        lets Spark pick a seed; any integer, including 0, is honored.
    """
    sc = SparkContext._active_spark_context
    # Bug fix: use an explicit None check so that seed=0 is forwarded to the
    # JVM instead of being silently discarded by the old truthiness test.
    if seed is not None:
        jc = sc._jvm.functions.randn(seed)
    else:
        jc = sc._jvm.functions.randn()
    return Column(jc)
@since(1.4)
def sparkPartitionId():
    """A column for partition ID of the Spark task.

    Note that this is indeterministic because it depends on data partitioning and task scheduling.

    >>> df.repartition(1).select(sparkPartitionId().alias("pid")).collect()
    [Row(pid=0), Row(pid=0)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.sparkPartitionId())
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
    """Creates a new struct column.

    :param cols: list of column names (string) or list of :class:`Column` expressions
        that are named or aliased.

    >>> df.select(struct('age', 'name').alias("struct")).collect()
    [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
    >>> df.select(struct([df.age, df.name]).alias("struct")).collect()
    [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
    """
    ctx = SparkContext._active_spark_context
    # Convenience: allow struct([c1, c2]) in addition to struct(c1, c2).
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.struct(_to_seq(ctx, cols, _to_java_column)))
@since(1.4)
def when(condition, value):
    """Evaluates a list of conditions and returns one of multiple possible result expressions.
    If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.

    :param condition: a boolean :class:`Column` expression.
    :param value: a literal value, or a :class:`Column` expression.

    >>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
    [Row(age=3), Row(age=4)]

    >>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
    [Row(age=3), Row(age=None)]
    """
    ctx = SparkContext._active_spark_context
    if not isinstance(condition, Column):
        raise TypeError("condition should be a Column")
    # Unwrap Column values to their JVM counterpart; plain literals pass through.
    if isinstance(value, Column):
        java_value = value._jc
    else:
        java_value = value
    return Column(ctx._jvm.functions.when(condition._jc, java_value))
@since(1.5)
def log(arg1, arg2=None):
    """Returns the first argument-based logarithm of the second argument.

    If there is only one argument, then this takes the natural logarithm of the argument.

    >>> df.select(log(10.0, df.age).alias('ten')).map(lambda l: str(l.ten)[:7]).collect()
    ['0.30102', '0.69897']

    >>> df.select(log(df.age).alias('e')).map(lambda l: str(l.e)[:7]).collect()
    ['0.69314', '1.60943']
    """
    ctx = SparkContext._active_spark_context
    # One-argument form: natural log of arg1. Two-argument form: log base arg1 of arg2.
    if arg2 is None:
        return Column(ctx._jvm.functions.log(_to_java_column(arg1)))
    return Column(ctx._jvm.functions.log(arg1, _to_java_column(arg2)))
@since(1.4)
def lag(col, count=1, default=None):
    """
    Window function: returns the value that is `count` rows before the current row, and
    `default` if there is less than `count` rows before the current row. For example,
    a `count` of one will return the previous row at any given point in the window partition.

    This is equivalent to the LAG function in SQL.

    :param col: name of column or expression
    :param count: number of rows to look back (the SQL LAG offset)
    :param default: value returned when fewer than `count` rows precede the current row
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.lag(_to_java_column(col), count, default))
@since(1.4)
def lead(col, count=1, default=None):
    """
    Window function: returns the value that is `count` rows after the current row, and
    `default` if there is less than `count` rows after the current row. For example,
    a `count` of one will return the next row at any given point in the window partition.

    This is equivalent to the LEAD function in SQL.

    :param col: name of column or expression
    :param count: number of rows to look ahead (the SQL LEAD offset)
    :param default: value returned when fewer than `count` rows follow the current row
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.lead(_to_java_column(col), count, default))
@since(1.4)
def ntile(n):
    """
    Window function: returns a group id from 1 to `n` (inclusive) in a round-robin fashion in
    a window partition. For example, if `n` is 3, the first row will get 1, the second row will
    get 2, the third row will get 3, and the fourth row will get 1...

    This is equivalent to the NTILE function in SQL.

    :param n: an integer
    """
    sc = SparkContext._active_spark_context
    # int(n) normalizes the argument before it crosses into the JVM.
    return Column(sc._jvm.functions.ntile(int(n)))
class UserDefinedFunction(object):
    """
    User defined function in Python

    Wraps a Python callable plus a return type, and builds the JVM-side
    ``UserDefinedPythonFunction`` that evaluates the callable over rows.

    .. versionadded:: 1.3
    """
    def __init__(self, func, returnType):
        # func: Python callable applied per row; returnType: DataType of the result.
        self.func = func
        self.returnType = returnType
        self._broadcast = None
        self._judf = self._create_judf()

    def _create_judf(self):
        # Adapt the user function to the worker calling convention: the worker
        # invokes it as fn(split_index, iterator_of_argument_tuples).
        f = self.func  # put it in closure `func`
        func = lambda _, it: map(lambda x: f(*x), it)
        ser = AutoBatchedSerializer(PickleSerializer())
        command = (func, None, ser, ser)
        sc = SparkContext._active_spark_context
        # Pickle the command; this also collects broadcast vars / env / includes
        # the function depends on.
        pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command, self)
        ssql_ctx = sc._jvm.SQLContext(sc._jsc.sc())
        # Translate the Python DataType into its JVM representation via JSON.
        jdt = ssql_ctx.parseDataType(self.returnType.json())
        fname = f.__name__ if hasattr(f, '__name__') else f.__class__.__name__
        judf = sc._jvm.UserDefinedPythonFunction(fname, bytearray(pickled_command), env, includes,
                                                 sc.pythonExec, sc.pythonVer, broadcast_vars,
                                                 sc._javaAccumulator, jdt)
        return judf

    def __del__(self):
        # Release the broadcast created during command preparation, if any.
        if self._broadcast is not None:
            self._broadcast.unpersist()
            self._broadcast = None

    def __call__(self, *cols):
        # Apply the JVM UDF to the given columns and wrap the result Column.
        sc = SparkContext._active_spark_context
        jc = self._judf.apply(_to_seq(sc, cols, _to_java_column))
        return Column(jc)
@since(1.3)
def udf(f, returnType=StringType()):
    """Creates a :class:`Column` expression representing a user defined function (UDF).

    >>> from pyspark.sql.types import IntegerType
    >>> slen = udf(lambda s: len(s), IntegerType())
    >>> df.select(slen(df.name).alias('slen')).collect()
    [Row(slen=5), Row(slen=3)]
    """
    # Thin factory; the heavy lifting lives in UserDefinedFunction.
    wrapped = UserDefinedFunction(f, returnType)
    return wrapped
def _test():
    """Run this module's doctests against a local SparkContext/SQLContext."""
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    import pyspark.sql.functions
    # Doctest globals: the module namespace plus sc / sqlContext / a sample df,
    # which the docstring examples above reference by name.
    globs = pyspark.sql.functions.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['df'] = sc.parallelize([Row(name='Alice', age=2), Row(name='Bob', age=5)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    # Stop the context even on failure so the JVM shuts down cleanly.
    globs['sc'].stop()
    if failure_count:
        exit(-1)


if __name__ == "__main__":
    _test()
| 36.895604 | 100 | 0.655051 |
0974ac9a15d5dd47e826ff7270370c7c1cd1114c | 8,505 | py | Python | open/Dell/code/common/__init__.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 12 | 2021-09-23T08:05:57.000Z | 2022-03-21T03:52:11.000Z | open/Dell/code/common/__init__.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 11 | 2021-09-23T20:34:06.000Z | 2022-01-22T07:58:02.000Z | open/Dell/code/common/__init__.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 16 | 2021-09-23T20:26:38.000Z | 2022-03-09T12:59:56.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.getcwd())  # make `code.*` imports resolve when run from the repo root
import json
import platform
import subprocess
import sys  # NOTE(review): duplicate of the earlier `import sys`; harmless but redundant
import re
from glob import glob

# TODO: Remove when constants.py is integrated
VERSION = "v1.0"

import logging
# Configure root logging once at import time; every module sharing this process
# inherits the format below.
logging.basicConfig(level=logging.INFO, format="[%(asctime)s %(filename)s:%(lineno)d %(levelname)s] %(message)s")

from code.common.system_list import KnownSystems, MIGConfiguration
def is_aarch64():
    """Report whether the host CPU architecture is aarch64 (64-bit ARM)."""
    return "aarch64" == platform.processor()
def is_xavier():
    """Heuristically detect an NVIDIA Jetson Xavier system.

    Non-aarch64 hosts can never be Xavier boards; on aarch64 the presence of
    the devicetree model file is used as the (coarse) indicator.
    """
    return is_aarch64() and os.path.exists("/sys/firmware/devicetree/base/model")
def check_xavier_version(s):
    """Return True iff this is a Xavier board whose devicetree model string contains ``s``."""
    if not is_xavier():
        return False
    with open("/sys/firmware/devicetree/base/model", "r") as model_file:
        return s in model_file.read()
def is_xavier_nx():
    """True when running on a Jetson Xavier NX board."""
    return check_xavier_version("NX")
def is_xavier_agx():
    """True when running on a Jetson AGX Xavier board."""
    return check_xavier_version("AGX")
def check_mig_enabled():
    """Check if MIG is enabled on input GPU."""
    # A MIG slice shows up in `nvidia-smi -L` output as e.g. "MIG 1g.5gb".
    listing = subprocess.Popen("nvidia-smi -L", universal_newlines=True, shell=True, stdout=subprocess.PIPE)
    # NOTE(review): the child process is never wait()ed on (same as before);
    # acceptable for this short-lived query, but it is not reaped explicitly.
    return any(re.search(r"MIG\s+\dg\.\d+gb", line) for line in listing.stdout)
def get_gpu_uuid_from_mig_uuid(mig_uuid):
    """Return GPU UUID corresponding to MIG UUID.

    Returns an empty string when no GPU hosts a MIG slice with that UUID.
    """
    mapping = MIGConfiguration.get_gpu_mig_slice_mapping()
    for gpu_uuid, mig_slices in mapping.items():
        if any(mig_slice.uuid == mig_uuid for mig_slice in mig_slices):
            return gpu_uuid
    return ""
def get_system():
    """Return a System object that describes computer system.

    Detection order: CPU-only override (USE_CPU=1) -> Jetson Xavier boards ->
    discrete NVIDIA GPUs via nvidia-smi (with optional MIG configuration).

    Raises:
        RuntimeError: when no known system configuration matches the hardware.
    """
    # Quick path for CPU machines
    if os.environ.get("USE_CPU") == "1":
        cpu_info = run_command("lscpu | grep name", get_output=True, tee=False)
        model_name = cpu_info[0].replace("Model name:", "").strip()
        if "6258R" in model_name:
            return KnownSystems.Triton_CPU_2S_6258R.get_match("2S_6258R", 1)
        elif "8380H" in model_name:
            return KnownSystems.Triton_CPU_4S_8380H.get_match("4S_8380H", 1)
        else:
            raise RuntimeError("Cannot find valid configs for {:}.".format(model_name))

    # Check if system is Xavier
    if is_xavier():
        # Jetson Xavier is the only supported aarch64 platform.
        with open("/sys/firmware/devicetree/base/model") as product_f:
            product_name = product_f.read()
        if "jetson" in product_name.lower():
            if "AGX" in product_name:
                return KnownSystems.AGX_Xavier.get_match("Jetson-AGX", 1)
            elif "NX" in product_name:
                return KnownSystems.Xavier_NX.get_match("Xavier NX", 1)
            else:
                raise RuntimeError("Unrecognized aarch64 device. Only AGX Xavier and Xavier NX are supported.")

    # Check if MIG is enabled
    mig_conf = None
    if check_mig_enabled():
        mig_conf = MIGConfiguration.from_nvidia_smi()
        if mig_conf.num_mig_slices() == 0:
            # NOTE(review): logging.warn is deprecated in favor of logging.warning.
            logging.warn("MIG is enabled, but no instances were detected.")
        else:
            logging.info("Found {:} MIG compute instances".format(mig_conf.num_mig_slices()))

    # TODO: Investigate using py-nvml to get this information, instead of nvidia-smi. It may break on aarch64.
    # Get GPU name and count from nvidia-smi
    nvidia_smi_out = run_command("CUDA_VISIBLE_ORDER=PCI_BUS_ID nvidia-smi --query-gpu=gpu_name,pci.device_id,uuid --format=csv", get_output=True, tee=False)

    # Remove first line (CSV column names) and strip empty lines
    tmp = [line for line in nvidia_smi_out[1:] if len(line) > 0]
    uuid2index = {line.split(',')[2].strip(): i for i, line in enumerate(tmp)}

    # If CUDA_VISIBLE_DEVICES is set, apply it manually, as nvidia-smi doesn't obey it.
    # Indexing is correct, as we set CUDA_VISIBLE_ORDER to PCI_BUS_ID.
    if os.environ.get("CUDA_VISIBLE_DEVICES"):
        seen_uuids = set()
        indices = []
        for g in os.environ.get("CUDA_VISIBLE_DEVICES").split(","):
            if g.isnumeric():
                # Numeric entries are direct device indices.
                indices.append(int(g))
            else:
                # UUID entries: either a full GPU UUID or a MIG slice UUID,
                # which is first mapped back to its parent GPU.
                uuid = ""
                if g.startswith("GPU-"):
                    uuid = g
                elif g.startswith("MIG-"):
                    uuid = get_gpu_uuid_from_mig_uuid(g)
                else:
                    raise RuntimeError("Invalid CUDA_VISIBILE_DEVICES")
                if uuid not in seen_uuids:
                    seen_uuids.add(uuid)
                    indices.append(uuid2index[uuid])
        tmp = [tmp[i] for i in indices]

    count_actual = len(tmp)
    if count_actual == 0:
        raise RuntimeError("nvidia-smi did not detect any GPUs:\n{:}".format(nvidia_smi_out))

    # All visible GPUs are assumed homogeneous: match on the first one only.
    name, pci_id, uuid = tmp[0].split(", ")
    assert(pci_id[-4:] == "10DE")  # 10DE is NVIDIA PCI vendor ID
    pci_id = pci_id.split("x")[1][:4]  # Get the relevant 4 digit hex

    system = None
    for sysclass in KnownSystems.get_all_system_classes():
        system = sysclass.get_match(name, count_actual, pci_id=pci_id, mig_conf=mig_conf)
        if system:
            break
    if system is None:
        raise RuntimeError("Cannot find valid configs for {:d}x {:}. Please follow performance_tuning_guide.md to add support for a new GPU.".format(count_actual, name))

    return system
def run_command(cmd, get_output=False, tee=True, custom_env=None):
    """
    Runs a command.

    Args:
        cmd (str): The command to run.
        get_output (bool): If true, run_command will return the stdout output. Default: False.
        tee (bool): If true, captures output (if get_output is true) as well as prints output to stdout. Otherwise, does
            not print to stdout.
        custom_env (dict): Optional environment to run the command with, replacing the inherited
            one. Only used when get_output is true.

    Returns:
        list of str when get_output is true (stdout lines without trailing newlines);
        otherwise the return value of subprocess.check_call (0).

    Raises:
        subprocess.CalledProcessError: if the command exits with a non-zero status.
    """
    logging.info("Running command: {:}".format(cmd))
    if not get_output:
        return subprocess.check_call(cmd, shell=True)
    else:
        output = []
        if custom_env is not None:
            logging.info("Overriding Environment")
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, env=custom_env)
        else:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        # Stream stdout line by line so that tee-ing shows progress live.
        for line in iter(p.stdout.readline, b""):
            line = line.decode("utf-8")
            if tee:
                sys.stdout.write(line)
                sys.stdout.flush()
            output.append(line.rstrip("\n"))
        ret = p.wait()
        if ret == 0:
            return output
        else:
            raise subprocess.CalledProcessError(ret, cmd)
def args_to_string(d, blacklist=(), delimit=True, double_delimit=False):
    """Serialize a flag dict into a command-line argument string.

    Args:
        d (dict): Mapping of flag name -> value. ``None`` values are skipped.
        blacklist: Flag names to omit. (The previous mutable default ``[]`` is
            replaced with an empty tuple to avoid the shared-mutable-default
            pitfall; backward compatible since it is only tested with ``in``.)
        delimit (bool): If True, wrap non-numeric values in double quotes.
        double_delimit (bool): If True, use escaped quotes (``\\"``) instead.

    Returns:
        str: Space-joined ``--flag=value`` arguments.
    """
    flags = []
    for flag in d:
        # Skip unset
        if d[flag] is None:
            continue
        # Skip blacklisted
        if flag in blacklist:
            continue
        if type(d[flag]) is bool:
            # Booleans are rendered as explicit lowercase true/false strings.
            flags.append("--{:}={:}".format(flag, "true" if d[flag] else "false"))
        elif type(d[flag]) in [int, float] or not delimit:
            flags.append("--{:}={:}".format(flag, d[flag]))
        elif double_delimit:
            flags.append("--{:}=\\\"{:}\\\"".format(flag, d[flag]))
        else:
            flags.append("--{:}=\"{:}\"".format(flag, d[flag]))
    return " ".join(flags)
def flags_bool_to_int(d):
    """Convert every boolean value in the flag dict to 1/0, in place.

    Non-boolean values are left untouched; the mutated dict is returned for
    call-chaining convenience.
    """
    for key, value in d.items():
        # bool cannot be subclassed, so isinstance is equivalent to a type check.
        if isinstance(value, bool):
            d[key] = int(value)
    return d
def dict_get(d, key, default=None):
    """Return non-None value for key from dict. Use default if necessary."""
    # A stored None is treated the same as a missing key.
    value = d.get(key)
    if value is None:
        return default
    return value
| 34.433198 | 169 | 0.623516 |
ce2212f0bac1442a91d91db103a287bf798be66e | 1,266 | py | Python | tests/urls.py | ercpe/django-two-factor-auth | 76866dd310903b3a34526becaa0a5012dea7debe | [
"MIT"
] | null | null | null | tests/urls.py | ercpe/django-two-factor-auth | 76866dd310903b3a34526becaa0a5012dea7debe | [
"MIT"
] | 1 | 2015-07-13T16:52:33.000Z | 2015-07-16T20:24:59.000Z | tests/urls.py | ercpe/django-two-factor-auth | 76866dd310903b3a34526becaa0a5012dea7debe | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, url, include
from django.contrib import admin
from two_factor.admin import AdminSiteOTPRequired
from two_factor.views import LoginView
from two_factor.urls import urlpatterns as tf_urls
from two_factor.gateways.twilio.urls import urlpatterns as tf_twilio_urls
from .views import SecureView
# Register ModelAdmins from all installed apps (old-style Django autodiscovery).
admin.autodiscover()

# Admin site variant that additionally requires OTP verification to log in.
otp_admin_site = AdminSiteOTPRequired()

urlpatterns = patterns(
    '',
    # Plain logout view from django.contrib.auth (pre-Django-1.10 string
    # view reference).
    url(
        regex=r'^account/logout/$',
        view='django.contrib.auth.views.logout',
        name='logout',
    ),
    # Login view with a customised redirect query-parameter name.
    url(
        regex=r'^account/custom-login/$',
        view=LoginView.as_view(redirect_field_name='next_page'),
        name='custom-login',
    ),
    # Test views exercising the different verification behaviours:
    # default redirect, raising, and redirecting unverified users.
    url(
        regex=r'^secure/$',
        view=SecureView.as_view(),
    ),
    url(
        regex=r'^secure/raises/$',
        view=SecureView.as_view(raise_anonymous=True, raise_unverified=True),
    ),
    url(
        regex=r'^secure/redirect_unverified/$',
        view=SecureView.as_view(raise_anonymous=True,
                                verification_url='/account/login/'),
    ),
    # two-factor core + Twilio gateway URLs, plus both admin sites.
    url(r'', include(tf_urls + tf_twilio_urls, 'two_factor')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^otp_admin/', include(otp_admin_site.urls)),
)
| 28.133333 | 77 | 0.659558 |
1c6d09948e07da2cb0e44b8404d5023c1f81baf3 | 27,616 | py | Python | external/AR/pytracking/evaluation/uavdataset.py | tzhhhh123/Stark | eaf7df3baf27ac064938f831211ae64659bc6808 | [
"MIT"
] | 376 | 2021-03-27T12:29:17.000Z | 2022-03-29T01:22:15.000Z | external/AR/pytracking/evaluation/uavdataset.py | wp8733684/Stark | ba59f9596b06bc687d726f991e1e7fce8af6b5a5 | [
"MIT"
] | 75 | 2021-03-31T12:44:45.000Z | 2022-03-28T09:02:57.000Z | external/AR/pytracking/evaluation/uavdataset.py | wp8733684/Stark | ba59f9596b06bc687d726f991e1e7fce8af6b5a5 | [
"MIT"
] | 82 | 2021-03-26T10:07:57.000Z | 2022-03-29T11:08:27.000Z | import numpy as np
from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList
from pytracking.utils.load_text import load_text
class UAVDataset(BaseDataset):
""" UAV123 dataset.
Publication:
A Benchmark and Simulator for UAV Tracking.
Matthias Mueller, Neil Smith and Bernard Ghanem
ECCV, 2016
https://ivul.kaust.edu.sa/Documents/Publications/2016/A%20Benchmark%20and%20Simulator%20for%20UAV%20Tracking.pdf
Download the dataset from https://ivul.kaust.edu.sa/Pages/pub-benchmark-simulator-uav.aspx
"""
    def __init__(self):
        """Resolve the dataset root from the environment settings and build the sequence table."""
        super().__init__()
        # Root folder expected to contain "data_seq/UAV123" and "anno/UAV123".
        self.base_path = self.env_settings.uav_path
        self.sequence_info_list = self._get_sequence_info_list()
def get_sequence_list(self):
return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])
    def _construct_sequence(self, sequence_info):
        """Turn one metadata dict into a Sequence with frame paths and ground-truth boxes.

        The optional 'initOmit' key skips that many frames (and annotation rows)
        at the start of the sequence.
        """
        sequence_path = sequence_info['path']
        nz = sequence_info['nz']  # zero-padding width of the frame file names
        ext = sequence_info['ext']
        start_frame = sequence_info['startFrame']
        end_frame = sequence_info['endFrame']

        init_omit = 0
        if 'initOmit' in sequence_info:
            init_omit = sequence_info['initOmit']

        # Frame file names are zero-padded to `nz` digits, e.g. 000001.jpg.
        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,
                                                                           sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]

        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])

        # One bounding-box row per frame; rows before init_omit are dropped below.
        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy')

        return Sequence(sequence_info['name'], frames, 'uav', ground_truth_rect[init_omit:,:],
                        object_class=sequence_info['object_class'])
def __len__(self):
return len(self.sequence_info_list)
def _get_sequence_info_list(self):
sequence_info_list = [
{"name": "uav_bike1", "path": "data_seq/UAV123/bike1", "startFrame": 1, "endFrame": 3085, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/bike1.txt", "object_class": "vehicle"},
{"name": "uav_bike2", "path": "data_seq/UAV123/bike2", "startFrame": 1, "endFrame": 553, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/bike2.txt", "object_class": "vehicle"},
{"name": "uav_bike3", "path": "data_seq/UAV123/bike3", "startFrame": 1, "endFrame": 433, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/bike3.txt", "object_class": "vehicle"},
{"name": "uav_bird1_1", "path": "data_seq/UAV123/bird1", "startFrame": 1, "endFrame": 253, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/bird1_1.txt", "object_class": "bird"},
{"name": "uav_bird1_2", "path": "data_seq/UAV123/bird1", "startFrame": 775, "endFrame": 1477, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/bird1_2.txt", "object_class": "bird"},
{"name": "uav_bird1_3", "path": "data_seq/UAV123/bird1", "startFrame": 1573, "endFrame": 2437, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/bird1_3.txt", "object_class": "bird"},
{"name": "uav_boat1", "path": "data_seq/UAV123/boat1", "startFrame": 1, "endFrame": 901, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/boat1.txt", "object_class": "vessel"},
{"name": "uav_boat2", "path": "data_seq/UAV123/boat2", "startFrame": 1, "endFrame": 799, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/boat2.txt", "object_class": "vessel"},
{"name": "uav_boat3", "path": "data_seq/UAV123/boat3", "startFrame": 1, "endFrame": 901, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/boat3.txt", "object_class": "vessel"},
{"name": "uav_boat4", "path": "data_seq/UAV123/boat4", "startFrame": 1, "endFrame": 553, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/boat4.txt", "object_class": "vessel"},
{"name": "uav_boat5", "path": "data_seq/UAV123/boat5", "startFrame": 1, "endFrame": 505, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/boat5.txt", "object_class": "vessel"},
{"name": "uav_boat6", "path": "data_seq/UAV123/boat6", "startFrame": 1, "endFrame": 805, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/boat6.txt", "object_class": "vessel"},
{"name": "uav_boat7", "path": "data_seq/UAV123/boat7", "startFrame": 1, "endFrame": 535, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/boat7.txt", "object_class": "vessel"},
{"name": "uav_boat8", "path": "data_seq/UAV123/boat8", "startFrame": 1, "endFrame": 685, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/boat8.txt", "object_class": "vessel"},
{"name": "uav_boat9", "path": "data_seq/UAV123/boat9", "startFrame": 1, "endFrame": 1399, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/boat9.txt", "object_class": "vessel"},
{"name": "uav_building1", "path": "data_seq/UAV123/building1", "startFrame": 1, "endFrame": 469, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/building1.txt", "object_class": "other"},
{"name": "uav_building2", "path": "data_seq/UAV123/building2", "startFrame": 1, "endFrame": 577, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/building2.txt", "object_class": "other"},
{"name": "uav_building3", "path": "data_seq/UAV123/building3", "startFrame": 1, "endFrame": 829, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/building3.txt", "object_class": "other"},
{"name": "uav_building4", "path": "data_seq/UAV123/building4", "startFrame": 1, "endFrame": 787, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/building4.txt", "object_class": "other"},
{"name": "uav_building5", "path": "data_seq/UAV123/building5", "startFrame": 1, "endFrame": 481, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/building5.txt", "object_class": "other"},
{"name": "uav_car1_1", "path": "data_seq/UAV123/car1", "startFrame": 1, "endFrame": 751, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car1_1.txt", "object_class": "car"},
{"name": "uav_car1_2", "path": "data_seq/UAV123/car1", "startFrame": 751, "endFrame": 1627, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car1_2.txt", "object_class": "car"},
{"name": "uav_car1_3", "path": "data_seq/UAV123/car1", "startFrame": 1627, "endFrame": 2629, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car1_3.txt", "object_class": "car"},
{"name": "uav_car10", "path": "data_seq/UAV123/car10", "startFrame": 1, "endFrame": 1405, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car10.txt", "object_class": "car"},
{"name": "uav_car11", "path": "data_seq/UAV123/car11", "startFrame": 1, "endFrame": 337, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car11.txt", "object_class": "car"},
{"name": "uav_car12", "path": "data_seq/UAV123/car12", "startFrame": 1, "endFrame": 499, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car12.txt", "object_class": "car"},
{"name": "uav_car13", "path": "data_seq/UAV123/car13", "startFrame": 1, "endFrame": 415, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car13.txt", "object_class": "car"},
{"name": "uav_car14", "path": "data_seq/UAV123/car14", "startFrame": 1, "endFrame": 1327, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car14.txt", "object_class": "car"},
{"name": "uav_car15", "path": "data_seq/UAV123/car15", "startFrame": 1, "endFrame": 469, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car15.txt", "object_class": "car"},
{"name": "uav_car16_1", "path": "data_seq/UAV123/car16", "startFrame": 1, "endFrame": 415, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car16_1.txt", "object_class": "car"},
{"name": "uav_car16_2", "path": "data_seq/UAV123/car16", "startFrame": 415, "endFrame": 1993, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car16_2.txt", "object_class": "car"},
{"name": "uav_car17", "path": "data_seq/UAV123/car17", "startFrame": 1, "endFrame": 1057, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car17.txt", "object_class": "car"},
{"name": "uav_car18", "path": "data_seq/UAV123/car18", "startFrame": 1, "endFrame": 1207, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car18.txt", "object_class": "car"},
{"name": "uav_car1_s", "path": "data_seq/UAV123/car1_s", "startFrame": 1, "endFrame": 1475, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car1_s.txt", "object_class": "car"},
{"name": "uav_car2", "path": "data_seq/UAV123/car2", "startFrame": 1, "endFrame": 1321, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car2.txt", "object_class": "car"},
{"name": "uav_car2_s", "path": "data_seq/UAV123/car2_s", "startFrame": 1, "endFrame": 320, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car2_s.txt", "object_class": "car"},
{"name": "uav_car3", "path": "data_seq/UAV123/car3", "startFrame": 1, "endFrame": 1717, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car3.txt", "object_class": "car"},
{"name": "uav_car3_s", "path": "data_seq/UAV123/car3_s", "startFrame": 1, "endFrame": 1300, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car3_s.txt", "object_class": "car"},
{"name": "uav_car4", "path": "data_seq/UAV123/car4", "startFrame": 1, "endFrame": 1345, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car4.txt", "object_class": "car"},
{"name": "uav_car4_s", "path": "data_seq/UAV123/car4_s", "startFrame": 1, "endFrame": 830, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car4_s.txt", "object_class": "car"},
{"name": "uav_car5", "path": "data_seq/UAV123/car5", "startFrame": 1, "endFrame": 745, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car5.txt", "object_class": "car"},
{"name": "uav_car6_1", "path": "data_seq/UAV123/car6", "startFrame": 1, "endFrame": 487, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car6_1.txt", "object_class": "car"},
{"name": "uav_car6_2", "path": "data_seq/UAV123/car6", "startFrame": 487, "endFrame": 1807, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car6_2.txt", "object_class": "car"},
{"name": "uav_car6_3", "path": "data_seq/UAV123/car6", "startFrame": 1807, "endFrame": 2953, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car6_3.txt", "object_class": "car"},
{"name": "uav_car6_4", "path": "data_seq/UAV123/car6", "startFrame": 2953, "endFrame": 3925, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car6_4.txt", "object_class": "car"},
{"name": "uav_car6_5", "path": "data_seq/UAV123/car6", "startFrame": 3925, "endFrame": 4861, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car6_5.txt", "object_class": "car"},
{"name": "uav_car7", "path": "data_seq/UAV123/car7", "startFrame": 1, "endFrame": 1033, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car7.txt", "object_class": "car"},
{"name": "uav_car8_1", "path": "data_seq/UAV123/car8", "startFrame": 1, "endFrame": 1357, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car8_1.txt", "object_class": "car"},
{"name": "uav_car8_2", "path": "data_seq/UAV123/car8", "startFrame": 1357, "endFrame": 2575, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car8_2.txt", "object_class": "car"},
{"name": "uav_car9", "path": "data_seq/UAV123/car9", "startFrame": 1, "endFrame": 1879, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/car9.txt", "object_class": "car"},
{"name": "uav_group1_1", "path": "data_seq/UAV123/group1", "startFrame": 1, "endFrame": 1333, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group1_1.txt", "object_class": "person"},
{"name": "uav_group1_2", "path": "data_seq/UAV123/group1", "startFrame": 1333, "endFrame": 2515, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group1_2.txt", "object_class": "person"},
{"name": "uav_group1_3", "path": "data_seq/UAV123/group1", "startFrame": 2515, "endFrame": 3925, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group1_3.txt", "object_class": "person"},
{"name": "uav_group1_4", "path": "data_seq/UAV123/group1", "startFrame": 3925, "endFrame": 4873, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group1_4.txt", "object_class": "person"},
{"name": "uav_group2_1", "path": "data_seq/UAV123/group2", "startFrame": 1, "endFrame": 907, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group2_1.txt", "object_class": "person"},
{"name": "uav_group2_2", "path": "data_seq/UAV123/group2", "startFrame": 907, "endFrame": 1771, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group2_2.txt", "object_class": "person"},
{"name": "uav_group2_3", "path": "data_seq/UAV123/group2", "startFrame": 1771, "endFrame": 2683, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group2_3.txt", "object_class": "person"},
{"name": "uav_group3_1", "path": "data_seq/UAV123/group3", "startFrame": 1, "endFrame": 1567, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group3_1.txt", "object_class": "person"},
{"name": "uav_group3_2", "path": "data_seq/UAV123/group3", "startFrame": 1567, "endFrame": 2827, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group3_2.txt", "object_class": "person"},
{"name": "uav_group3_3", "path": "data_seq/UAV123/group3", "startFrame": 2827, "endFrame": 4369, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group3_3.txt", "object_class": "person"},
{"name": "uav_group3_4", "path": "data_seq/UAV123/group3", "startFrame": 4369, "endFrame": 5527, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/group3_4.txt", "object_class": "person"},
{"name": "uav_person1", "path": "data_seq/UAV123/person1", "startFrame": 1, "endFrame": 799, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person1.txt", "object_class": "person"},
{"name": "uav_person10", "path": "data_seq/UAV123/person10", "startFrame": 1, "endFrame": 1021, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person10.txt", "object_class": "person"},
{"name": "uav_person11", "path": "data_seq/UAV123/person11", "startFrame": 1, "endFrame": 721, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person11.txt", "object_class": "person"},
{"name": "uav_person12_1", "path": "data_seq/UAV123/person12", "startFrame": 1, "endFrame": 601, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person12_1.txt", "object_class": "person"},
{"name": "uav_person12_2", "path": "data_seq/UAV123/person12", "startFrame": 601, "endFrame": 1621, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person12_2.txt", "object_class": "person"},
{"name": "uav_person13", "path": "data_seq/UAV123/person13", "startFrame": 1, "endFrame": 883, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person13.txt", "object_class": "person"},
{"name": "uav_person14_1", "path": "data_seq/UAV123/person14", "startFrame": 1, "endFrame": 847, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person14_1.txt", "object_class": "person"},
{"name": "uav_person14_2", "path": "data_seq/UAV123/person14", "startFrame": 847, "endFrame": 1813, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person14_2.txt", "object_class": "person"},
{"name": "uav_person14_3", "path": "data_seq/UAV123/person14", "startFrame": 1813, "endFrame": 2923,
"nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person14_3.txt", "object_class": "person"},
{"name": "uav_person15", "path": "data_seq/UAV123/person15", "startFrame": 1, "endFrame": 1339, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person15.txt", "object_class": "person"},
{"name": "uav_person16", "path": "data_seq/UAV123/person16", "startFrame": 1, "endFrame": 1147, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person16.txt", "object_class": "person"},
{"name": "uav_person17_1", "path": "data_seq/UAV123/person17", "startFrame": 1, "endFrame": 1501, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person17_1.txt", "object_class": "person"},
{"name": "uav_person17_2", "path": "data_seq/UAV123/person17", "startFrame": 1501, "endFrame": 2347,
"nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person17_2.txt", "object_class": "person"},
{"name": "uav_person18", "path": "data_seq/UAV123/person18", "startFrame": 1, "endFrame": 1393, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person18.txt", "object_class": "person"},
{"name": "uav_person19_1", "path": "data_seq/UAV123/person19", "startFrame": 1, "endFrame": 1243, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person19_1.txt", "object_class": "person"},
{"name": "uav_person19_2", "path": "data_seq/UAV123/person19", "startFrame": 1243, "endFrame": 2791,
"nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_2.txt", "object_class": "person"},
{"name": "uav_person19_3", "path": "data_seq/UAV123/person19", "startFrame": 2791, "endFrame": 4357,
"nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/person19_3.txt", "object_class": "person"},
{"name": "uav_person1_s", "path": "data_seq/UAV123/person1_s", "startFrame": 1, "endFrame": 1600, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person1_s.txt", "object_class": "person"},
{"name": "uav_person2_1", "path": "data_seq/UAV123/person2", "startFrame": 1, "endFrame": 1189, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person2_1.txt", "object_class": "person"},
{"name": "uav_person2_2", "path": "data_seq/UAV123/person2", "startFrame": 1189, "endFrame": 2623, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person2_2.txt", "object_class": "person"},
{"name": "uav_person20", "path": "data_seq/UAV123/person20", "startFrame": 1, "endFrame": 1783, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person20.txt", "object_class": "person"},
{"name": "uav_person21", "path": "data_seq/UAV123/person21", "startFrame": 1, "endFrame": 487, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person21.txt", "object_class": "person"},
{"name": "uav_person22", "path": "data_seq/UAV123/person22", "startFrame": 1, "endFrame": 199, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person22.txt", "object_class": "person"},
{"name": "uav_person23", "path": "data_seq/UAV123/person23", "startFrame": 1, "endFrame": 397, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person23.txt", "object_class": "person"},
{"name": "uav_person2_s", "path": "data_seq/UAV123/person2_s", "startFrame": 1, "endFrame": 250, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person2_s.txt", "object_class": "person"},
{"name": "uav_person3", "path": "data_seq/UAV123/person3", "startFrame": 1, "endFrame": 643, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person3.txt", "object_class": "person"},
{"name": "uav_person3_s", "path": "data_seq/UAV123/person3_s", "startFrame": 1, "endFrame": 505, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person3_s.txt", "object_class": "person"},
{"name": "uav_person4_1", "path": "data_seq/UAV123/person4", "startFrame": 1, "endFrame": 1501, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person4_1.txt", "object_class": "person"},
{"name": "uav_person4_2", "path": "data_seq/UAV123/person4", "startFrame": 1501, "endFrame": 2743, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person4_2.txt", "object_class": "person"},
{"name": "uav_person5_1", "path": "data_seq/UAV123/person5", "startFrame": 1, "endFrame": 877, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person5_1.txt", "object_class": "person"},
{"name": "uav_person5_2", "path": "data_seq/UAV123/person5", "startFrame": 877, "endFrame": 2101, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person5_2.txt", "object_class": "person"},
{"name": "uav_person6", "path": "data_seq/UAV123/person6", "startFrame": 1, "endFrame": 901, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person6.txt", "object_class": "person"},
{"name": "uav_person7_1", "path": "data_seq/UAV123/person7", "startFrame": 1, "endFrame": 1249, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person7_1.txt", "object_class": "person"},
{"name": "uav_person7_2", "path": "data_seq/UAV123/person7", "startFrame": 1249, "endFrame": 2065, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person7_2.txt", "object_class": "person"},
{"name": "uav_person8_1", "path": "data_seq/UAV123/person8", "startFrame": 1, "endFrame": 1075, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person8_1.txt", "object_class": "person"},
{"name": "uav_person8_2", "path": "data_seq/UAV123/person8", "startFrame": 1075, "endFrame": 1525, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person8_2.txt", "object_class": "person"},
{"name": "uav_person9", "path": "data_seq/UAV123/person9", "startFrame": 1, "endFrame": 661, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/person9.txt", "object_class": "person"},
{"name": "uav_truck1", "path": "data_seq/UAV123/truck1", "startFrame": 1, "endFrame": 463, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/truck1.txt", "object_class": "truck"},
{"name": "uav_truck2", "path": "data_seq/UAV123/truck2", "startFrame": 1, "endFrame": 385, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/truck2.txt", "object_class": "truck"},
{"name": "uav_truck3", "path": "data_seq/UAV123/truck3", "startFrame": 1, "endFrame": 535, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/truck3.txt", "object_class": "truck"},
{"name": "uav_truck4_1", "path": "data_seq/UAV123/truck4", "startFrame": 1, "endFrame": 577, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/truck4_1.txt", "object_class": "truck"},
{"name": "uav_truck4_2", "path": "data_seq/UAV123/truck4", "startFrame": 577, "endFrame": 1261, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/truck4_2.txt", "object_class": "truck"},
{"name": "uav_uav1_1", "path": "data_seq/UAV123/uav1", "startFrame": 1, "endFrame": 1555, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav1_1.txt", "object_class": "aircraft"},
{"name": "uav_uav1_2", "path": "data_seq/UAV123/uav1", "startFrame": 1555, "endFrame": 2377, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav1_2.txt", "object_class": "aircraft"},
{"name": "uav_uav1_3", "path": "data_seq/UAV123/uav1", "startFrame": 2473, "endFrame": 3469, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav1_3.txt", "object_class": "aircraft"},
{"name": "uav_uav2", "path": "data_seq/UAV123/uav2", "startFrame": 1, "endFrame": 133, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav2.txt", "object_class": "aircraft"},
{"name": "uav_uav3", "path": "data_seq/UAV123/uav3", "startFrame": 1, "endFrame": 265, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav3.txt", "object_class": "aircraft"},
{"name": "uav_uav4", "path": "data_seq/UAV123/uav4", "startFrame": 1, "endFrame": 157, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav4.txt", "object_class": "aircraft"},
{"name": "uav_uav5", "path": "data_seq/UAV123/uav5", "startFrame": 1, "endFrame": 139, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav5.txt", "object_class": "aircraft"},
{"name": "uav_uav6", "path": "data_seq/UAV123/uav6", "startFrame": 1, "endFrame": 109, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav6.txt", "object_class": "aircraft"},
{"name": "uav_uav7", "path": "data_seq/UAV123/uav7", "startFrame": 1, "endFrame": 373, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav7.txt", "object_class": "aircraft"},
{"name": "uav_uav8", "path": "data_seq/UAV123/uav8", "startFrame": 1, "endFrame": 301, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/uav8.txt", "object_class": "aircraft"},
{"name": "uav_wakeboard1", "path": "data_seq/UAV123/wakeboard1", "startFrame": 1, "endFrame": 421, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/wakeboard1.txt", "object_class": "person"},
{"name": "uav_wakeboard10", "path": "data_seq/UAV123/wakeboard10", "startFrame": 1, "endFrame": 469,
"nz": 6, "ext": "jpg", "anno_path": "anno/UAV123/wakeboard10.txt", "object_class": "person"},
{"name": "uav_wakeboard2", "path": "data_seq/UAV123/wakeboard2", "startFrame": 1, "endFrame": 733, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/wakeboard2.txt", "object_class": "person"},
{"name": "uav_wakeboard3", "path": "data_seq/UAV123/wakeboard3", "startFrame": 1, "endFrame": 823, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/wakeboard3.txt", "object_class": "person"},
{"name": "uav_wakeboard4", "path": "data_seq/UAV123/wakeboard4", "startFrame": 1, "endFrame": 697, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/wakeboard4.txt", "object_class": "person"},
{"name": "uav_wakeboard5", "path": "data_seq/UAV123/wakeboard5", "startFrame": 1, "endFrame": 1675, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/wakeboard5.txt", "object_class": "person"},
{"name": "uav_wakeboard6", "path": "data_seq/UAV123/wakeboard6", "startFrame": 1, "endFrame": 1165, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/wakeboard6.txt", "object_class": "person"},
{"name": "uav_wakeboard7", "path": "data_seq/UAV123/wakeboard7", "startFrame": 1, "endFrame": 199, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/wakeboard7.txt", "object_class": "person"},
{"name": "uav_wakeboard8", "path": "data_seq/UAV123/wakeboard8", "startFrame": 1, "endFrame": 1543, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/wakeboard8.txt", "object_class": "person"},
{"name": "uav_wakeboard9", "path": "data_seq/UAV123/wakeboard9", "startFrame": 1, "endFrame": 355, "nz": 6,
"ext": "jpg", "anno_path": "anno/UAV123/wakeboard9.txt", "object_class": "person"}
]
return sequence_info_list
| 92.053333 | 129 | 0.576477 |
d3d9c71ad9a5f54a2d32c65aa02eb134eef16d6b | 2,443 | py | Python | server/api/models/inference_group.py | NUS-CS-MComp/cs-cloud-computing-music-personality | 35cc926bef83fb8be3c6af680862343a67cd6e1c | [
"Apache-2.0"
] | 2 | 2021-07-13T07:57:48.000Z | 2021-11-18T08:20:38.000Z | server/api/models/inference_group.py | NUS-CS-MComp/cs-cloud-computing-music-personality | 35cc926bef83fb8be3c6af680862343a67cd6e1c | [
"Apache-2.0"
] | null | null | null | server/api/models/inference_group.py | NUS-CS-MComp/cs-cloud-computing-music-personality | 35cc926bef83fb8be3c6af680862343a67cd6e1c | [
"Apache-2.0"
] | null | null | null | from .base import BaseModel
from boto3.dynamodb.conditions import Key
class InferenceGroupModel(BaseModel):
    """
    Inference user group index-referencing model

    :param BaseModel: Inherit from base data model
    :type BaseModel: BaseModel
    :return: Inference user group index-referencing model object
    :rtype: InferenceGroupModel
    """

    def create(self, user_id, cluster_group):
        """
        Create new inference instance

        :param user_id: User ID tag
        :type user_id: str
        :param cluster_group: Cluster group id
        :type cluster_group:: str
        :return: Database response object
        :rtype: dict
        """
        # One item per user, keyed by user_id with the cluster assignment attached.
        return self.table.put_item(
            Item={"cluster_group": cluster_group, "user_id": user_id}
        )

    def get(self, user_id):
        """
        Find inference data by user ID

        :param user_id: User ID tag
        :type user_id: str
        :return: Database response object, or None when no record exists
        :rtype: dict
        """
        matches = super().query(KeyConditionExpression=Key(self.id_key).eq(user_id))
        if not matches:
            return None
        return matches[-1]

    def get_by_cluster(self, cluster_group):
        """
        Find inference data by cluster group

        :param cluster_group: Cluster group name
        :type cluster_group: str
        :return: Database response object
        :rtype: dict
        """
        # The secondary index stores the group id as a stringified float,
        # so normalize the argument the same way before querying.
        condition = Key(self.secondary_key).eq(str(float(cluster_group)))
        return super().query(
            True,
            IndexName=self.global_secondary_index,
            KeyConditionExpression=condition,
        )

    def update(self, user_id, cluster_group):
        """
        Update user ID for inference data

        :param user_id: User ID tag
        :type user_id: str
        :param cluster_group: Cluster group id
        :type cluster_group:: str
        :return: Database response object
        :rtype: dict
        """
        # Delete-then-recreate; skip the delete when the record does not exist yet.
        if self.get(user_id) is not None:
            super().delete(user_id)
        return self.create(user_id, cluster_group)

    @property
    def table_name(self):
        return "InferenceGroup"

    @property
    def id_key(self):
        return "user_id"

    @property
    def secondary_key(self):
        return "cluster_group"
InferenceGroup = InferenceGroupModel()
| 26.554348 | 83 | 0.607041 |
c6dea6080d08009e3189d25ae58c9ba227d148ee | 2,096 | py | Python | formulaic/parser/types/token.py | CamDavidsonPilon/formulaic | 7afb4e4029860f081e16473621595e2c47634933 | [
"MIT"
] | null | null | null | formulaic/parser/types/token.py | CamDavidsonPilon/formulaic | 7afb4e4029860f081e16473621595e2c47634933 | [
"MIT"
] | null | null | null | formulaic/parser/types/token.py | CamDavidsonPilon/formulaic | 7afb4e4029860f081e16473621595e2c47634933 | [
"MIT"
] | null | null | null | from enum import Enum
from .factor import Factor
from .term import Term
class Token:
    """A single lexical token extracted from a formula string.

    Tracks the accumulated token text, its classification (``Kind``), and the
    span of the original source string it was read from.
    """

    class Kind(Enum):
        OPERATOR = 'operator'
        VALUE = 'value'
        NAME = 'name'
        PYTHON = 'python'

    __slots__ = ('token', '_kind', 'source', 'source_start', 'source_end')

    def __init__(self, token='', *, kind=None, source_start=None, source_end=None, source=None):
        self.token = token
        self.kind = kind
        self.source = source
        self.source_start = source_start
        self.source_end = source_end

    @property
    def kind(self):
        """The token's classification as a ``Token.Kind`` member (or a falsy placeholder)."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        # Coerce through the enum only for truthy values; falsy values
        # (None / empty) are stored unchanged.
        self._kind = kind and self.Kind(kind)

    def __bool__(self):
        # A token is "empty" until at least one character has been accumulated.
        return self.token != ''

    def update(self, char, source_index, kind=None):
        """Append ``char`` to the token, extend its source span, and return ``self``."""
        if kind is not None:
            self.kind = kind
        self.token += char
        if self.source_start is None:
            self.source_start = source_index
        self.source_end = source_index
        return self

    def __eq__(self, other):
        if isinstance(other, Token):
            return (self.token, self.kind) == (other.token, other.kind)
        if isinstance(other, str):
            return self.token == other
        return NotImplemented

    def __hash__(self):
        # Hash on text only, consistent with string comparison in __eq__.
        return hash(self.token)

    def __lt__(self, other):
        if not isinstance(other, Token):
            return NotImplemented
        return self.token < other.token

    @property
    def source_loc(self):
        """The (start, end) indices of this token in the source string."""
        return self.source_start, self.source_end

    def to_factor(self):
        """Convert this token into a ``Factor`` with the matching evaluation method."""
        eval_method = {
            Token.Kind.NAME: 'lookup',
            Token.Kind.PYTHON: 'python',
            Token.Kind.VALUE: 'literal',
        }[self.kind]
        return Factor(expr=self.token, eval_method=eval_method)

    def to_terms(self):
        """Wrap this token's factor into a single-term set."""
        factor = self.to_factor()
        return {Term([factor])}

    def flatten(self, str_args=False):
        """Return the token itself, or its string form when ``str_args`` is set."""
        if str_args:
            return str(self)
        return self

    def __repr__(self):
        return self.token
| 25.560976 | 96 | 0.593034 |
09f4b780b501920170f2776fd396faf57622b856 | 2,931 | py | Python | tests/handlers/v2/test_errors.py | homebysix/consoleme | ff800dd154c4a2be30ff7350f58d92ea4c8446d0 | [
"Apache-2.0"
] | 2,835 | 2020-12-09T19:07:24.000Z | 2022-03-31T06:38:44.000Z | tests/handlers/v2/test_errors.py | homebysix/consoleme | ff800dd154c4a2be30ff7350f58d92ea4c8446d0 | [
"Apache-2.0"
] | 179 | 2020-12-10T01:51:25.000Z | 2022-03-31T02:06:06.000Z | tests/handlers/v2/test_errors.py | homebysix/consoleme | ff800dd154c4a2be30ff7350f58d92ea4c8446d0 | [
"Apache-2.0"
] | 219 | 2020-12-09T21:30:56.000Z | 2022-03-31T05:57:36.000Z | import ujson as json
from tornado.testing import AsyncHTTPTestCase
class TestNotFoundHandler(AsyncHTTPTestCase):
    """Verify that unknown /api/v2 routes return the canonical JSON 404 payload
    for every HTTP verb.

    The five verb-specific test methods previously duplicated the same
    expected-payload/headers/fetch/assert sequence; that logic now lives in a
    single shared helper so a change to the 404 contract is made in one place.
    """

    # Payload every unknown route must return, regardless of HTTP method.
    _EXPECTED_404 = {"status": 404, "title": "Not Found", "message": "Not Found"}

    def get_app(self):
        from consoleme.config import config

        self.config = config
        from consoleme.routes import make_app

        return make_app(jwt_validator=lambda x: {})

    def _auth_headers(self):
        # Simulated authenticated identity, using the header names the app is
        # configured to trust.
        return {
            self.config.get("auth.user_header_name"): "user@github.com",
            self.config.get("auth.groups_header_name"): "groupa,groupb,groupc",
        }

    def _assert_not_found(self, method, body=None):
        """Fetch a non-existent route with the given verb and assert the 404 contract.

        :param method: HTTP method name, e.g. "GET"
        :param body: request body for verbs that require one (PUT/POST/PATCH)
        """
        kwargs = {} if body is None else {"body": body}
        response = self.fetch(
            "/api/v2/route_does_not_exist",
            method=method,
            headers=self._auth_headers(),
            **kwargs,
        )
        self.assertEqual(response.code, 404)
        self.assertDictEqual(json.loads(response.body), self._EXPECTED_404)

    def test_get(self):
        self._assert_not_found("GET")

    def test_put(self):
        self._assert_not_found("PUT", body="{}")

    def test_post(self):
        self._assert_not_found("POST", body="{}")

    def test_patch(self):
        self._assert_not_found("PATCH", body="{}")

    def test_delete(self):
        self._assert_not_found("DELETE")
| 40.150685 | 86 | 0.61276 |
df2ba17e67fe59ec50981b83518cb7720bbd67aa | 52,552 | py | Python | sarpy/annotation/afrl_elements/DetailObjectInfo.py | bombaci-vsc/sarpy | 3e31e9d7fca77612b60f2507f6f7068d1660a3e2 | [
"MIT"
] | 1 | 2021-07-05T15:14:03.000Z | 2021-07-05T15:14:03.000Z | sarpy/annotation/afrl_elements/DetailObjectInfo.py | bombaci-vsc/sarpy | 3e31e9d7fca77612b60f2507f6f7068d1660a3e2 | [
"MIT"
] | 1 | 2021-08-31T10:27:15.000Z | 2021-08-31T19:42:04.000Z | sarpy/annotation/afrl_elements/DetailObjectInfo.py | bombaci-vsc/sarpy | 3e31e9d7fca77612b60f2507f6f7068d1660a3e2 | [
"MIT"
] | 1 | 2021-07-17T12:49:57.000Z | 2021-07-17T12:49:57.000Z | """
Definition for the DetailObjectInfo AFRL labeling object
"""
__classification__ = "UNCLASSIFIED"
__authors__ = ("Thomas McCullough", "Thomas Rackers")
import logging
from typing import Optional, List
import numpy
from sarpy.compliance import string_types
from sarpy.io.xml.base import Serializable, Arrayable, create_text_node, create_new_node
from sarpy.io.xml.descriptors import StringDescriptor, FloatDescriptor, \
IntegerDescriptor, SerializableDescriptor, SerializableListDescriptor
from sarpy.io.complex.sicd_elements.blocks import RowColType
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd2_elements.SIDD import SIDDType
from sarpy.geometry.geocoords import geodetic_to_ecf, ecf_to_geodetic, wgs_84_norm
from sarpy.geometry.geometry_elements import Point, Polygon, GeometryCollection, Geometry
from .base import DEFAULT_STRICT
from .blocks import RangeCrossRangeType, RowColDoubleType, LatLonEleType
# TODO: Issue - do we need to set the nominal chip size?
# Comment - the articulation and configuration information is really not usable in
# its current form, and should be replaced with a (`name`, `value`) pair.
logger = logging.getLogger(__name__)
# the Object and sub-component definitions
class PhysicalType(Serializable):
    """
    Chip definition (extent and center pixel) for a physical object in a
    given image plane.
    """

    _fields = ('ChipSize', 'CenterPixel')
    _required = _fields
    ChipSize = SerializableDescriptor(
        'ChipSize', RangeCrossRangeType, _required, strict=DEFAULT_STRICT,
        docstring='The chip size of the physical object, '
                  'in the appropriate plane')  # type: RangeCrossRangeType
    CenterPixel = SerializableDescriptor(
        'CenterPixel', RowColDoubleType, _required, strict=DEFAULT_STRICT,
        docstring='The center pixel of the physical object, '
                  'in the appropriate plane')  # type: RowColDoubleType

    def __init__(self, ChipSize=None, CenterPixel=None, **kwargs):
        """
        Parameters
        ----------
        ChipSize : RangeCrossRangeType|numpy.ndarray|list|tuple
        CenterPixel : RowColDoubleType|numpy.ndarray|list|tuple
        kwargs
            Other keyword arguments
        """

        # forward any xml namespace bookkeeping provided by the parser
        for ns_attribute in ('_xml_ns', '_xml_ns_key'):
            if ns_attribute in kwargs:
                setattr(self, ns_attribute, kwargs[ns_attribute])
        self.ChipSize = ChipSize
        self.CenterPixel = CenterPixel
        super(PhysicalType, self).__init__(**kwargs)

    @classmethod
    def from_ranges(cls, row_range, col_range, row_limit, col_limit):
        """
        Construct from the row/column ranges and limits, clipping the ranges
        to `[0, limit]` in each dimension.

        Parameters
        ----------
        row_range
        col_range
        row_limit
        col_limit

        Returns
        -------
        PhysicalType
        """

        row_lo = max(0, row_range[0])
        row_hi = min(row_limit, row_range[1])
        col_lo = max(0, col_range[0])
        col_hi = min(col_limit, col_range[1])
        # size is the clipped extent, center is the clipped midpoint
        return cls(
            ChipSize=(row_hi - row_lo, col_hi - col_lo),
            CenterPixel=(0.5*(row_hi + row_lo), 0.5*(col_hi + col_lo)))
class PlanePhysicalType(Serializable):
    """
    Pair of chip definitions for one image plane: the bare physical object,
    and the physical object together with its shadow.
    """

    _fields = (
        'Physical', 'PhysicalWithShadows')
    _required = _fields
    Physical = SerializableDescriptor(
        'Physical', PhysicalType, _required,
        docstring='Chip details for the physical object in the appropriate plane')  # type: PhysicalType
    PhysicalWithShadows = SerializableDescriptor(
        'PhysicalWithShadows', PhysicalType, _required,
        docstring='Chip details for the physical object including shadows in '
                  'the appropriate plane')  # type: PhysicalType

    def __init__(self, Physical=None, PhysicalWithShadows=None, **kwargs):
        """
        Parameters
        ----------
        Physical : PhysicalType
        PhysicalWithShadows : PhysicalType
        kwargs
            Other keyword arguments
        """

        # forward any xml namespace bookkeeping provided by the parser
        for ns_attribute in ('_xml_ns', '_xml_ns_key'):
            if ns_attribute in kwargs:
                setattr(self, ns_attribute, kwargs[ns_attribute])
        self.Physical = Physical
        self.PhysicalWithShadows = PhysicalWithShadows
        super(PlanePhysicalType, self).__init__(**kwargs)
class SizeType(Serializable, Arrayable):
    """
    Physical size of an object as (Length, Width, Height), in meters.
    """

    _fields = ('Length', 'Width', 'Height')
    _required = _fields
    _numeric_format = {key: '0.16G' for key in _fields}
    # Descriptors
    Length = FloatDescriptor(
        'Length', _required, strict=True, docstring='The Length attribute.')  # type: float
    Width = FloatDescriptor(
        'Width', _required, strict=True, docstring='The Width attribute.')  # type: float
    Height = FloatDescriptor(
        'Height', _required, strict=True, docstring='The Height attribute.')  # type: float

    def __init__(self, Length=None, Width=None, Height=None, **kwargs):
        """
        Parameters
        ----------
        Length : float
        Width : float
        Height : float
        kwargs : dict
        """

        # forward any xml namespace bookkeeping provided by the parser
        for ns_attribute in ('_xml_ns', '_xml_ns_key'):
            if ns_attribute in kwargs:
                setattr(self, ns_attribute, kwargs[ns_attribute])
        self.Length = Length
        self.Width = Width
        self.Height = Height
        super(SizeType, self).__init__(**kwargs)

    def get_max_diameter(self):
        """
        Gets the nominal maximum diameter for the item, in meters.

        Returns
        -------
        float
        """

        # footprint diagonal - height is deliberately not included
        return float(numpy.sqrt(self.Length*self.Length + self.Width*self.Width))

    def get_array(self, dtype='float64'):
        """
        Gets an array representation of the class instance.

        Parameters
        ----------
        dtype : str|numpy.dtype|numpy.number
            numpy data type of the return

        Returns
        -------
        numpy.ndarray
            array of the form [Length, Width, Height]
        """

        values = [self.Length, self.Width, self.Height]
        return numpy.array(values, dtype=dtype)

    @classmethod
    def from_array(cls, array):
        """
        Create from an array type entry.

        Parameters
        ----------
        array: numpy.ndarray|list|tuple
            assumed [Length, Width, Height]

        Returns
        -------
        SizeType
        """

        if array is None:
            return None
        if not isinstance(array, (numpy.ndarray, list, tuple)):
            raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
        if len(array) < 3:
            raise ValueError('Expected array to be of length 3, and received {}'.format(array))
        return cls(Length=array[0], Width=array[1], Height=array[2])
class OrientationType(Serializable):
    """
    Orientation of an object: roll/pitch/yaw and azimuth angle. All
    components are optional.
    """

    _fields = ('Roll', 'Pitch', 'Yaw', 'AzimuthAngle')
    _required = ()
    _numeric_format = {key: '0.16G' for key in _fields}
    # descriptors
    Roll = FloatDescriptor(
        'Roll', _required)  # type: float
    Pitch = FloatDescriptor(
        'Pitch', _required)  # type: float
    Yaw = FloatDescriptor(
        'Yaw', _required)  # type: float
    AzimuthAngle = FloatDescriptor(
        'AzimuthAngle', _required)  # type: float

    def __init__(self, Roll=None, Pitch=None, Yaw=None, AzimuthAngle=None, **kwargs):
        """
        Parameters
        ----------
        Roll : float
        Pitch : float
        Yaw : float
        AzimuthAngle : float
        kwargs : dict
        """

        # forward any xml namespace bookkeeping provided by the parser
        for ns_attribute in ('_xml_ns', '_xml_ns_key'):
            if ns_attribute in kwargs:
                setattr(self, ns_attribute, kwargs[ns_attribute])
        self.Roll, self.Pitch = Roll, Pitch
        self.Yaw, self.AzimuthAngle = Yaw, AzimuthAngle
        super(OrientationType, self).__init__(**kwargs)
class ImageLocationType(Serializable):
    """
    Pixel-space location of a labeled object: the center pixel together with
    the four corners of an oriented bounding quadrilateral.
    """

    _fields = (
        'CenterPixel', 'LeftFrontPixel', 'RightFrontPixel', 'RightRearPixel',
        'LeftRearPixel')
    _required = _fields
    # descriptors
    CenterPixel = SerializableDescriptor(
        'CenterPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: RowColType
    LeftFrontPixel = SerializableDescriptor(
        'LeftFrontPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: RowColType
    RightFrontPixel = SerializableDescriptor(
        'RightFrontPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: RowColType
    RightRearPixel = SerializableDescriptor(
        'RightRearPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: RowColType
    LeftRearPixel = SerializableDescriptor(
        'LeftRearPixel', RowColType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: RowColType

    def __init__(self, CenterPixel=None, LeftFrontPixel=None, RightFrontPixel=None,
                 RightRearPixel=None, LeftRearPixel=None, **kwargs):
        """
        Parameters
        ----------
        CenterPixel : RowColType|numpy.ndarray|list|tuple
        LeftFrontPixel : RowColType|numpy.ndarray|list|tuple
        RightFrontPixel : RowColType|numpy.ndarray|list|tuple
        RightRearPixel : RowColType|numpy.ndarray|list|tuple
        LeftRearPixel : RowColType|numpy.ndarray|list|tuple
        kwargs : dict
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.CenterPixel = CenterPixel
        self.LeftFrontPixel = LeftFrontPixel
        self.RightFrontPixel = RightFrontPixel
        self.RightRearPixel = RightRearPixel
        self.LeftRearPixel = LeftRearPixel
        super(ImageLocationType, self).__init__(**kwargs)

    @classmethod
    def from_geolocation(cls, geo_location, the_structure):
        """
        Construct the image location from the geographical location via
        projection using the SICD (or SIDD) model.

        Parameters
        ----------
        geo_location : GeoLocationType
        the_structure : SICDType|SIDDType

        Returns
        -------
        None|ImageLocationType
            None if projection fails (including any NaN projected coordinate),
            the value otherwise
        """

        if geo_location is None:
            return None
        if not the_structure.can_project_coordinates():
            logger.warning(
                'This sicd does not permit projection,\n\t'
                'so the image location can not be inferred')
            return None
        # make sure this is defined, for the sake of efficiency
        # (note: 'overide' is the spelling of the callee's parameter - do not "fix" it here)
        the_structure.define_coa_projection(overide=False)
        # constructor arguments accumulated field-by-field below
        kwargs = {}
        # SICD pixel coordinates are absolute; shift back by FirstRow/FirstCol
        # so the stored values are relative to this image chip
        if isinstance(the_structure, SICDType):
            image_shift = numpy.array(
                [the_structure.ImageData.FirstRow, the_structure.ImageData.FirstCol], dtype='float64')
        else:
            image_shift = numpy.zeros((2, ), dtype='float64')
        for attribute in cls._fields:
            value = getattr(geo_location, attribute)
            if value is not None:
                absolute_pixel_location = the_structure.project_ground_to_image_geo(
                    value.get_array(dtype='float64'), ordering='latlong')
                # any NaN means the projection failed - abandon the whole construction
                if numpy.any(numpy.isnan(absolute_pixel_location)):
                    return None
                kwargs[attribute] = absolute_pixel_location - image_shift
        out = ImageLocationType(**kwargs)
        out.infer_center_pixel()
        return out

    def infer_center_pixel(self):
        """
        Infer the center pixel as the average of the four corner pixels,
        if not already populated. A no-op if any corner is missing.

        Returns
        -------
        None
        """

        if self.CenterPixel is not None:
            return
        current = numpy.zeros((2, ), dtype='float64')
        for entry in self._fields:
            if entry == 'CenterPixel':
                continue
            value = getattr(self, entry)
            if value is None:
                # cannot average with a missing corner - leave CenterPixel unset
                return
            current += 0.25*value.get_array(dtype='float64')
        self.CenterPixel = RowColType.from_array(current)

    def get_nominal_box(self, row_length=10, col_length=10):
        """
        Get a nominal box containing the object: the four corner pixels when
        all are populated, otherwise a `row_length x col_length` rectangle
        centered on `CenterPixel`.

        Parameters
        ----------
        row_length : int|float
            The side length to use for the rectangle, if not defined.
        col_length : int|float
            The side length to use for the rectangle, if not defined.

        Returns
        -------
        None|numpy.ndarray
            A 4x2 array of (row, col) vertices, or None if neither the corners
            nor the center pixel are populated.
        """

        if self.LeftFrontPixel is not None and self.RightFrontPixel is not None and \
                self.LeftRearPixel is not None and self.RightRearPixel is not None:
            out = numpy.zeros((4, 2), dtype='float64')
            out[0, :] = self.LeftFrontPixel.get_array()
            out[1, :] = self.RightFrontPixel.get_array()
            out[2, :] = self.RightRearPixel.get_array()
            out[3, :] = self.LeftRearPixel.get_array()
            return out

        if self.CenterPixel is None:
            return None
        # unit square centered at the origin, scaled per-dimension
        shift = numpy.array([[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]], dtype='float64')
        shift[:, 0] *= row_length
        shift[:, 1] *= col_length
        return self.CenterPixel.get_array(dtype='float64') + shift

    def get_geometry_object(self):
        """
        Gets the geometry for the given image section: a Point (center), a
        Polygon (corners), a GeometryCollection of both, or None, depending on
        which fields are populated.

        Returns
        -------
        Geometry
        """

        point = None
        polygon = None
        if self.CenterPixel is not None:
            point = Point(coordinates=self.CenterPixel.get_array(dtype='float64'))
        if self.LeftFrontPixel is not None and \
                self.RightFrontPixel is not None and \
                self.RightRearPixel is not None and \
                self.LeftRearPixel is not None:
            ring = numpy.zeros((4, 2), dtype='float64')
            ring[0, :] = self.LeftFrontPixel.get_array(dtype='float64')
            ring[1, :] = self.RightFrontPixel.get_array(dtype='float64')
            ring[2, :] = self.RightRearPixel.get_array(dtype='float64')
            ring[3, :] = self.LeftRearPixel.get_array(dtype='float64')
            polygon = Polygon(coordinates=[ring, ])
        if point is not None and polygon is not None:
            return GeometryCollection(geometries=[point, polygon])
        elif point is not None:
            return point
        elif polygon is not None:
            return polygon
        else:
            return None
class GeoLocationType(Serializable):
    """
    Geographic (lat/lon/ele) location of a labeled object: the center point
    together with the four corners of an oriented bounding quadrilateral.
    """

    _fields = (
        'CenterPixel', 'LeftFrontPixel', 'RightFrontPixel', 'RightRearPixel',
        'LeftRearPixel')
    _required = _fields
    # descriptors
    CenterPixel = SerializableDescriptor(
        'CenterPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonEleType
    LeftFrontPixel = SerializableDescriptor(
        'LeftFrontPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonEleType
    RightFrontPixel = SerializableDescriptor(
        'RightFrontPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonEleType
    RightRearPixel = SerializableDescriptor(
        'RightRearPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonEleType
    LeftRearPixel = SerializableDescriptor(
        'LeftRearPixel', LatLonEleType, _required, strict=DEFAULT_STRICT,
        docstring='')  # type: LatLonEleType

    def __init__(self, CenterPixel=None, LeftFrontPixel=None, RightFrontPixel=None,
                 RightRearPixel=None, LeftRearPixel=None, **kwargs):
        """
        Parameters
        ----------
        CenterPixel : LatLonEleType|numpy.ndarray|list|tuple
        LeftFrontPixel : LatLonEleType|numpy.ndarray|list|tuple
        RightFrontPixel : LatLonEleType|numpy.ndarray|list|tuple
        RightRearPixel : LatLonEleType|numpy.ndarray|list|tuple
        LeftRearPixel : LatLonEleType|numpy.ndarray|list|tuple
        kwargs : dict
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.CenterPixel = CenterPixel
        self.LeftFrontPixel = LeftFrontPixel
        self.RightFrontPixel = RightFrontPixel
        self.RightRearPixel = RightRearPixel
        self.LeftRearPixel = LeftRearPixel
        super(GeoLocationType, self).__init__(**kwargs)

    @classmethod
    def from_image_location(cls, image_location, the_structure, projection_type='HAE', **kwargs):
        """
        Construct the geographical location from the image location via
        projection using the SICD (or SIDD) model.

        .. Note::
            This assumes that the image coordinates are with respect to the given
            image (chip), and NOT including any sicd.ImageData.FirstRow/Col values,
            which will be added here.

        Parameters
        ----------
        image_location : ImageLocationType
        the_structure : SICDType|SIDDType
        projection_type : str
            The projection type selector, one of `['PLANE', 'HAE', 'DEM']`. Using `'DEM'`
            requires configuration for the DEM pathway described in
            :func:`sarpy.geometry.point_projection.image_to_ground_dem`.
        kwargs
            The keyword arguments for the :func:`SICDType.project_image_to_ground_geo` method.

        Returns
        -------
        None|GeoLocationType
            Coordinates may be populated as `NaN` if projection fails.
        """

        if image_location is None:
            return None
        if not the_structure.can_project_coordinates():
            logger.warning(
                'This sicd does not permit projection,\n\t'
                'so the image location can not be inferred')
            return None
        # make sure this is defined, for the sake of efficiency
        # (note: 'overide' is the spelling of the callee's parameter - do not "fix" it here)
        the_structure.define_coa_projection(overide=False)
        # SICD stored pixel values are chip-relative; shift to absolute
        # coordinates before projecting
        if isinstance(the_structure, SICDType):
            image_shift = numpy.array(
                [the_structure.ImageData.FirstRow, the_structure.ImageData.FirstCol], dtype='float64')
        else:
            image_shift = numpy.zeros((2, ), dtype='float64')
        # Accumulate constructor arguments in a dedicated dict. The previous
        # implementation reassigned `kwargs = {}`, which discarded the
        # caller-supplied projection options, and then splatted the accumulated
        # field values back into project_image_to_ground_geo on subsequent
        # iterations.
        ctor_kwargs = {}
        for attribute in cls._fields:
            value = getattr(image_location, attribute)
            if value is not None:
                coords = value.get_array(dtype='float64') + image_shift
                geo_coords = the_structure.project_image_to_ground_geo(
                    coords, ordering='latlong', projection_type=projection_type, **kwargs)
                ctor_kwargs[attribute] = geo_coords
        out = GeoLocationType(**ctor_kwargs)
        out.infer_center_pixel()
        return out

    def infer_center_pixel(self):
        """
        Infer the center point, if not already populated, by averaging the four
        corner points in ECF coordinates and converting back to geodetic.
        A no-op if any corner is missing.

        Returns
        -------
        None
        """

        if self.CenterPixel is not None:
            return
        current = numpy.zeros((3, ), dtype='float64')
        for entry in self._fields:
            if entry == 'CenterPixel':
                continue
            value = getattr(self, entry)
            if value is None:
                # cannot average with a missing corner - leave CenterPixel unset
                return
            current += 0.25*geodetic_to_ecf(value.get_array(dtype='float64'))
        self.CenterPixel = LatLonEleType.from_array(ecf_to_geodetic(current))
class FreeFormType(Serializable):
    """
    A single free-form (name, value) comment pair.
    """

    _fields = ('Name', 'Value')
    _required = _fields
    Name = StringDescriptor(
        'Name', _required)  # type: str
    Value = StringDescriptor(
        'Value', _required)  # type: str

    def __init__(self, Name=None, Value=None, **kwargs):
        """
        Parameters
        ----------
        Name : str
        Value : str
        kwargs
        """

        # forward any xml namespace bookkeeping provided by the parser
        for ns_attribute in ('_xml_ns', '_xml_ns_key'):
            if ns_attribute in kwargs:
                setattr(self, ns_attribute, kwargs[ns_attribute])
        self.Name, self.Value = Name, Value
        super(FreeFormType, self).__init__(**kwargs)
class CompoundCommentType(Serializable):
    """
    A comment which is either a single text value, or a collection of named
    free-form comments serialized as child elements. `Value` takes precedence
    over `Comments` on output.
    """

    _fields = ('Value', 'Comments')
    _required = ()
    _collections_tags = {'Comments': {'array': False, 'child_tag': 'NULL'}}
    # descriptors
    Value = StringDescriptor(
        'Value', _required,
        docstring='A single comment, this will take precedence '
                  'over the list')  # type: Optional[str]
    Comments = SerializableListDescriptor(
        'Comments', FreeFormType, _collections_tags, _required,
        docstring='A collection of comments')  # type: Optional[List[FreeFormType]]

    def __init__(self, Value=None, Comments=None, **kwargs):
        """
        Parameters
        ----------
        Value : None|str
        Comments : None|List[FreeFormType]
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.Value = Value
        self.Comments = Comments
        super(CompoundCommentType, self).__init__(**kwargs)

    @classmethod
    def from_node(cls, node, xml_ns, ns_key=None, kwargs=None):
        # Custom deserialization: a node with direct text becomes `Value`;
        # otherwise each child element becomes a FreeFormType whose Name is the
        # child's tag with the namespace prefix stripped.
        if kwargs is None:
            kwargs = {}
        # determine the "<prefix>:" string to strip from child tags
        if xml_ns is None:
            tag_start = ''
        elif ns_key is None:
            tag_start = xml_ns['default'] + ':'
        else:
            tag_start = xml_ns[ns_key] + ':'
        # NOTE(review): for an element with children, `node.text` may be
        # non-empty whitespace in some documents, which this truthiness check
        # would treat as a single Value - confirm inputs are stripped upstream
        if node.text:
            kwargs['Value'] = node.text
            kwargs['Comments'] = None
        else:
            value = []
            for element in node:
                tag_name = element.tag[len(tag_start):]
                value.append(FreeFormType(Name=tag_name, Value=element.text))
            kwargs['Value'] = None
            kwargs['Comments'] = value
        return super(CompoundCommentType, cls).from_node(node, xml_ns, ns_key=ns_key, kwargs=kwargs)

    def to_node(self, doc, tag, ns_key=None, parent=None, check_validity=False, strict=DEFAULT_STRICT, exclude=()):
        # Custom serialization mirroring from_node: a populated Value becomes
        # the node text, otherwise each Comments entry becomes a child element
        # named by its Name field.
        the_tag = '{}:{}'.format(ns_key, tag) if ns_key is not None else tag
        if self.Value is not None:
            node = create_text_node(doc, the_tag, self.Value, parent=parent)
        else:
            node = create_new_node(doc, the_tag, parent=parent)
            if self.Comments is not None:
                for entry in self.Comments:
                    child_tag = '{}:{}'.format(ns_key, entry.Name) if ns_key is not None else entry.Name
                    create_text_node(doc, child_tag, entry.Value, parent=node)
        return node
class TheObjectType(Serializable):
_fields = (
'SystemName', 'SystemComponent', 'NATOName', 'Function', 'Version', 'DecoyType', 'SerialNumber',
'ObjectClass', 'ObjectSubClass', 'ObjectTypeClass', 'ObjectType', 'ObjectLabel',
'SlantPlane', 'GroundPlane', 'Size', 'Orientation',
'Articulation', 'Configuration',
'Accessories', 'PaintScheme', 'Camouflage', 'Obscuration', 'ObscurationPercent', 'ImageLevelObscuration',
'ImageLocation', 'GeoLocation',
'TargetToClutterRatio', 'VisualQualityMetric',
'UnderlyingTerrain', 'OverlyingTerrain', 'TerrainTexture', 'SeasonalCover')
_required = ('SystemName', 'ImageLocation', 'GeoLocation')
# descriptors
SystemName = StringDescriptor(
'SystemName', _required, strict=DEFAULT_STRICT,
docstring='Name of the object.') # type: str
SystemComponent = StringDescriptor(
'SystemComponent', _required, strict=DEFAULT_STRICT,
docstring='Name of the weapon system component.') # type: Optional[str]
NATOName = StringDescriptor(
'NATOName', _required, strict=DEFAULT_STRICT,
docstring='Name of the object in NATO naming convention.') # type: Optional[str]
Function = StringDescriptor(
'Function', _required, strict=DEFAULT_STRICT,
docstring='Function of the object.') # type: Optional[str]
Version = StringDescriptor(
'Version', _required, strict=DEFAULT_STRICT,
docstring='Version number of the object.') # type: Optional[str]
DecoyType = StringDescriptor(
'DecoyType', _required, strict=DEFAULT_STRICT,
docstring='Object is a decoy or surrogate.') # type: Optional[str]
SerialNumber = StringDescriptor(
'SerialNumber', _required, strict=DEFAULT_STRICT,
docstring='Serial number of the object.') # type: Optional[str]
# label elements
ObjectClass = StringDescriptor(
'ObjectClass', _required, strict=DEFAULT_STRICT,
docstring='Top level class indicator; e.g., Aircraft, Ship, '
'Ground Vehicle, Missile Launcher, etc.') # type: Optional[str]
ObjectSubClass = StringDescriptor(
'ObjectSubClass', _required, strict=DEFAULT_STRICT,
docstring='Sub-class indicator; e.g., military, commercial') # type: Optional[str]
ObjectTypeClass = StringDescriptor(
'ObjectTypeClass', _required, strict=DEFAULT_STRICT,
docstring='Object type class indicator; e.g., '
'for Aircraft/Military - Propeller, Jet') # type: Optional[str]
ObjectType = StringDescriptor(
'ObjectType', _required, strict=DEFAULT_STRICT,
docstring='Object type indicator, e.g., '
'for Aircraft/Military/Jet - Bomber, Fighter') # type: Optional[str]
ObjectLabel = StringDescriptor(
'ObjectLabel', _required, strict=DEFAULT_STRICT,
docstring='Object label indicator, e.g., '
'for Bomber - Il-28, Tu-22M, Tu-160') # type: Optional[str]
SlantPlane = SerializableDescriptor(
'SlantPlane', PlanePhysicalType, _required, strict=DEFAULT_STRICT,
docstring='Object physical definition in the slant plane') # type: Optional[PlanePhysicalType]
GroundPlane = SerializableDescriptor(
'GroundPlane', PlanePhysicalType, _required, strict=DEFAULT_STRICT,
docstring='Object physical definition in the ground plane') # type: Optional[PlanePhysicalType]
# specific physical quantities
Size = SerializableDescriptor(
'Size', SizeType, _required, strict=DEFAULT_STRICT,
docstring='The actual physical size of the object') # type: Optional[SizeType]
Orientation = SerializableDescriptor(
'Orientation', OrientationType, _required, strict=DEFAULT_STRICT,
docstring='The actual orientation size of the object') # type: Optional[OrientationType]
Articulation = SerializableDescriptor(
'Articulation', CompoundCommentType, _required,
docstring='Articulation description(s)') # type: Optional[CompoundCommentType]
Configuration = SerializableDescriptor(
'Configuration', CompoundCommentType, _required,
docstring='Configuration description(s)') # type: Optional[CompoundCommentType]
Accessories = StringDescriptor(
'Accessories', _required, strict=DEFAULT_STRICT,
docstring='Defines items that are out of the norm, or have been added or removed.') # type: Optional[str]
PaintScheme = StringDescriptor(
'PaintScheme', _required, strict=DEFAULT_STRICT,
docstring='Paint scheme of object (e.g. olive drab, compass ghost grey, etc.).') # type: Optional[str]
Camouflage = StringDescriptor(
'Camouflage', _required, strict=DEFAULT_STRICT,
docstring='Details the camouflage on the object.') # type: Optional[str]
Obscuration = StringDescriptor(
'Obscuration', _required, strict=DEFAULT_STRICT,
docstring='General description of the obscuration.') # type: Optional[str]
ObscurationPercent = FloatDescriptor(
'ObscurationPercent', _required, strict=DEFAULT_STRICT,
docstring='The percent obscuration.') # type: Optional[float]
ImageLevelObscuration = StringDescriptor(
'ImageLevelObscuration', _required, strict=DEFAULT_STRICT,
docstring='Specific description of the obscuration based on the sensor look angle.') # type: Optional[str]
# location of the labeled item
ImageLocation = SerializableDescriptor(
'ImageLocation', ImageLocationType, _required, strict=DEFAULT_STRICT,
docstring='') # type: ImageLocationType
GeoLocation = SerializableDescriptor(
'GeoLocation', GeoLocationType, _required, strict=DEFAULT_STRICT,
docstring='') # type: GeoLocationType
# text quality descriptions
TargetToClutterRatio = StringDescriptor(
'TargetToClutterRatio', _required, strict=DEFAULT_STRICT,
docstring='') # type: Optional[str]
VisualQualityMetric = StringDescriptor(
'VisualQualityMetric', _required, strict=DEFAULT_STRICT,
docstring='') # type: Optional[str]
UnderlyingTerrain = StringDescriptor(
'UnderlyingTerrain', _required, strict=DEFAULT_STRICT,
docstring='') # type: Optional[str]
OverlyingTerrain = StringDescriptor(
'OverlyingTerrain', _required, strict=DEFAULT_STRICT,
docstring='') # type: Optional[str]
TerrainTexture = StringDescriptor(
'TerrainTexture', _required, strict=DEFAULT_STRICT,
docstring='') # type: Optional[str]
SeasonalCover = StringDescriptor(
'SeasonalCover', _required, strict=DEFAULT_STRICT,
docstring='') # type: Optional[str]
    def __init__(self, SystemName=None, SystemComponent=None, NATOName=None,
                 Function=None, Version=None, DecoyType=None, SerialNumber=None,
                 ObjectClass=None, ObjectSubClass=None, ObjectTypeClass=None,
                 ObjectType=None, ObjectLabel=None,
                 SlantPlane=None, GroundPlane=None,
                 Size=None, Orientation=None,
                 Articulation=None, Configuration=None,
                 Accessories=None, PaintScheme=None, Camouflage=None,
                 Obscuration=None, ObscurationPercent=None, ImageLevelObscuration=None,
                 ImageLocation=None, GeoLocation=None,
                 TargetToClutterRatio=None, VisualQualityMetric=None,
                 UnderlyingTerrain=None, OverlyingTerrain=None,
                 TerrainTexture=None, SeasonalCover=None,
                 **kwargs):
        """
        Most arguments are simply assigned to the identically-named descriptor
        attributes. `Articulation` and `Configuration` additionally accept
        convenience forms (see below).

        Parameters
        ----------
        SystemName : str
        SystemComponent : None|str
        NATOName : None|str
        Function : None|str
        Version : None|str
        DecoyType : None|str
        SerialNumber : None|str
        ObjectClass : None|str
        ObjectSubClass : None|str
        ObjectTypeClass : None|str
        ObjectType : None|str
        ObjectLabel : None|str
        SlantPlane : None|PlanePhysicalType
        GroundPlane : None|PlanePhysicalType
        Size : None|SizeType|numpy.ndarray|list|tuple
        Orientation : OrientationType
        Articulation : None|CompoundCommentType|str|List[FreeFormType]
        Configuration : None|CompoundCommentType|str|List[FreeFormType]
        Accessories : None|str
        PaintScheme : None|str
        Camouflage : None|str
        Obscuration : None|str
        ObscurationPercent : None|float
        ImageLevelObscuration : None|str
        ImageLocation : ImageLocationType
        GeoLocation : GeoLocationType
        TargetToClutterRatio : None|str
        VisualQualityMetric : None|str
        UnderlyingTerrain : None|str
        OverlyingTerrain : None|str
        TerrainTexture : None|str
        SeasonalCover : None|str
        kwargs
            Other keyword arguments
        """

        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.SystemName = SystemName
        self.SystemComponent = SystemComponent
        self.NATOName = NATOName
        self.Function = Function
        self.Version = Version
        self.DecoyType = DecoyType
        self.SerialNumber = SerialNumber
        self.ObjectClass = ObjectClass
        self.ObjectSubClass = ObjectSubClass
        self.ObjectTypeClass = ObjectTypeClass
        self.ObjectType = ObjectType
        self.ObjectLabel = ObjectLabel
        self.SlantPlane = SlantPlane
        self.GroundPlane = GroundPlane
        self.Size = Size
        self.Orientation = Orientation
        # Articulation accepts convenience forms: a plain string becomes a
        # single comment value, a list becomes free-form comments, a dict is
        # treated as CompoundCommentType keyword arguments.
        if isinstance(Articulation, string_types):
            self.Articulation = CompoundCommentType(Value=Articulation)
        elif isinstance(Articulation, list):
            self.Articulation = CompoundCommentType(Comments=Articulation)
        elif isinstance(Articulation, dict):
            self.Articulation = CompoundCommentType(**Articulation)
        else:
            self.Articulation = Articulation
        # Configuration accepts the same convenience forms as Articulation
        if isinstance(Configuration, string_types):
            self.Configuration = CompoundCommentType(Value=Configuration)
        elif isinstance(Configuration, list):
            self.Configuration = CompoundCommentType(Comments=Configuration)
        elif isinstance(Configuration, dict):
            self.Configuration = CompoundCommentType(**Configuration)
        else:
            self.Configuration = Configuration
        self.Accessories = Accessories
        self.PaintScheme = PaintScheme
        self.Camouflage = Camouflage
        self.Obscuration = Obscuration
        self.ObscurationPercent = ObscurationPercent
        self.ImageLevelObscuration = ImageLevelObscuration
        self.ImageLocation = ImageLocation
        self.GeoLocation = GeoLocation
        self.TargetToClutterRatio = TargetToClutterRatio
        self.VisualQualityMetric = VisualQualityMetric
        self.UnderlyingTerrain = UnderlyingTerrain
        self.OverlyingTerrain = OverlyingTerrain
        self.TerrainTexture = TerrainTexture
        self.SeasonalCover = SeasonalCover
        super(TheObjectType, self).__init__(**kwargs)
@staticmethod
def _check_placement(rows, cols, row_bounds, col_bounds, overlap_cutoff=0.5):
"""
Checks the bounds condition for the provided box.
Here inclusion is defined by what proportion of the area of the proposed
chip is actually contained inside the image bounds.
Parameters
----------
rows : int|float
The number of rows in the image.
cols : int|float
The number of columns in the image.
row_bounds : List
Of the form `[row min, row max]`
col_bounds : List
Of the form `[col min, col max]`
overlap_cutoff : float
Determines the transition from in the periphery to out of the image.
Returns
-------
int
1 - completely in the image
2 - the proposed chip has `overlap_cutoff <= fractional contained area < 1`
3 - the proposed chip has `fractional contained area < overlap_cutoff`
"""
if row_bounds[1] <= row_bounds[0] or col_bounds[1] <= col_bounds[0]:
raise ValueError('bounds out of order')
if 0 <= row_bounds[0] and rows < row_bounds[1] and 0 <= col_bounds[0] and cols < col_bounds[1]:
return 1 # completely in bounds
row_size = row_bounds[1] - row_bounds[0]
col_size = col_bounds[1] - col_bounds[0]
first_row, last_row = max(0, row_bounds[0]), min(rows, row_bounds[1])
first_col, last_col = max(0, col_bounds[0]), min(cols, col_bounds[1])
area_overlap = (last_row - first_row)*(last_col - first_col)
if area_overlap >= overlap_cutoff*row_size*col_size:
return 2 # the item is at the periphery
else:
return 3 # it should be considered out of range
    def set_image_location_from_sicd(self, sicd, populate_in_periphery=False):
        """
        Set the image location information with respect to the given SICD,
        assuming that the physical (geographic) coordinates are populated.
        A no-op if `ImageLocation` is already populated.

        Parameters
        ----------
        sicd : SICDType
        populate_in_periphery : bool
            Populate the location even when the object is only partially
            inside the image?

        Returns
        -------
        int
            -1 - insufficient metadata to proceed or other failure
            0 - nothing to be done
            1 - successful
            2 - object in the image periphery, populating based on `populate_in_periphery`
            3 - object not in the image field
        """

        if self.ImageLocation is not None:
            # no need to infer anything, it's already populated
            return 0
        if self.GeoLocation is None:
            logger.warning(
                'GeoLocation is not populated,\n\t'
                'so the image location can not be inferred')
            return -1
        if not sicd.can_project_coordinates():
            logger.warning(
                'This sicd does not permit projection,\n\t'
                'so the image location can not be inferred')
            return -1
        # gets the prospective image location (None on projection failure)
        image_location = ImageLocationType.from_geolocation(self.GeoLocation, sicd)
        if image_location is None:
            return -1
        # get nominal object extent, in pixels per dimension
        # NOTE(review): when Size is absent the fallback of 2.0 is used directly
        # as a pixel extent, while the Size-derived values are meters divided by
        # sample spacing - confirm the intended units of the default
        if self.Size is None:
            row_size = 2.0
            col_size = 2.0
        else:
            max_size = self.Size.get_max_diameter()
            row_size = max_size/sicd.Grid.Row.SS
            col_size = max_size/sicd.Grid.Col.SS
        # classify the object's nominal box against the image bounds
        rows = sicd.ImageData.NumRows
        cols = sicd.ImageData.NumCols
        center_pixel = image_location.CenterPixel.get_array(dtype='float64')
        row_bounds = [center_pixel[0] - 0.5*row_size, center_pixel[0] + 0.5*row_size]
        col_bounds = [center_pixel[1] - 0.5*col_size, center_pixel[1] + 0.5*col_size]
        placement = self._check_placement(rows, cols, row_bounds, col_bounds)
        if placement == 3:
            # entirely (or mostly) outside the image - do not populate
            return placement
        if placement == 2 and not populate_in_periphery:
            # peripheral, and the caller opted out of populating those
            return placement
        self.ImageLocation = image_location
        return placement
def set_geo_location_from_sicd(self, sicd, projection_type='HAE', **kwargs):
"""
Set the geographical location information with respect to the given SICD,
assuming that the image coordinates are populated.
.. Note::
This assumes that the image coordinates are with respect to the given
image (chip), and NOT including any sicd.ImageData.FirstRow/Col values,
which will be added here.
Parameters
----------
sicd : SICDType
projection_type : str
The projection type selector, one of `['PLANE', 'HAE', 'DEM']`. Using `'DEM'`
requires configuration for the DEM pathway described in
:func:`sarpy.geometry.point_projection.image_to_ground_dem`.
kwargs
The keyword arguments for the :func:`SICDType.project_image_to_ground_geo` method.
"""
if self.GeoLocation is not None:
# no need to infer anything, it's already populated
return
if self.ImageLocation is None:
logger.warning(
'ImageLocation is not populated,\n\t'
'so the geographical location can not be inferred')
return
if not sicd.can_project_coordinates():
logger.warning(
'This sicd does not permit projection,\n\t'
'so the geographical location can not be inferred')
return
self.GeoLocation = GeoLocationType.from_image_location(
self.ImageLocation, sicd, projection_type=projection_type, **kwargs)
    def set_chip_details_from_sicd(self, sicd, layover_shift=False, populate_in_periphery=False):
        """
        Set the chip information with respect to the given SICD, assuming that the
        image location and size are defined.

        Parameters
        ----------
        sicd : SICDType
        layover_shift : bool
            Shift based on layover direction? This should be `True` if the identification of
            the bounds and/or center pixel do not include any layover, as in
            populating location from known ground truth. This should be `False` if
            the identification of bounds and/or center pixel do include layover,
            potentially as based on annotation of the imagery itself in pixel
            space.
        populate_in_periphery : bool
            Should we populate for peripheral?

        Returns
        -------
        int
            -1 - insufficient metadata to proceed
            0 - nothing to be done
            1 - successful
            2 - object in the image periphery, populating based on `populate_in_periphery`
            3 - object not in the image field
        """
        if self.SlantPlane is not None:
            # no need to infer anything, it's already populated
            return 0
        if self.Size is None:
            logger.warning(
                'Size is not populated,\n\t'
                'so the chip size can not be inferred')
            return -1
        if self.ImageLocation is None:
            # try to set from geolocation
            return_value = self.set_image_location_from_sicd(sicd, populate_in_periphery=populate_in_periphery)
            if return_value in [-1, 3] or (return_value == 2 and not populate_in_periphery):
                return return_value
        # get nominal object size, in meters
        max_size = self.Size.get_max_diameter() # in meters
        row_size = max_size/sicd.Grid.Row.SS # in pixels
        col_size = max_size/sicd.Grid.Col.SS # in pixels
        # get nominal image box
        image_location = self.ImageLocation
        pixel_box = image_location.get_nominal_box(row_length=row_size, col_length=col_size)
        ground_unit_norm = wgs_84_norm(sicd.GeoData.SCP.ECF.get_array())
        slant_plane_unit_norm = numpy.cross(sicd.Grid.Row.UVectECF.get_array(), sicd.Grid.Col.UVectECF.get_array())
        # dot product of ground and slant plane normals scales heights between planes
        magnitude_factor = ground_unit_norm.dot(slant_plane_unit_norm)
        # determines the relative size of things in slant plane versus ground plane
        # get nominal layover vector - should be pointed generally towards the top (negative rows value)
        layover_magnitude = sicd.SCPCOA.LayoverMagnitude
        if layover_magnitude is None:
            # NOTE(review): 0.25 appears to be a fallback layover magnitude when
            # the SICD does not populate it - confirm against sarpy conventions
            layover_magnitude = 0.25
        layover_size = self.Size.Height*layover_magnitude*magnitude_factor
        if sicd.SCPCOA.LayoverAng is None:
            layover_angle = 0.0
        else:
            # layover angle relative to the azimuth direction, in radians
            layover_angle = numpy.deg2rad(sicd.SCPCOA.LayoverAng - sicd.SCPCOA.AzimAng)
        layover_vector = layover_size*numpy.array(
            [numpy.cos(layover_angle)/sicd.Grid.Row.SS, numpy.sin(layover_angle)/sicd.Grid.Col.SS])
        # craft the layover box
        if layover_shift:
            layover_box = pixel_box + layover_vector
        else:
            layover_box = pixel_box
        # determine the maximum and minimum pixel values here
        min_rows = min(numpy.min(pixel_box[:, 0]), numpy.min(layover_box[:, 0]))
        max_rows = max(numpy.max(pixel_box[:, 0]), numpy.max(layover_box[:, 0]))
        min_cols = min(numpy.min(pixel_box[:, 1]), numpy.min(layover_box[:, 1]))
        max_cols = max(numpy.max(pixel_box[:, 1]), numpy.max(layover_box[:, 1]))
        # determine the padding amount: 30 percent of the extent, capped at 5 pixels
        row_pad = min(5, 0.3*(max_rows-min_rows))
        col_pad = min(5, 0.3*(max_cols-min_cols))
        # check our bounding information
        rows = sicd.ImageData.NumRows
        cols = sicd.ImageData.NumCols
        chip_rows = [min_rows - row_pad, max_rows + row_pad]
        chip_cols = [min_cols - col_pad, max_cols + col_pad]
        placement = self._check_placement(rows, cols, chip_rows, chip_cols)
        if placement == 3 or (placement == 2 and not populate_in_periphery):
            return placement
        # set the physical data ideal chip size
        physical = PhysicalType.from_ranges(chip_rows, chip_cols, rows, cols)
        # determine nominal shadow vector
        shadow_magnitude = sicd.SCPCOA.ShadowMagnitude
        if shadow_magnitude is None:
            shadow_magnitude = 1.0
        shadow_size = self.Size.Height*shadow_magnitude*magnitude_factor
        shadow_angle = sicd.SCPCOA.Shadow
        # when unpopulated, assume the shadow points straight down the rows (pi radians)
        shadow_angle = numpy.pi if shadow_angle is None else numpy.deg2rad(shadow_angle)
        shadow_vector = shadow_size*numpy.array(
            [numpy.cos(shadow_angle)/sicd.Grid.Row.SS, numpy.sin(shadow_angle)/sicd.Grid.Col.SS])
        shadow_box = pixel_box + shadow_vector
        # expand the previously determined bounds to also cover the shadow box
        min_rows = min(min_rows, numpy.min(shadow_box[:, 0]))
        max_rows = max(max_rows, numpy.max(shadow_box[:, 0]))
        min_cols = min(min_cols, numpy.min(shadow_box[:, 1]))
        max_cols = max(max_cols, numpy.max(shadow_box[:, 1]))
        chip_rows = [min_rows - row_pad, max_rows + row_pad]
        chip_cols = [min_cols - col_pad, max_cols + col_pad]
        # set the physical with shadows data ideal chip size
        physical_with_shadows = PhysicalType.from_ranges(chip_rows, chip_cols, rows, cols)
        self.SlantPlane = PlanePhysicalType(
            Physical=physical,
            PhysicalWithShadows=physical_with_shadows)
        return placement
def get_image_geometry_object_for_sicd(self, include_chip=False):
"""
Gets the geometry element describing the image geometry for a sicd.
Returns
-------
Geometry
"""
if self.ImageLocation is None:
raise ValueError('No ImageLocation defined.')
image_geometry_object = self.ImageLocation.get_geometry_object()
if include_chip and self.SlantPlane is not None:
center_pixel = self.SlantPlane.Physical.CenterPixel.get_array()
chip_size = self.SlantPlane.Physical.ChipSize.get_array()
shift = numpy.array([[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]], dtype='float64')
shift[:, 0] *= chip_size[0]
shift[:, 1] *= chip_size[1]
chip_rect = center_pixel + shift
chip_area = Polygon(coordinates=[chip_rect, ])
if isinstance(image_geometry_object, GeometryCollection):
image_geometry_object.geometries.append(chip_area)
return image_geometry_object
else:
return GeometryCollection(geometries=[image_geometry_object, chip_area])
return image_geometry_object
# other types for the DetailObjectInfo
class NominalType(Serializable):
    """Nominal chip size details applying to every object in the dataset."""
    _fields = ('ChipSize', )
    _required = _fields
    ChipSize = SerializableDescriptor(
        'ChipSize', RangeCrossRangeType, _required, strict=DEFAULT_STRICT,
        docstring='The nominal chip size used for every object in the dataset, '
                  'in the appropriate plane')  # type: RangeCrossRangeType

    def __init__(self, ChipSize=None, **kwargs):
        """
        Parameters
        ----------
        ChipSize : RangeCrossRangeType|numpy.ndarray|list|tuple
        kwargs
            Other keyword arguments
        """
        # forward any xml namespace details, when supplied
        for ns_key in ('_xml_ns', '_xml_ns_key'):
            if ns_key in kwargs:
                setattr(self, ns_key, kwargs[ns_key])
        self.ChipSize = ChipSize
        super(NominalType, self).__init__(**kwargs)
class PlaneNominalType(Serializable):
    """Container for nominal chip details in a given (slant or ground) plane."""
    _fields = ('Nominal', )
    _required = _fields
    Nominal = SerializableDescriptor(
        'Nominal', NominalType, _required,
        docstring='Nominal chip details in the appropriate plane')  # type: NominalType

    def __init__(self, Nominal=None, **kwargs):
        """
        Parameters
        ----------
        Nominal : NominalType
        kwargs
            Other keyword arguments
        """
        # forward any xml namespace details, when supplied
        for ns_key in ('_xml_ns', '_xml_ns_key'):
            if ns_key in kwargs:
                setattr(self, ns_key, kwargs[ns_key])
        self.Nominal = Nominal
        super(PlaneNominalType, self).__init__(**kwargs)
# the main type
class DetailObjectInfoType(Serializable):
    """Top-level object information: counts, default chip sizes, and the object collection."""
    _fields = (
        'NumberOfObjectsInImage', 'NumberOfObjectsInScene',
        'SlantPlane', 'GroundPlane', 'Objects')
    _required = (
        'NumberOfObjectsInImage', 'NumberOfObjectsInScene', 'Objects')
    _collections_tags = {'Objects': {'array': False, 'child_tag': 'Object'}}
    # descriptors
    NumberOfObjectsInImage = IntegerDescriptor(
        'NumberOfObjectsInImage', _required, strict=DEFAULT_STRICT,
        docstring='Number of ground truthed objects in the image.')  # type: int
    NumberOfObjectsInScene = IntegerDescriptor(
        'NumberOfObjectsInScene', _required, strict=DEFAULT_STRICT,
        docstring='Number of ground truthed objects in the scene.')  # type: int
    SlantPlane = SerializableDescriptor(
        'SlantPlane', PlaneNominalType, _required,
        docstring='Default chip sizes in the slant plane.')  # type: Optional[PlaneNominalType]
    GroundPlane = SerializableDescriptor(
        'GroundPlane', PlaneNominalType, _required,
        docstring='Default chip sizes in the ground plane.')  # type: Optional[PlaneNominalType]
    Objects = SerializableListDescriptor(
        'Objects', TheObjectType, _collections_tags, _required, strict=DEFAULT_STRICT,
        docstring='The object collection')  # type: List[TheObjectType]

    def __init__(self, NumberOfObjectsInImage=None, NumberOfObjectsInScene=None,
                 SlantPlane=None, GroundPlane=None, Objects=None, **kwargs):
        """
        Parameters
        ----------
        NumberOfObjectsInImage : int
        NumberOfObjectsInScene : int
        SlantPlane : None|PlaneNominalType
        GroundPlane : None|PlaneNominalType
        Objects : List[TheObjectType]
        kwargs
            Other keyword arguments
        """
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        self.NumberOfObjectsInImage = NumberOfObjectsInImage
        self.NumberOfObjectsInScene = NumberOfObjectsInScene
        self.SlantPlane = SlantPlane
        self.GroundPlane = GroundPlane
        self.Objects = Objects
        super(DetailObjectInfoType, self).__init__(**kwargs)

    def set_image_location_from_sicd(
            self, sicd, layover_shift=True, populate_in_periphery=False, include_out_of_range=False):
        """
        Set the image location information with respect to the given SICD,
        assuming that the physical coordinates are populated. The `NumberOfObjectsInImage`
        will be set, and `NumberOfObjectsInScene` will be left unchanged.

        Parameters
        ----------
        sicd : SICDType
        layover_shift : bool
            Account for possible layover shift in calculated chip sizes?
        populate_in_periphery : bool
            Populate image information for objects on the periphery?
        include_out_of_range : bool
            Include the objects which are out of range (with no image location information)?
        """
        def update_object(temp_object, in_image_count):
            # delegate to the per-object population; raises if already populated
            status = temp_object.set_image_location_from_sicd(
                sicd, populate_in_periphery=populate_in_periphery)
            use_object = False
            if status == 0:
                raise ValueError('Object already has image details set')
            if status == 1 or (status == 2 and populate_in_periphery):
                use_object = True
                # once placed in the image, also populate the chip details
                temp_object.set_chip_details_from_sicd(
                    sicd, layover_shift=layover_shift, populate_in_periphery=True)
                in_image_count += 1
            return use_object, in_image_count
        objects_in_image = 0
        if include_out_of_range:
            # the objects list is just modified in place
            for the_object in self.Objects:
                _, objects_in_image = update_object(the_object, objects_in_image)
        else:
            # we make a new objects list
            objects = []
            for the_object in self.Objects:
                use_this_object, objects_in_image = update_object(the_object, objects_in_image)
                if use_this_object:
                    objects.append(the_object)
            self.Objects = objects
        self.NumberOfObjectsInImage = objects_in_image
| 39.483095 | 115 | 0.63284 |
7a6e2aebf0bcfc76362d80f0333b6c2549bddf95 | 209 | py | Python | erptask/erptask/doctype/erptask/test_erptask.py | beshoyAtefZaki/erptask | 85eb67a1ef9618994a9d39d50cfc5ec05c17d74c | [
"MIT"
] | null | null | null | erptask/erptask/doctype/erptask/test_erptask.py | beshoyAtefZaki/erptask | 85eb67a1ef9618994a9d39d50cfc5ec05c17d74c | [
"MIT"
] | null | null | null | erptask/erptask/doctype/erptask/test_erptask.py | beshoyAtefZaki/erptask | 85eb67a1ef9618994a9d39d50cfc5ec05c17d74c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Beshoy Atef and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class Testerptask(unittest.TestCase):
    """Placeholder test case for the erptask doctype; no tests are defined yet."""
| 19 | 50 | 0.76555 |
7a91af179ab54191171bda12b911b857c2d9bd22 | 6,764 | py | Python | framework/SupervisedLearning/ScikitLearn/LinearModel/LassoLars.py | FlanFlanagan/raven | bd7fca18af94376a28e2144ba1da72c01c8d343c | [
"Apache-2.0"
] | 1 | 2022-03-10T18:54:09.000Z | 2022-03-10T18:54:09.000Z | framework/SupervisedLearning/ScikitLearn/LinearModel/LassoLars.py | FlanFlanagan/raven | bd7fca18af94376a28e2144ba1da72c01c8d343c | [
"Apache-2.0"
] | null | null | null | framework/SupervisedLearning/ScikitLearn/LinearModel/LassoLars.py | FlanFlanagan/raven | bd7fca18af94376a28e2144ba1da72c01c8d343c | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
Lasso model fit with Least Angle Regression a.k.a. Lars
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
from numpy import finfo
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class LassoLars(ScikitLearnBase):
  """
    Lasso model fit with Least Angle Regression
  """
  # RAVEN ROM metadata: a regression model; RAVEN should not normalize inputs for it
  info = {'problemtype':'regression', 'normalize':False}

  def __init__(self):
    """
      Constructor that will appropriately initialize a supervised learning object
      @ In, None
      @ Out, None
    """
    super().__init__()
    import sklearn
    import sklearn.linear_model
    # the wrapped scikit-learn estimator class (instantiated later by the base class)
    self.model = sklearn.linear_model.LassoLars

  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    specs = super(LassoLars, cls).getInputSpecification()
    specs.description = r"""The \xmlNode{LassoLars} (\textit{Lasso model fit with Least Angle Regression})
                        It is a Linear Model trained with an L1 prior as regularizer.
                        The optimization objective for Lasso is:
                        \begin{equation}
                        (1 / (2 * n\_samples)) * ||y - Xw||^2\_2 + alpha * ||w||\_1
                        \end{equation}
                        \zNormalizationNotPerformed{LassoLars}
                        """
    specs.addSub(InputData.parameterInputFactory("alpha", contentType=InputTypes.FloatType,
                                                 descr=r"""Constant that multiplies the L1 term. Defaults to 1.0.
                                                 $alpha = 0$ is equivalent to an ordinary least square, solved by
                                                 the LinearRegression object. For numerical reasons, using $alpha = 0$
                                                 with the Lasso object is not advised.""", default=1.0))
    specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType,
                                                 descr=r"""Whether the intercept should be estimated or not. If False,
                                                 the data is assumed to be already centered.""", default=True))
    specs.addSub(InputData.parameterInputFactory("normalize", contentType=InputTypes.BoolType,
                                                 descr=r"""This parameter is ignored when fit_intercept is set to False. If True,
                                                 the regressors X will be normalized before regression by subtracting the mean and
                                                 dividing by the l2-norm.""", default=False))
    specs.addSub(InputData.parameterInputFactory("precompute", contentType=InputTypes.StringType,
                                                 descr=r"""Whether to use a precomputed Gram matrix to speed up calculations.
                                                 For sparse input this option is always True to preserve sparsity.""", default='auto'))
    specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType,
                                                 descr=r"""The maximum number of iterations.""", default=500))
    specs.addSub(InputData.parameterInputFactory("eps", contentType=InputTypes.FloatType,
                                                 descr=r"""The machine-precision regularization in the computation of the Cholesky
                                                 diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol
                                                 parameter in some iterative optimization-based algorithms, this parameter does not
                                                 control the tolerance of the optimization.""", default=finfo(float).eps))
    specs.addSub(InputData.parameterInputFactory("positive", contentType=InputTypes.BoolType,
                                                 descr=r"""When set to True, forces the coefficients to be positive.""", default=False))
    # New in sklearn version 0.23
    # specs.addSub(InputData.parameterInputFactory("jitter", contentType=InputTypes.FloatType,
    #                                              descr=r"""Upper bound on a uniform noise parameter to be added to the y values,
    #                                              to satisfy the model’s assumption of one-at-a-time computations. Might help
    #                                              with stability.""", default=None))
    specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.BoolType,
                                                 descr=r"""Amount of verbosity.""", default=False))
    return specs

  def _handleInput(self, paramInput):
    """
      Function to handle the common parts of the distribution parameter input.
      @ In, paramInput, ParameterInput, the already parsed input.
      @ Out, None
    """
    super()._handleInput(paramInput)
    settings, notFound = paramInput.findNodesAndExtractValues(['alpha','fit_intercept', 'normalize', 'precompute',
                                                              'max_iter','eps','positive', 'verbose'])
    # notFound must be empty
    assert(not notFound)
    self.initializeModel(settings)
| 59.858407 | 136 | 0.560615 |
d7647beab46de9c8ac6f680970f88588a8a6e7de | 4,360 | py | Python | google/cloud/security/common/gcp_type/backend_service.py | pombredanne/forseti-security | 68a9a88243460065e00b6c131b3d9abd0331fb37 | [
"Apache-2.0"
] | 1 | 2018-03-26T08:15:21.000Z | 2018-03-26T08:15:21.000Z | google/cloud/security/common/gcp_type/backend_service.py | pombredanne/forseti-security | 68a9a88243460065e00b6c131b3d9abd0331fb37 | [
"Apache-2.0"
] | null | null | null | google/cloud/security/common/gcp_type/backend_service.py | pombredanne/forseti-security | 68a9a88243460065e00b6c131b3d9abd0331fb37 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Compute Backend Service.
See: https://cloud.google.com/compute/docs/reference/latest/backendServices
"""
import os
from google.cloud.security.common.gcp_type import key
from google.cloud.security.common.gcp_type import resource
from google.cloud.security.common.util import parser
# pylint: disable=too-many-instance-attributes
class BackendService(resource.Resource):
    """Represents BackendService resource."""

    def __init__(self, **kwargs):
        """BackendService resource.

        Args:
            **kwargs: The object's attributes.
        """
        super(BackendService, self).__init__(
            resource_id=kwargs.get('id'),
            resource_type=resource.ResourceType.BACKEND_SERVICE,
            name=kwargs.get('name'),
            display_name=kwargs.get('name'))
        # local aliases keep the attribute population below compact
        get = kwargs.get
        unjson = parser.json_unstringify
        self.affinity_cookie_ttl_sec = get('affinity_cookie_ttl_sec')
        self.backends = unjson(get('backends'))
        self.cdn_policy = unjson(get('cdn_policy'))
        self.connection_draining = unjson(get('connection_draining'))
        self.creation_timestamp = get('creation_timestamp')
        self.description = get('description')
        self.enable_cdn = get('enable_cdn')
        self.health_checks = unjson(get('health_checks'))
        self.iap = unjson(get('iap'))
        self.load_balancing_scheme = get('load_balancing_scheme')
        self.port = get('port')
        self.port_name = get('port_name')
        self.project_id = get('project_id')
        self.protocol = get('protocol')
        self.region = get('region')
        self.resource_id = get('id')
        self.session_affinity = get('session_affinity')
        self.timeout_sec = get('timeout_sec')

    @property
    def key(self):
        """Returns a Key identifying the object.

        Returns:
            Key: the key
        """
        return Key.from_args(self.project_id, self.name, region=self.region)
KEY_OBJECT_KIND = 'BackendService'
class Key(key.Key):
    """An identifier for a specific backend service."""

    # Backend services can be regional or global.
    @staticmethod
    def from_args(project_id, name, region=None):
        """Construct a Key from specific values.

        Args:
            project_id (str): project_id
            name (str): name
            region (str): region (optional)

        Returns:
            Key: the key
        """
        if region:
            # region may arrive as a full resource URL; keep only the trailing name
            region = os.path.basename(region)
        return Key(KEY_OBJECT_KIND, {
            'project_id': project_id,
            'name': name,
            'region': region})

    @staticmethod
    def from_url(url):
        """Construct a Key from a URL.

        Args:
            url (str): Object reference URL

        Returns:
            Key: the key

        Raises:
            ValueError: Required parameters are missing.
        """
        # map URL path components to key fields; 'regions' is absent for global services
        obj = Key._from_url(
            KEY_OBJECT_KIND,
            {'projects': 'project_id',
             'regions': 'region',
             'backendServices': 'name'},
            url)
        if not obj.project_id or not obj.name:
            raise ValueError('Missing fields in URL %r' % url)
        return obj

    @property
    def project_id(self):
        """Object property: project_id

        Returns:
            str: project_id
        """
        return self._path_component('project_id')

    @property
    def name(self):
        """Object property: name

        Returns:
            str: name
        """
        return self._path_component('name')
| 31.366906 | 76 | 0.631651 |
9d36c0a2016c1f7cf825f268d9272587f5d7034f | 4,610 | py | Python | husky_directory/services/reducer.py | UWIT-IAM/uw-husky-directory | 0eae8ca8fddec183964adfd26f4935357eae963d | [
"MIT"
] | null | null | null | husky_directory/services/reducer.py | UWIT-IAM/uw-husky-directory | 0eae8ca8fddec183964adfd26f4935357eae963d | [
"MIT"
] | 87 | 2020-11-17T20:31:25.000Z | 2022-03-31T16:37:45.000Z | husky_directory/services/reducer.py | UWIT-IAM/uw-husky-directory | 0eae8ca8fddec183964adfd26f4935357eae963d | [
"MIT"
] | null | null | null | from collections import OrderedDict
from functools import cached_property
from logging import Logger
from typing import Dict, Optional, Tuple
from injector import inject
from husky_directory.models.pws import ListPersonsOutput, NamedIdentity, ResultBucket
from husky_directory.util import is_similar, readable_list
class NamedIdentityAnalyzer:
    """
    Classifies how an identity's displayed name matches a query string:
    exact match, last/first name match, prefix match, fuzzy similarity,
    or token containment. All comparisons are case-insensitive.
    """

    def __init__(
        self, entity: NamedIdentity, query_string: str, fuzziness: float = 0.25
    ):
        self.entity = entity
        self.query_string = query_string
        self.fuzziness = fuzziness
        # pre-compute the lowercase forms once, up front
        self.cmp_name = entity.display_name.lower()
        self.cmp_surname = entity.displayed_surname.lower()
        self.cmp_first_name = entity.displayed_first_name.lower()
        self.cmp_query = query_string.lower()
        self.cmp_query_tokens = self.cmp_query.split()
        self.num_query_tokens = len(self.cmp_query_tokens)

    @cached_property
    def name_matches_query(self) -> bool:
        return self.cmp_query == self.cmp_name

    @cached_property
    def last_name_matches_query(self) -> bool:
        return self.cmp_query == self.cmp_surname

    @cached_property
    def first_name_matches_query(self) -> bool:
        return self.cmp_query == self.cmp_first_name

    @cached_property
    def first_name_starts_with_query(self) -> bool:
        return self.cmp_first_name.startswith(self.cmp_query)

    @cached_property
    def last_name_starts_with_query(self) -> bool:
        return self.cmp_surname.startswith(self.cmp_query)

    @cached_property
    def all_query_tokens_in_name(self) -> bool:
        return all(piece in self.cmp_name for piece in self.cmp_query_tokens)

    @cached_property
    def name_is_similar_to_query(self) -> bool:
        return is_similar(
            query=self.cmp_query, display_name=self.cmp_name, fuzziness=self.fuzziness
        )

    @cached_property
    def relevant_bucket(self) -> Optional[Tuple[str, int]]:
        """
        :return: A tuple whose first entry is the bucket description, and whose second
                 entry is the bucket priority/sort key. This helps to make sure that
                 results are printed to users in order of (what we declare as) relevance.
        """
        # checks are evaluated lazily, in descending order of relevance
        ordered_checks = (
            ('name_matches_query', 'Name is "{}"', 1),
            ('last_name_matches_query', 'Last name is "{}"', 2),
            ('first_name_matches_query', 'First name is "{}"', 3),
            ('last_name_starts_with_query', 'Last name starts with "{}"', 4),
            ('first_name_starts_with_query', 'First name starts with "{}"', 5),
            ('name_is_similar_to_query', 'Name is similar to "{}"', 6),
        )
        for attr_name, template, rank in ordered_checks:
            if getattr(self, attr_name):
                return template.format(self.query_string), rank
        if self.all_query_tokens_in_name:
            readable = readable_list(self.query_string.split())
            if self.num_query_tokens > 2:
                return "Name contains all of {}".format(readable), 7
            return "Name contains {}".format(readable), 7
        return None
class NameSearchResultReducer:
    """Folds paged PWS search output into relevance-ordered result buckets, de-duplicating by netid."""

    @inject
    def __init__(self, logger: Logger):
        # netids already assigned to a bucket; used to drop duplicates across calls
        self.duplicate_netids = set()
        # count of results skipped because their netid was already seen
        self.duplicate_hit_count = 0
        self.logger = logger

    def reduce_output(
        self,
        output: ListPersonsOutput,
        query_string: str,
        buckets: Optional[Dict[str, ResultBucket]] = None,
    ) -> Dict[str, ResultBucket]:
        """
        Place each person from `output` into a bucket describing how their name
        matches `query_string`, returning the buckets ordered by relevance.
        May be called repeatedly, passing the accumulating `buckets` back in.
        """
        buckets = buckets or {}
        for pws_person in output.persons:
            if pws_person.netid in self.duplicate_netids:
                self.duplicate_hit_count += 1
                continue
            analyzer = NamedIdentityAnalyzer(
                entity=pws_person, query_string=query_string
            )
            bucket, relevance = analyzer.relevant_bucket or (None, None)
            if not bucket:
                # This is unlikely to happen unless PWS starts serving
                # some highly irrelevant results for some reason
                self.logger.info(
                    f"Could not find relevant bucket for person {pws_person.display_name} matching "
                    f"query {query_string}"
                )
                continue
            if bucket not in buckets:
                buckets[bucket] = ResultBucket(description=bucket, relevance=relevance)
            buckets[bucket].add_person(pws_person)
            self.duplicate_netids.add(pws_person.netid)
        # lower relevance number sorts first (most relevant)
        return OrderedDict(sorted(buckets.items(), key=lambda i: i[1].relevance))
| 37.177419 | 100 | 0.655531 |
98a8dc79f6fd8d18be085c8627f5cc58cb0f4276 | 5,623 | py | Python | src/manipin_json/jsondef.py | deeso/json-search-replace | d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d | [
"Apache-2.0"
] | 1 | 2019-02-08T14:42:45.000Z | 2019-02-08T14:42:45.000Z | src/manipin_json/jsondef.py | deeso/manipin-json | d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d | [
"Apache-2.0"
] | null | null | null | src/manipin_json/jsondef.py | deeso/manipin-json | d1dd75cfaecb65bf8fcbad0c80a0bd839eccaa8d | [
"Apache-2.0"
] | null | null | null | # define a key matching language
# -- E:{'k1':v1, 'k2':v2} <-- Exact structure match
# -- S:{'k1':v1 } <-- Only this value is needed structure match
# -- E:[v1, v2] <-- Exact structure match
# -- S:[v1, v2] <-- Only values v1 and v2 are needed
# -- E:{'k1':v1, 'k2':v2} <-- Exact structure match
# -- {'k1':v1, 'k2':XX} <-- Replace XX with value
# -- S:{'k1':v1 } <-- Only this value is needed structure match
# -- {'k1':v1, 'k2':XX } <-- insert 'k2' with value XX
# -- E:[v1, v2] <-- Exact structure match
# -- [v1, v2, XX] <-- Insert XX
# -- S:[v1, v2] <-- Only values v1 and v2 are needed
# -- [v1, XX] <-- remove v2 and insert XX
# single-character type codes used throughout this module
D = 'D' # dict
L = 'L' # list
S = 'S' # string
I = 'I' # int
N = 'N' # null/none
Z = 'Z' # boolean
P_TYPES = [S, I, N, Z]   # primitive type codes
C_TYPES = [D, L]         # container type codes
A_TYPES = P_TYPES + C_TYPES  # all type codes
# python types allowed
CP_TYPES = [type({}), type(set()), type([])]  # container python types (NOTE: set is not JSON-representable)
PP_TYPES = [type(True), type(""), type(b""), type(0), type(None)]  # primitive python types
AP_TYPES = CP_TYPES + PP_TYPES
# python json type mapping
# NOTE(review): the 'N' and 'Z' entries hold bare types while the others hold
# lists of types - consumers must handle both shapes; confirm intended.
JP_MAP = {'D': [type({})], 'L': [type(set()), type([])],
          'S': [type(""), type(b"")], 'I': [type(0)],
          'N': type(None), 'Z': type(True)}
# reverse mapping: python type -> type code (bool maps to 'Z', not 'I',
# because dict lookup uses the exact type object)
PJ_MAP = {type({}): 'D',
          type(set()): 'L', type([]): 'L',
          type(""): 'S', type(b""): 'S',
          type(0): 'I', type(None): 'N',
          type(True): 'Z'
          }
class SimpleSearch(object):
    """
    Structural matcher for JSON-like data.

    ``SimpleSearch(value=X).check_value(data)`` returns True when ``data``
    structurally matches ``X``: dicts match on identical key sets with matching
    values, lists/sets match element-wise after sorting, strings compare with
    bytes decoded as utf-8, and other primitives compare by equality.

    Fixes over the original implementation:
    - ``self.D``/``self.L``/... and ``self.P_TYPES`` referenced class attributes
      that were never defined (every call raised AttributeError); they are now
      defined on the class, mirroring the module-level constants.
    - ``check_value`` discarded the results of the sub-checks and always
      returned False; it now returns them.
    - ``check_dict_value`` re-wrapped the whole dict in a key-less search,
      which the constructor rejects; it now performs the structural match
      directly (exact match, per the module header comments).
    """
    # type codes, mirroring the module-level constants
    D = 'D'  # dict
    L = 'L'  # list/set
    S = 'S'  # string
    I = 'I'  # int
    N = 'N'  # null/None
    Z = 'Z'  # boolean
    P_TYPES = [S, I, N, Z]
    # python type -> code (exact-type lookup, so bool maps to Z, not I)
    _PJ_MAP = {
        dict: 'D',
        set: 'L', list: 'L',
        str: 'S', bytes: 'S',
        int: 'I', type(None): 'N',
        bool: 'Z',
    }

    def __init__(self, name=None, key=None, value=None, new_value=None):
        self.key = key
        self.name = name
        self.value = value
        self.new_value = new_value
        self.type = self._PJ_MAP.get(type(value), -1)
        if self.type == -1:
            raise Exception("Invalid value specified")
        if self.type == self.D and key is None:
            raise Exception("Dictionary specified but no key provided")

    @classmethod
    def _prim_equal(cls, expected, actual):
        """Primitive equality; bytes are decoded so b'x' matches 'x'."""
        if isinstance(expected, bytes):
            expected = expected.decode('utf-8')
        if isinstance(actual, bytes):
            actual = actual.decode('utf-8')
        return expected == actual

    @classmethod
    def _match(cls, expected, actual):
        """Recursive structural match without the top-level key requirement."""
        t_expected = cls._PJ_MAP.get(type(expected), -1)
        t_actual = cls._PJ_MAP.get(type(actual), -1)
        if t_expected == -1 or t_expected != t_actual:
            return False
        if t_expected == cls.D:
            if set(expected) != set(actual):
                return False
            return all(cls._match(v, actual[k]) for k, v in expected.items())
        if t_expected == cls.L:
            if len(expected) != len(actual):
                return False
            return all(cls._match(a, b)
                       for a, b in zip(sorted(expected), sorted(actual)))
        return cls._prim_equal(expected, actual)

    def check_value(self, json_data):
        """Return True if json_data structurally matches the stored value."""
        t = self._PJ_MAP.get(type(json_data), -1)
        if t != self.type:
            return False
        if self.type == self.D:
            return self.check_dict_value(json_data)
        if self.type == self.L:
            return self.check_seq_value(json_data)
        if self.type in self.P_TYPES:
            return self.check_prim_value(json_data)
        return False

    def check_dict_value(self, json_data):
        """Exact structural dict match; the configured key must be present."""
        t = self._PJ_MAP.get(type(json_data), -1)
        if t != self.D:
            return False
        if self.key not in json_data:
            return False
        return self._match(self.value, json_data)

    def check_seq_value(self, json_data):
        """Order-insensitive sequence match (elements compared after sorting)."""
        t = self._PJ_MAP.get(type(json_data), -1)
        if t != self.type:
            return False
        if len(self.value) != len(json_data):
            return False
        return all(self._match(a, b)
                   for a, b in zip(sorted(self.value), sorted(json_data)))

    def check_prim_value(self, json_data):
        """Primitive match with bytes/str interchangeability."""
        t = self._PJ_MAP.get(type(json_data), -1)
        if t != self.type:
            return False
        return self._prim_equal(self.value, json_data)
class InsertSearch(object):
    """
    Structural matcher for JSON-like data; behaviorally identical to
    ``SimpleSearch`` (the original was a verbatim copy-paste and is kept as a
    separate public class for backward compatibility).

    Carries the same fixes: the previously undefined ``self.D``/``self.P_TYPES``
    attributes are now defined on the class, ``check_value`` returns the
    sub-check results instead of always False, and ``check_dict_value``
    performs the structural match directly instead of constructing a key-less
    search that the constructor rejects.
    """
    # type codes, mirroring the module-level constants
    D = 'D'  # dict
    L = 'L'  # list/set
    S = 'S'  # string
    I = 'I'  # int
    N = 'N'  # null/None
    Z = 'Z'  # boolean
    P_TYPES = [S, I, N, Z]
    # python type -> code (exact-type lookup, so bool maps to Z, not I)
    _PJ_MAP = {
        dict: 'D',
        set: 'L', list: 'L',
        str: 'S', bytes: 'S',
        int: 'I', type(None): 'N',
        bool: 'Z',
    }

    def __init__(self, name=None, key=None, value=None, new_value=None):
        self.key = key
        self.name = name
        self.value = value
        self.new_value = new_value
        self.type = self._PJ_MAP.get(type(value), -1)
        if self.type == -1:
            raise Exception("Invalid value specified")
        if self.type == self.D and key is None:
            raise Exception("Dictionary specified but no key provided")

    @classmethod
    def _prim_equal(cls, expected, actual):
        """Primitive equality; bytes are decoded so b'x' matches 'x'."""
        if isinstance(expected, bytes):
            expected = expected.decode('utf-8')
        if isinstance(actual, bytes):
            actual = actual.decode('utf-8')
        return expected == actual

    @classmethod
    def _match(cls, expected, actual):
        """Recursive structural match without the top-level key requirement."""
        t_expected = cls._PJ_MAP.get(type(expected), -1)
        t_actual = cls._PJ_MAP.get(type(actual), -1)
        if t_expected == -1 or t_expected != t_actual:
            return False
        if t_expected == cls.D:
            if set(expected) != set(actual):
                return False
            return all(cls._match(v, actual[k]) for k, v in expected.items())
        if t_expected == cls.L:
            if len(expected) != len(actual):
                return False
            return all(cls._match(a, b)
                       for a, b in zip(sorted(expected), sorted(actual)))
        return cls._prim_equal(expected, actual)

    def check_value(self, json_data):
        """Return True if json_data structurally matches the stored value."""
        t = self._PJ_MAP.get(type(json_data), -1)
        if t != self.type:
            return False
        if self.type == self.D:
            return self.check_dict_value(json_data)
        if self.type == self.L:
            return self.check_seq_value(json_data)
        if self.type in self.P_TYPES:
            return self.check_prim_value(json_data)
        return False

    def check_dict_value(self, json_data):
        """Exact structural dict match; the configured key must be present."""
        t = self._PJ_MAP.get(type(json_data), -1)
        if t != self.D:
            return False
        if self.key not in json_data:
            return False
        return self._match(self.value, json_data)

    def check_seq_value(self, json_data):
        """Order-insensitive sequence match (elements compared after sorting)."""
        t = self._PJ_MAP.get(type(json_data), -1)
        if t != self.type:
            return False
        if len(self.value) != len(json_data):
            return False
        return all(self._match(a, b)
                   for a, b in zip(sorted(self.value), sorted(json_data)))

    def check_prim_value(self, json_data):
        """Primitive match with bytes/str interchangeability."""
        t = self._PJ_MAP.get(type(json_data), -1)
        if t != self.type:
            return False
        return self._prim_equal(self.value, json_data)
| 30.895604 | 72 | 0.552552 |
298efccfd926da1ea13d17989e056b78a950849d | 376 | py | Python | chapterthree/howmanyguests.py | cmotek/python_crashcourse | 29cbdd6699cd17192bb599d235852d547630d110 | [
"Apache-2.0"
] | null | null | null | chapterthree/howmanyguests.py | cmotek/python_crashcourse | 29cbdd6699cd17192bb599d235852d547630d110 | [
"Apache-2.0"
] | null | null | null | chapterthree/howmanyguests.py | cmotek/python_crashcourse | 29cbdd6699cd17192bb599d235852d547630d110 | [
"Apache-2.0"
] | null | null | null | guestlist = ['Barack Obama', 'Stanley Kubrick', 'Thomas Pynchon']
message = (f"How does McDonalds sound, {guestlist[0]}?")
print(message)
message = (f"How does McDonalds sound, {guestlist[1]}?")
print(message)
message = (f"How does McDonalds sound, {guestlist[2]}?")
print(message)
print(f"We've got approximately, {len(guestlist)} guests coming to this McDonalds dinner.") | 34.181818 | 91 | 0.720745 |
bbb018b59f0429f761e17f25818848689a98338a | 1,879 | py | Python | src/day_07.py | bengosney/Advent-Of-Code-2021 | 47747d9fbc92bca0d44d986eee4b49f809df7770 | [
"MIT"
] | null | null | null | src/day_07.py | bengosney/Advent-Of-Code-2021 | 47747d9fbc92bca0d44d986eee4b49f809df7770 | [
"MIT"
] | 4 | 2021-11-30T16:17:02.000Z | 2021-12-13T14:22:57.000Z | src/day_07.py | bengosney/Advent-Of-Code-2021 | 47747d9fbc92bca0d44d986eee4b49f809df7770 | [
"MIT"
] | null | null | null | # Standard Library
import multiprocessing as mp
import sys
from functools import lru_cache, partial
# First Party
from utils import read_input
def move_crabs_to(crabs: list[int], position: int) -> int:
    """Total fuel to align every crab on `position` at one unit per step."""
    return sum(abs(position - crab) for crab in crabs)


def part_1(input: str) -> int:
    """Minimum total fuel with linear move cost.

    Bug fix: candidate positions now cover 0..max(crabs) instead of
    0..len(crabs)-1, which could miss the optimum whenever a crab sits
    beyond position len(crabs)-1 (e.g. a single crab at position 5).
    """
    crabs = list(map(int, input.split(",")))
    return min(move_crabs_to(crabs, position) for position in range(max(crabs) + 1))
def move_crabs_to_exp(position: int, crabs: list[int]) -> int:
    """Total fuel to align all crabs on `position` with triangular step cost."""
    total = 0
    for crab in crabs:
        total += move_crab_exp(abs(position - crab))
    return total
@lru_cache(maxsize=None)
def move_crab_exp(distance: int) -> int:
    """Fuel for one crab to travel `distance` steps: the triangular number."""
    return distance * (distance + 1) // 2
def part_2(input: str) -> int:
    """Minimum total fuel with triangular move cost, parallelised over positions.

    Fixes: the worker pool is now closed via a context manager (the old code
    leaked pool processes every call), and candidate positions cover
    0..max(crabs) rather than 0..len(crabs)-1, which could miss the optimum.
    """
    crabs: list[int] = list(map(int, input.split(",")))
    process_crabs = partial(move_crabs_to_exp, crabs=crabs)
    with mp.Pool(mp.cpu_count()) as pool:
        fuel = pool.map(process_crabs, range(max(crabs) + 1))
    return min(fuel)
# -- Tests
def get_example_input() -> str:
    """Example crab positions from the puzzle statement."""
    return "16,1,2,0,4,2,7,1,2,14"
def test_move_exp():
    """Spot-check the triangular fuel cost against known values."""
    expected_costs = {11: 66, 4: 10, 3: 6, 5: 15, 1: 1, 2: 3, 9: 45}
    for distance, fuel in expected_costs.items():
        assert move_crab_exp(distance) == fuel
def test_part_1():
    """Part 1 on the puzzle example must cost 37 fuel."""
    assert part_1(get_example_input()) == 37
def test_part_2():
    """Part 2 on the puzzle example must cost 168 fuel."""
    assert part_2(get_example_input()) == 168
def test_part_1_real():
    """Regression check against the known answer for the real input."""
    assert part_1(read_input(__file__)) == 349769
def test_part_2_real():
    """Regression check against the known answer for the real input."""
    assert part_2(read_input(__file__)) == 99540554
# -- Main
if __name__ == "__main__":
    # Solve both parts against the real puzzle input and print the answers.
    puzzle_input = read_input(__file__)
    print(f"Part1: {part_1(puzzle_input)}")
    print(f"Part2: {part_2(puzzle_input)}")
| 19.778947 | 69 | 0.620543 |
fca55da3e197cb41ec47c39b9623009b79280219 | 4,583 | py | Python | scripts/trns_validate_KBaseAssembly.FA.py | srividya22/transform | 89f8f60d973be886864f94bcb5502f1e80fbf541 | [
"MIT"
] | null | null | null | scripts/trns_validate_KBaseAssembly.FA.py | srividya22/transform | 89f8f60d973be886864f94bcb5502f1e80fbf541 | [
"MIT"
] | null | null | null | scripts/trns_validate_KBaseAssembly.FA.py | srividya22/transform | 89f8f60d973be886864f94bcb5502f1e80fbf541 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# This code is part of KBase project to validate
#the fastq and fasta files
from __future__ import print_function
import math
import sys, getopt
import os.path
import subprocess
import json
import gzip
import io
import cStringIO
desc1 = '''
NAME
trns_validate_KBaseAssembly.FA -- Validate the fasta files (1.0)
SYNOPSIS
'''
desc2 = '''
DESCRIPTION
trns_validate_KBaseAssembly.FA validate the fasta file and returns
a json string
TODO: It will support KBase log format.
'''
desc3 = '''
EXAMPLES
> trns_trns_validate_KBaseAssembly.FA -i <Input fasta file>
AUTHORS
Srividya Ramakrishnan.
'''
impt = os.environ.get("KB_TOP")+"/lib/jars/FastaValidator/FastaValidator-1.0.jar"
mc = 'FVTester'
#### Extensions supported for fastq and fasta
fastq_ext = ['.fq','.fq.gz','.fastq','.fastq.gz']
fasta_ext = ['.fa','.fa.gz','.fasta','.fasta.gz']
####File executables
fval_path= "fastQValidator"
#if os.environ.get("KB_RUNTIME") is not None:
# fast_path = os.environ.get("KB_RUNTIME")+'/lib'
#else:
# print("Environmental variable KB_RUNTIME" + " is not set")
# sys.exit(1)
#fast_path = "/kb/runtime/lib/"
### List of Exceptions
class Error(Exception):
	"""Root exception type for errors raised by this validator module."""
#class CalledProcessError(Exception):
#	pass
# Factory for in-memory file objects (Python 2 cStringIO), used when the
# gzip-decompression path buffers zcat output before writing it to disk.
io_method = cStringIO.StringIO
def check_output(*popenargs, **kwargs):
	r"""Run a command and report its outcome.

	Returns a dict: 'status' is 'SUCCESS' or 'FAILED' based on the exit
	code, and 'error' holds the captured stdout when the command failed
	(empty string otherwise).

	Bug fix: extra keyword arguments (e.g. stderr=...) are now forwarded
	to subprocess.Popen; previously they were silently dropped, so the
	caller's stderr redirection never took effect. Also removed the dead
	`cmd` computation that was never used.
	"""
	error = ''
	status = ''
	if 'stdout' in kwargs:
		raise ValueError('stdout argument not allowed, it will be overridden.')
	process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
	output, unused_err = process.communicate()
	retcode = process.poll()
	if retcode:
		status = 'FAILED'
		error = output
	else:
		status = 'SUCCESS'
	return {'status' : status, 'error' : error}
def to_JSON(self):
	"""Serialize an object through its __dict__ as sorted, indented JSON."""
	serializer = lambda item: item.__dict__
	return json.dumps(self, default=serializer, sort_keys=True, indent=4)
def validate_fasta(filename):
	"""Validate a (possibly gzipped) FASTA file with the FastaValidator jar.

	Returns the dict produced by check_output ({'status': ..., 'error': ...}),
	or exits the process if the file does not exist.
	# NOTE(review): relies on `zcat` and a Java runtime under KB_RUNTIME being
	# present on the host -- confirm in the deployment environment.
	"""
	ret = ''
	kb_runtime = os.environ.get('KB_RUNTIME', '/kb/runtime')
	java = "%s/java/bin/java" % kb_runtime
	if os.path.isfile(filename):
		ext = os.path.splitext(filename)[-1]
		if ext == '.gz':
			# Decompress via zcat into memory, then write the plain file
			# next to the input (same name minus the .gz suffix).
			decomp_file = os.path.splitext(filename)[-2]
			p = subprocess.Popen(["zcat", filename], stdout = subprocess.PIPE)
			fh = io_method(p.communicate()[0])
			assert p.returncode == 0
			text_file = open(decomp_file, "w")
			text_file.write(fh.getvalue())
			text_file.close()
		# The validator always runs on the uncompressed file name.
		if ext == '.gz':
			cmd2 = [java,"-classpath",impt,mc,os.path.splitext(filename)[-2]]
		else:
			cmd2 = [java,"-classpath",impt,mc,filename]
		#print(cmd2)
		ret = check_output(cmd2,stderr=sys.stderr)
	else:
		print("File " + filename + " doesnot exist ")
		sys.exit(1)
	return ret
class Validate(object):
	"""Run validation for a file and record the resulting status/error."""
	def __init__(self,filename):
		"""Validate `filename`; exits the process when the file is missing."""
		if not os.path.isfile(filename):
			print("File " + filename + " doesnot exist ")
			sys.exit(1)
		self.filename = filename
		result = validate_fasta(filename)
		if "status" in result:
			self.status = result["status"]
		if "error" in result:
			self.error = result["error"]
def usage():
	"""Print a short usage hint for this command-line tool."""
	print("Usage : trns_validate_KBaseAssembly.FA -i <filename> ")
def main(argv):
	"""Parse command-line arguments and run validation.

	Returns the Validate instance for the last `-i` argument, or None when
	no input file was given. Exits with code 2 on unknown options.
	"""
	inputfile = ''
	ret = None
	try:
		opts, args = getopt.getopt(argv,"hi:")
	except getopt.GetoptError:
		print('trns_validate_KBaseAssembly.FA -i <inputfile>')
		sys.exit(2)
	for opt, arg in opts:
		if opt == '-h':
			print('trns_validate_KBaseAssembly.FA -i <inputfile>')
			sys.exit()
		elif opt == "-i":
			inputfile = arg
			ret = Validate(inputfile)
		else:
			# Bug fix: usage() returns None, so the old
			# print('Invalid Option' + usage()) raised a TypeError.
			print('Invalid Option')
			usage()
	return ret
if __name__ == "__main__":
	# With arguments: run validation and print the result as JSON.
	# Without arguments: print usage and exit cleanly.
	if len(sys.argv) != 1:
		result = main(sys.argv[1:])
		print(to_JSON(result))
	else:
		usage()
		exit(0)
| 27.608434 | 90 | 0.583679 |
c95179d56bb8a75ca36e206b56f3e5ed9097aa93 | 10,293 | py | Python | snmp_interface_4.py | mkevenaar/SysAdminBoard | abf63603a12d8db0a068c3cf8fbaa6c800ef88ed | [
"MIT"
] | 293 | 2015-01-01T12:33:12.000Z | 2022-03-29T23:50:48.000Z | snmp_interface_4.py | mkevenaar/SysAdminBoard | abf63603a12d8db0a068c3cf8fbaa6c800ef88ed | [
"MIT"
] | 7 | 2015-08-05T12:55:23.000Z | 2019-08-28T20:50:01.000Z | snmp_interface_4.py | mkevenaar/SysAdminBoard | abf63603a12d8db0a068c3cf8fbaa6c800ef88ed | [
"MIT"
] | 81 | 2015-01-21T03:12:26.000Z | 2021-10-05T12:26:00.000Z | #!/usr/bin/env python
"""snmp_interface: module called to generate SNMP monitoring data formatted for use with StatusBoard iPad App
# How To Calculate Bandwidth Utilization Using SNMP
# http://www.cisco.com/en/US/tech/tk648/tk362/technologies_tech_note09186a008009496e.shtml
"""
from pysnmp.entity.rfc3413.oneliner import cmdgen
import time
import json
import logging.config
from credentials import SNMP_COMMUNITY
__author__ = 'scott@flakshack.com (Scott Vintinner)'
# =================================SETTINGS======================================
MAX_DATAPOINTS = 30  # samples kept per interface; oldest are dropped beyond this
SAMPLE_INTERVAL = 60  # seconds between SNMP polls (also the graph refresh rate)
GRAPH_TITLE = "CLT Bandwidth (Mbps)"
# Standard SNMP OIDs
# sysUpTime 1.3.6.1.2.1.1.3.0 (this is hundreds of a second)
# 64-bit counters because 32-bit defaults rollover too quickly
# ifHCInOctets 1.3.6.1.2.1.31.1.1.1.6.interfacenumber
# ifHCOutOctets 1.3.6.1.2.1.31.1.1.1.10.interfacenumber
# Enter the details for each SNMP counter.
# ip: This is the IP address or resolvable host name
# community: This is the SNMPv1 community that will grant access to read the OID (usually this is "public")
# oid: This is the SNMP OID interface counter we'll be measuring.
# uptime_oid: This is the SNMP OID for the device's uptime (so we know what the time was when we measured the counter)
# name: This is the name of the device as it will appear on the graph
DEVICES = (
    {"ip": "clt-core", "community": SNMP_COMMUNITY, "oid": "1.3.6.1.2.1.31.1.1.1.6.7", "uptime_oid": "1.3.6.1.2.1.1.3.0", "name": "LEV3 RX"},
    {"ip": "clt-core", "community": SNMP_COMMUNITY, "oid": "1.3.6.1.2.1.31.1.1.1.10.7", "uptime_oid": "1.3.6.1.2.1.1.3.0", "name": "LEV3 TX"},
    {"ip": "clt-core", "community": SNMP_COMMUNITY, "oid": "1.3.6.1.2.1.31.1.1.1.6.24", "uptime_oid": "1.3.6.1.2.1.1.3.0", "name": "SPEC RX"},
    {"ip": "clt-core", "community": SNMP_COMMUNITY, "oid": "1.3.6.1.2.1.31.1.1.1.10.24", "uptime_oid": "1.3.6.1.2.1.1.3.0", "name": "SPEC TX"},
)
# ================================================================================
class MonitorJSON:
    """Shared holder for the latest JSON payload, handed to monitor threads."""
    def __init__(self):
        """Seed the payload with a waiting notice until the first poll completes."""
        placeholder = "Waiting " + str(SAMPLE_INTERVAL) + " seconds for first run"
        self.json = output_message(placeholder, "")
class InterfaceDevice:
    """One SNMP interface counter being tracked; instances self-register."""
    all_devices = []  # class-level registry of every created device

    def __init__(self, ip, community, oid, uptime_oid, name):
        """Store polling parameters and append this device to the registry."""
        self.ip, self.community = ip, community
        self.oid, self.uptime_oid = oid, uptime_oid
        self.name = name
        self.snmp_data = []   # raw SNMP counter samples
        self.datapoints = []  # formatted points for the graph
        self.__class__.all_devices.append(self)
class SNMPDatapoint:
    """A raw counter reading paired with the device uptime at poll time."""
    def __init__(self, value, timeticks):
        self.value, self.timeticks = value, timeticks
def get_snmp(device, community, snmp_oid, snmp_uptime_oid):
    """Fetch one SNMP counter plus sysUpTime in a single synchronous GET.

    Returns (value, uptime, error): value/uptime are ints on success and
    None on failure; error is None on success, otherwise a message string.
    """
    generator = cmdgen.CommandGenerator()
    error_indication, error_status, error_index, var_binds = generator.getCmd(
        cmdgen.CommunityData(community), cmdgen.UdpTransportTarget((device, 161)), snmp_oid, snmp_uptime_oid
    )
    value = None
    uptime = None
    error = None
    if error_indication:
        # Transport-level failure (timeout, unreachable host, ...).
        error = str(error_indication)
    elif error_status:
        # Protocol-level error reported by the agent.
        error = error_status.prettyPrint()
    else:
        # varBinds are SNMP objects, so coerce them to plain integers.
        value = int(var_binds[0][1])
        uptime = int(var_binds[1][1])
    return value, uptime, error
def calculate_bps(current_sample_octets, current_sample_time, historical_sample_octets, historical_sample_time):
    """Return throughput in Mbps between two 64-bit octet-counter samples.

    Times are SNMP TimeTicks (hundredths of a second). Handles the counter
    wrapping past its maximum (2**64 - 1) back to zero.
    """
    if current_sample_octets < historical_sample_octets:
        # Counter rolled over: add the full 64-bit modulus (2**64, not the
        # counter's max value 2**64 - 1, which was off by one octet).
        current_sample_octets += 18446744073709551616
    delta = current_sample_octets - historical_sample_octets
    # SysUpTime is in TimeTicks (hundredths of a second), so convert to seconds.
    seconds_between_samples = (current_sample_time - historical_sample_time) / 100.0
    # Octets -> bits, then bits/sec -> Mbit/sec (use 1024 for Kbps).
    bps = (delta * 8) / seconds_between_samples
    bps /= 1048576
    return round(bps, 2)
def output_message(message, detail):
    """Build the StatusBoard error payload (JSON string) for the graph panel."""
    payload = {
        "graph": {
            "title": GRAPH_TITLE,
            "error": {"message": message, "detail": detail},
        }
    }
    return json.dumps(payload)
def generate_json(snmp_monitor):
    """Poll every configured interface once and store StatusBoard JSON on snmp_monitor.json.

    Maintains per-device rolling sample windows on the module-level
    InterfaceDevice registry; on any SNMP failure it stops polling the
    remaining devices (see the `break` below) and writes an error payload.
    """
    logger = logging.getLogger("snmp_interface_4")
    time_x_axis = time.strftime("%H:%M")  # Use the same time value for all samples per iteration
    statusbar_datasequences = []
    snmp_error = None
    logger.debug("SNMP generate_json started: " + time_x_axis)
    # Create a list of InterfaceDevices using the contants provided above
    if len(InterfaceDevice.all_devices) == 0:
        for device in DEVICES:
            InterfaceDevice(device["ip"], device["community"], device["oid"], device["uptime_oid"], device["name"])
    # Loop through each device, update the SNMP data
    for device in InterfaceDevice.all_devices:
        logger.debug(device.ip + " " + device.name + " " + device.oid)
        # Get the SNMP data
        try:
            snmp_value, snmp_uptime_value, snmp_error = get_snmp(device.ip, device.community,
                                                                 device.oid, device.uptime_oid)
        except Exception as error:
            if not snmp_error:
                snmp_error = str(error)
        # NOTE: a failure on one device aborts polling of the remaining
        # devices for this cycle (break below).
        if snmp_error:
            logger.warning(snmp_error)
            break
        else:
            logger.debug("value:" + str(snmp_value) + " uptime:" + str(snmp_uptime_value))
            # Add the raw SNMP data to a list
            if len(device.snmp_data) == 0:  # first time through, initialize the list
                device.snmp_data = [SNMPDatapoint(snmp_value, snmp_uptime_value)]
            else:
                device.snmp_data.append(SNMPDatapoint(snmp_value, snmp_uptime_value))
            # If we already have the max number of datapoints in our list, delete the oldest item
            if len(device.snmp_data) >= MAX_DATAPOINTS:
                del(device.snmp_data[0])
            # If we have at least 2 samples, calculate bps by comparing the last item with the second to last item
            if len(device.snmp_data) > 1:
                bps = calculate_bps(
                    device.snmp_data[-1].value,
                    device.snmp_data[-1].timeticks,
                    device.snmp_data[-2].value,
                    device.snmp_data[-2].timeticks
                )
                bps = round(bps, 2)
                if len(device.datapoints) == 0:
                    device.datapoints = [{"title": time_x_axis, "value": bps}]
                else:
                    device.datapoints.append({"title": time_x_axis, "value": bps})
                # If we already have the max number of datapoints, delete the oldest item.
                if len(device.datapoints) >= MAX_DATAPOINTS:
                    del(device.datapoints[0])
        # Generate the data sequence
        statusbar_datasequences.append({"title": device.name, "datapoints": device.datapoints})
    # If this is the first run through, show Initializing on iPad
    if snmp_error:
        # If we ran into an SNMP error, go ahead and write out the JSON file with the error
        snmp_monitor.json = output_message("Error retrieving SNMP data", snmp_error)
    elif len(InterfaceDevice.all_devices[-1].snmp_data) <= 2:
        snmp_monitor.json = output_message(
            "Initializing bandwidth dataset: " +
            str(SAMPLE_INTERVAL * (3 - len(InterfaceDevice.all_devices[-1].snmp_data))) +
            " seconds...", ""
        )
    else:
        # Generate JSON output and assign to snmp_monitor object (for return back to caller module)
        statusbar_graph = {
            "title": GRAPH_TITLE, "type": "line",
            "refreshEveryNSeconds": SAMPLE_INTERVAL,
            "datasequences": statusbar_datasequences
        }
        statusbar_type = {"graph": statusbar_graph}
        snmp_monitor.json = json.dumps(statusbar_type)
    logger.debug(snmp_monitor.json)
# ======================================================
# __main__
#
# If you run this module by itself, it will instantiate
# the MonitorJSON class and start an infinite loop
# printing data.
# ======================================================
#
if __name__ == '__main__':
    # When run by itself, we need to create the logger object (which is normally created in webserver.py)
    try:
        f = open("log_settings.json", 'rt')
        log_config = json.load(f)
        f.close()
        logging.config.dictConfig(log_config)
    except FileNotFoundError as e:
        print("Log configuration file not found: " + str(e))
        logging.basicConfig(level=logging.DEBUG)  # fallback to basic settings
    except json.decoder.JSONDecodeError as e:
        # A malformed config is a hard error: report it and re-raise.
        print("Error parsing logger config file: " + str(e))
        raise
    monitor = MonitorJSON()
    # Daemon-style loop: poll, log, sleep; never exits on its own.
    while True:
        main_logger = logging.getLogger(__name__)
        generate_json(monitor)
        # Wait X seconds for the next iteration
        main_logger.debug("Waiting for " + str(SAMPLE_INTERVAL) + " seconds")
        time.sleep(SAMPLE_INTERVAL)
| 42.709544 | 143 | 0.633926 |
e92809fd953f7a63d250ab8264367833fdb845cc | 7,221 | py | Python | train_fair.py | CuriousCat-7/fashion-mnist | de556ffca9234def3baa6d61a730e8b98d832a76 | [
"MIT"
] | 1 | 2020-06-05T09:06:03.000Z | 2020-06-05T09:06:03.000Z | train_fair.py | CuriousCat-7/fashion-mnist | de556ffca9234def3baa6d61a730e8b98d832a76 | [
"MIT"
] | null | null | null | train_fair.py | CuriousCat-7/fashion-mnist | de556ffca9234def3baa6d61a730e8b98d832a76 | [
"MIT"
] | null | null | null | import torch
from torchvision import datasets, models, transforms
import torch.optim as optim
import model
import utils
import time
import argparse
import os
import csv
from datetime import datetime
# from tensorboardX import SummaryWriter
# Command-line configuration for a single training run.
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default='FashionComplexNet', help="model")
parser.add_argument("--patience", type=int, default=3, help="early stopping patience")
parser.add_argument("--batch_size", type=int, default=256, help="batch size")
parser.add_argument("--nepochs", type=int, default=200, help="max epochs")
parser.add_argument("--nworkers", type=int, default=4, help="number of workers")
parser.add_argument("--seed", type=int, default=1, help="random seed")
parser.add_argument("--data", type=str, default='FashionMNIST', help="MNIST, or FashionMNIST")
args = parser.parse_args()
#viz
# tsboard = SummaryWriter()
# Set up the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Training on {}'.format(device))
# Set seeds. If using numpy this must be seeded too.
torch.manual_seed(args.seed)
# Bug fix: `device == 'cuda:0'` compared a torch.device to a str and was
# always False, so the CUDA RNG was never seeded. Compare the device type.
if device.type == 'cuda':
    torch.cuda.manual_seed(args.seed)
# Setup folders for saved models and logs
if not os.path.exists('saved-models/'):
    os.mkdir('saved-models/')
if not os.path.exists('logs/'):
    os.mkdir('logs/')
# Setup folders. Each run must have it's own folder. Creates
# a logs folder for each model and each run.
out_dir = 'logs/{}'.format(args.model)
if not os.path.exists(out_dir):
    os.mkdir(out_dir)
# Find the first unused run-N directory for this model.
run = 0
current_dir = '{}/run-{}'.format(out_dir, run)
while os.path.exists(current_dir):
    run += 1
    current_dir = '{}/run-{}'.format(out_dir, run)
os.mkdir(current_dir)
# NOTE(review): logfile is opened at import time and never explicitly closed.
logfile = open('{}/log.txt'.format(current_dir), 'w')
print(args, file=logfile)
# Define transforms.
train_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
val_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
# Create dataloaders. Use pin memory if cuda.
# Any --data value other than 'FashionMNIST' falls through to plain MNIST.
if args.data == 'FashionMNIST':
    trainset = datasets.FashionMNIST('./data', train=True, download=True, transform=train_transforms)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
                                               shuffle=True, num_workers=args.nworkers)
    valset = datasets.FashionMNIST('./data', train=False, transform=val_transforms)
    val_loader = torch.utils.data.DataLoader(valset, batch_size=args.batch_size,
                                             shuffle=True, num_workers=args.nworkers)
    print('Training on FashionMNIST')
else:
    trainset = datasets.MNIST('./data-mnist', train=True, download=True, transform=train_transforms)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
                                               shuffle=True, num_workers=args.nworkers)
    valset = datasets.MNIST('./data-mnist', train=False, transform=val_transforms)
    val_loader = torch.utils.data.DataLoader(valset, batch_size=args.batch_size,
                                             shuffle=True, num_workers=args.nworkers)
    print('Training on MNIST')
def run_model(net, loader, criterion, optimizer, train = True):
    """Run one pass over `loader`; returns (mean loss, dataset accuracy).

    In training mode every architecture choice in net.random_shuffle is
    trained on each batch; in eval mode only net.random_choice is used.
    # NOTE(review): random_shuffle / set_choice semantics come from the
    # external `model` module -- confirm there.
    """
    running_loss = 0
    running_accuracy = 0
    # Set mode
    if train:
        net.train()
    else:
        net.eval()
    for i, (X, y) in enumerate(loader):
        # Pass to gpu or cpu
        X, y = X.to(device), y.to(device)
        # Zero the gradient
        optimizer.zero_grad()
        with torch.set_grad_enabled(train):
            if train:
                for choice in net.random_shuffle:
                    net.set_choice(choice)
                    output = net(X)
                    _, pred = torch.max(output, 1)
                    loss = criterion(output, y)
                    loss.backward()
                    #torch.nn.utils.clip_grad_norm_(net.parameters(), GRAD_CLIP)
                    optimizer.step()
            else:
                net.set_choice(net.random_choice)
                output = net(X)
                _, pred = torch.max(output, 1)
                loss = criterion(output, y)
        # Calculate stats
        # In training mode, loss/pred here are from the LAST choice only.
        running_loss += loss.item()
        running_accuracy += torch.sum(pred == y.detach())
    return running_loss / len(loader), running_accuracy.double() / len(loader.dataset)
def main(net):
    """Train `net` with early stopping; returns a dict of final statistics."""
    # Init network, criterion and early stopping
    flops_count , params_count = None, None
    criterion = torch.nn.CrossEntropyLoss()
    # Define optimizer
    #optimizer = optim.Adam(net.parameters())
    # Architecture (NAS) parameters get the default LR; the shared stem/tail/
    # classifier weights train at one third of it.
    comm_params = list(net.stem.parameters()) +\
        list(net.tail.parameters()) +\
        list(net.classifier.parameters())
    nas_params = list(net.mid.parameters())
    params = [
        {"params": nas_params},
        {"params": comm_params, "lr":1e-3/3},
    ]
    optimizer = optim.Adam(params)
    # Train the network
    patience = args.patience
    best_loss = 1e4
    best_acc = 0
    # NOTE(review): writeFile is only closed when patience runs out below.
    writeFile = open('{}/stats.csv'.format(current_dir), 'a')
    writer = csv.writer(writeFile)
    writer.writerow(['Epoch', 'Train Loss', 'Train Accuracy', 'Validation Loss', 'Validation Accuracy'])
    begin = datetime.now()
    for e in range(args.nepochs):
        start = time.time()
        train_loss, train_acc = run_model(net, train_loader,
                                          criterion, optimizer)
        val_loss, val_acc = run_model(net, val_loader,
                                      criterion, optimizer, False)
        end = time.time()
        # print stats
        stats = """Epoch: {}\t train loss: {:.3f}, train acc: {:.3f}\t
            val loss: {:.3f}, val acc: {:.3f}\t
            time: {:.1f}s""".format(e+1, train_loss, train_acc, val_loss,
                                    val_acc, end - start)
        print(stats)
        # viz
        # tsboard.add_scalar('data/train-loss',train_loss,e)
        # tsboard.add_scalar('data/val-loss',val_loss,e)
        # tsboard.add_scalar('data/val-accuracy',val_acc.item(),e)
        # tsboard.add_scalar('data/train-accuracy',train_acc.item(),e)
        # Write to csv file
        writer.writerow([e+1, train_loss, train_acc.item(), val_loss, val_acc.item()])
        # early stopping and save best model
        if val_acc > best_acc:
            best_acc = val_acc.cpu().item()
        # Patience resets (and a checkpoint is saved) only on val-loss improvement.
        if val_loss < best_loss:
            best_loss = val_loss
            patience = args.patience
            utils.save_model({
                'arch': args.model,
                'state_dict': net.state_dict()
            }, 'saved-models/{}-train-{}.pth.tar'.format(args.model, run))
        else:
            patience -= 1
            if patience == 0:
                print('Run out of patience!')
                writeFile.close()
                # tsboard.close()
                break
    rst = dict(
        best_loss=best_loss,
        best_acc=best_acc,
        flops_count=flops_count,
        params_count=params_count,
        used_time = datetime.now() - begin
    )
    print(rst)
    return rst
if __name__ == '__main__':
    # Look up the requested architecture by name and train it.
    network = model.__dict__[args.model]().to(device)
    main(network)
| 33.742991 | 104 | 0.618197 |
bee23cd272545e083618e52d45dc21836ca2e895 | 1,907 | py | Python | app/database_models/__init__.py | jaywonder20/Flask_Api_Starter | d3cf69f4742923737e826261f5e737f00d1c6270 | [
"MIT"
] | 1 | 2020-07-28T13:28:42.000Z | 2020-07-28T13:28:42.000Z | app/database_models/__init__.py | jaywonder20/Flask_Api_Starter | d3cf69f4742923737e826261f5e737f00d1c6270 | [
"MIT"
] | null | null | null | app/database_models/__init__.py | jaywonder20/Flask_Api_Starter | d3cf69f4742923737e826261f5e737f00d1c6270 | [
"MIT"
] | null | null | null | from app import db
class TweetsModel(db.Model):
    """SQLAlchemy model for a scraped tweet plus its author's metadata."""
    __tablename__ = 'jobs'
    id = db.Column(db.Integer, primary_key=True)
    rawTweet = db.Column(db.String())
    cleanedTweet = db.Column(db.String())
    retweetCount = db.Column(db.String())
    favoriteCount = db.Column(db.String())
    isReply = db.Column(db.String())
    UserCreatedDate = db.Column(db.String())
    UserLikesNo = db.Column(db.String())
    UserFollowerNo = db.Column(db.String())
    UserFriendsNo = db.Column(db.String())
    UserListNo = db.Column(db.String())
    UserTotalTweet = db.Column(db.String())
    UserIsVerified = db.Column(db.String())
    UserLocation = db.Column(db.String())
    author = db.Column(db.String())
    hashtags = db.Column(db.String())
    urls = db.Column(db.String())
    likelyJobNames = db.Column(db.String())
    userPicture = db.Column(db.String())

    def __init__(self, rawTweet, cleanedTweet, retweetCount, favoriteCount, isReply, UserCreatedDate, UserLikesNo,
                 UserFollowerNo, UserFriendsNo, UserListNo, UserTotalTweet, UserIsVerified, UserLocation, author,
                 hashtags, urls, likelyJobNames, userPicture):
        """Populate one row; fix: removed a duplicate retweetCount assignment."""
        self.rawTweet = rawTweet
        self.cleanedTweet = cleanedTweet
        self.retweetCount = retweetCount
        self.favoriteCount = favoriteCount
        self.isReply = isReply
        self.UserCreatedDate = UserCreatedDate
        self.UserLikesNo = UserLikesNo
        self.UserFollowerNo = UserFollowerNo
        self.UserFriendsNo = UserFriendsNo
        self.UserListNo = UserListNo
        self.UserTotalTweet = UserTotalTweet
        self.UserIsVerified = UserIsVerified
        self.UserLocation = UserLocation
        self.author = author
        self.hashtags = hashtags
        self.urls = urls
        self.likelyJobNames = likelyJobNames
        self.userPicture = userPicture
| 34.053571 | 114 | 0.673309 |
14898c7458e26ff6b427a3c48acc1044024189c7 | 1,980 | py | Python | couchjs/scons/scons-local-2.0.1/SCons/Tool/suncc.py | Gussy/bigcouch | 9e67d3f754186ce8368503509ae041a2847f2b7c | [
"Apache-2.0"
] | 73 | 2015-03-19T04:04:52.000Z | 2021-08-16T10:45:11.000Z | couchjs/scons/scons-local-2.0.1/SCons/Tool/suncc.py | Gussy/bigcouch | 9e67d3f754186ce8368503509ae041a2847f2b7c | [
"Apache-2.0"
] | 5 | 2016-04-26T13:19:25.000Z | 2017-03-11T14:11:22.000Z | couchjs/scons/scons-local-2.0.1/SCons/Tool/suncc.py | Gussy/bigcouch | 9e67d3f754186ce8368503509ae041a2847f2b7c | [
"Apache-2.0"
] | 13 | 2015-03-27T05:21:42.000Z | 2017-05-22T11:45:30.000Z | """SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
import cc
def generate(env):
    """
    Add Builders and construction variables for Forte C and C++ compilers
    to an Environment.
    """
    cc.generate(env)
    shared_settings = (
        ('CXX', 'CC'),
        ('SHCCFLAGS', SCons.Util.CLVar('$CCFLAGS -KPIC')),
        ('SHOBJPREFIX', 'so_'),
        ('SHOBJSUFFIX', '.o'),
    )
    for key, value in shared_settings:
        env[key] = value
def exists(env):
    """Return a truthy value when the Sun CC compiler can be detected."""
    detected = env.Detect('CC')
    return detected
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.559322 | 95 | 0.732323 |
03238b23af745b87b091e3271ca38d4f926a471c | 7,796 | py | Python | utils.py | abcp4/pt.darts | 51acc1d8b5a11c98ee150f59f0cc57e67115d204 | [
"MIT"
] | null | null | null | utils.py | abcp4/pt.darts | 51acc1d8b5a11c98ee150f59f0cc57e67115d204 | [
"MIT"
] | null | null | null | utils.py | abcp4/pt.darts | 51acc1d8b5a11c98ee150f59f0cc57e67115d204 | [
"MIT"
] | null | null | null | """ Utilities """
import os
import logging
import shutil
import torch
import torchvision.datasets as dset
import numpy as np
import preproc
class ImageFolderWithPaths(dset.ImageFolder):
    """ImageFolder variant whose items also carry the image's file path."""

    def __getitem__(self, index):
        """Return (image, label, path) instead of the usual (image, label)."""
        base_item = super(ImageFolderWithPaths, self).__getitem__(index)
        file_path = self.imgs[index][0]
        return base_item + (file_path,)
def get_data(dataset, data_path,val1_data_path,val2_data_path, cutout_length, validation,validation2 = False,img_size = 64):
    """ Get torchvision dataset

    Returns [input_size, input_channels, n_classes, train_data] with the
    validation dataset(s) appended when validation/validation2 are set.
    """
    dataset = dataset.lower()
    if dataset == 'cifar10':
        dset_cls = dset.CIFAR10
        n_classes = 10
    elif dataset == 'mnist':
        dset_cls = dset.MNIST
        n_classes = 10
    elif dataset == 'fashionmnist':
        dset_cls = dset.FashionMNIST
        n_classes = 10
    elif dataset == 'custom':
        dset_cls = dset.ImageFolder
        n_classes = 3 #2 to mama
    else:
        raise ValueError(dataset)
    trn_transform, val_transform = preproc.data_transforms(dataset, cutout_length,img_size)
    if dataset == 'custom':
        print("DATA PATH:", data_path)
        trn_data = dset_cls(root=data_path, transform=trn_transform)
        #dataset_loader = torch.utils.data.DataLoader(trn_data,
        #                                             batch_size=16, shuffle=True,
        #                                             num_workers=1)
    else:
        trn_data = dset_cls(root=data_path, train=True, download=True, transform=trn_transform)
    # assuming shape is NHW or NHWC
    # For 'custom' the shape is hard-coded to RGB img_size x img_size.
    if dataset == 'custom':
        shape = [1, img_size, img_size,3]
    else:
        shape = trn_data.train_data.shape
    print(shape)
    input_channels = 3 if len(shape) == 4 else 1
    assert shape[1] == shape[2], "not expected shape = {}".format(shape)
    input_size = shape[1]
    print('input_size: uitls',input_size)
    ret = [input_size, input_channels, n_classes, trn_data]
    if validation: # append validation data
        if dataset == 'custom':
            # NOTE(review): dset_cls is rebound to an *instance* here.
            dset_cls = dset.ImageFolder(val1_data_path,transform=val_transform)
            ret.append(dset_cls)
        else:
            ret.append(dset_cls(root=data_path, train=False, download=True, transform=val_transform))
    if validation2:
        if dataset == 'custom':
            dset_cls =ImageFolderWithPaths(val2_data_path,transform=val_transform)
            ret.append(dset_cls)
    return ret
def get_logger(file_path):
    """ Make python logger """
    # [!] tensorboardX claims the default logger (logging.info()), so use a
    # dedicated named logger instead.
    logger = logging.getLogger('darts')
    formatter = logging.Formatter('%(asctime)s | %(message)s', datefmt='%m/%d %I:%M:%S %p')
    for handler in (logging.FileHandler(file_path), logging.StreamHandler()):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
def param_size(model):
    """ Compute parameter size in MB """
    # Parameters belonging to the auxiliary head are excluded from the count.
    total = sum(
        np.prod(p.size())
        for name, p in model.named_parameters()
        if not name.startswith('aux_head'))
    return total / 1024. / 1024.
class AverageMeter():
    """ Computes and stores the average and current value """

    def __init__(self):
        self.reset()

    def reset(self):
        """ Reset all statistics """
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """ Fold in `val` observed `n` times and refresh the running mean. """
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """ Computes the precision@k for the specified values of k """
    maxk = max(topk)
    batch_size = target.size(0)
    #print('output:',output)
    #print('target:',target)
    #print('maxk:',maxk)
    ### Top-5 does not exist for the mammography or geo datasets. Needs handling.
    maxk = 3  # deliberately overrides topk: completely ignores top-5
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    # one-hot case
    if target.ndimension() > 1:
        target = target.max(1)[1]
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(1.0 / batch_size))
    return res
def save_checkpoint(model, epoch, w_optimizer, a_optimizer, loss, ckpt_dir, is_best=False, is_best_overall=False):
    """Persist search-phase training state; optionally copy as best snapshots."""
    filename = os.path.join(ckpt_dir, 'checkpoint.pth.tar')
    state = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'w_optimizer_state_dict': w_optimizer.state_dict(),
        'a_optimizer_state_dict': a_optimizer.state_dict(),
        'loss': loss,
    }
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, os.path.join(ckpt_dir, 'best.pth.tar'))
    if is_best_overall:
        shutil.copyfile(filename, os.path.join(ckpt_dir, 'best_overall.pth.tar'))
def load_checkpoint(model, epoch, w_optimizer, a_optimizer, loss, filename='checkpoint.pth.tar'):
    """Restore search-phase state saved by save_checkpoint.

    Input model & optimizers should be pre-defined; only their states are
    updated. Returns (model, epoch, w_optimizer, a_optimizer, loss) with the
    inputs returned unchanged when `filename` does not exist.
    Cleanup: removed the unused `start_epoch` local; guard clause for the
    missing-file path.
    """
    if not os.path.isfile(filename):
        print("=> no checkpoint found at '{}'".format(filename))
        return model, epoch, w_optimizer, a_optimizer, loss
    print("=> loading checkpoint '{}'".format(filename))
    checkpoint = torch.load(filename)
    epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['model_state_dict'])
    w_optimizer.load_state_dict(checkpoint['w_optimizer_state_dict'])
    a_optimizer.load_state_dict(checkpoint['a_optimizer_state_dict'])
    loss = checkpoint['loss']
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(filename, checkpoint['epoch']))
    return model, epoch, w_optimizer, a_optimizer, loss
def save_checkpoint2(model, epoch, optimizer, loss, ckpt_dir, is_best=False):
    """Write a single-optimizer training checkpoint into `ckpt_dir`.

    Always writes 'checkpoint.pth.tar'; copies it to 'best_model.pth.tar'
    when `is_best` is set.
    """
    checkpoint_path = os.path.join(ckpt_dir, 'checkpoint.pth.tar')
    state = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': loss,
    }
    torch.save(state, checkpoint_path)
    if is_best:
        shutil.copyfile(checkpoint_path,
                        os.path.join(ckpt_dir, 'best_model.pth.tar'))
def load_checkpoint2(model, epoch, optimizer, loss, filename='best_model.pth.tar'):
    """Load a single-optimizer checkpoint saved by save_checkpoint2.

    NOTE(review): 'checkpoint.pth.tar' is appended to `filename`, so callers
    appear to pass a directory-style prefix; the default value
    'best_model.pth.tar' therefore yields a path that cannot exist — confirm
    intended usage before relying on the default.
    """
    filename = filename + 'checkpoint.pth.tar'
    # Note: Input model & optimizer should be pre-defined. This routine only updates their states.
    start_epoch = 0
    if os.path.isfile(filename):
        print("=> loading checkpoint '{}'".format(filename))
        checkpoint = torch.load(filename)
        #print(checkpoint)
        epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        loss = checkpoint['loss']
        print("=> loaded checkpoint '{}' (epoch {})"
              .format(filename, checkpoint['epoch']))
    else:
        # Missing file: return the caller's objects unchanged.
        print("=> no checkpoint found at '{}'".format(filename))
    return model, epoch, optimizer, loss
| 35.598174 | 124 | 0.641226 |
f162a3416c8758227c75b5dd38bc2608f68c396b | 2,548 | py | Python | code/compute_alignment_graphs.py | Ung0d/NeuroAlign | c73fd6f2d9c2fdb2e627a13ea1c45fb069e36ca4 | [
"Apache-2.0"
] | 2 | 2020-04-07T08:51:47.000Z | 2021-05-27T15:37:51.000Z | code/compute_alignment_graphs.py | Ung0d/NeuroAlign | c73fd6f2d9c2fdb2e627a13ea1c45fb069e36ca4 | [
"Apache-2.0"
] | null | null | null | code/compute_alignment_graphs.py | Ung0d/NeuroAlign | c73fd6f2d9c2fdb2e627a13ea1c45fb069e36ca4 | [
"Apache-2.0"
] | null | null | null | import ProcessSeq
import numpy as np
import argparse
import sys
sys.path.append('./ProcessSeq')
import AnchorSet

parser = argparse.ArgumentParser(description='Computes edge sets of alignment graphs for all ref alignments in ./data')
parser.add_argument("-r", type=int, default=7, help="the kmer radius")
parser.add_argument("-t", type=int, default=-1, help="treshold")
parser.add_argument("-s", type=str, default="blosum62.txt", help="the underlying scoring matrix")
parser.add_argument("-minrow", type=int, default=-1, help="minimum number of edges to build a row")
parser.add_argument("-a", type=int, default=200, help="maximum number of anchors allowed")
args = parser.parse_args()

num_alignments = 1509
NUM_THREAD = 20
scoring = AnchorSet.ScoringMatrix(args.s)

#compute alignment graphs for all ref alignments
for i in range(num_alignments):
    print(i)
    # NOTE(review): these accumulators are re-zeroed on every iteration, so
    # they only ever hold the current alignment's values — confirm whether a
    # running average across alignments was intended.
    av_sol_sum = 0.0
    av_num_edge_sum = 0
    name = "A"+"{0:0=4d}".format(i)
    instance = AnchorSet.MSAInstance("../data/data_unaligned/"+name+".fasta", True)
    # Skip alignments whose sequences contain '/', which downstream code
    # cannot handle.
    skip = False
    for s in instance.seq:
        if '/' in s:
            print("/ found, skipping ", name)
            skip = True
            break
    if skip:
        continue
    # Skip alignments with sequences too short for the chosen kmer radius.
    skip = False
    for s in instance.seq:
        if len(s) < 3*args.r+1:
            print("Sequence too short, skipping ", name)
            skip = True
            break
    if skip:
        continue
    # -1 means "derive the threshold from the instance".
    if args.t == -1:
        threshold = AnchorSet.sample_threshold(instance, args.r)
    else:
        threshold = args.t
    anchors = AnchorSet.anchor_set_kmer_threshold(instance, scoring, args.r, threshold, NUM_THREAD)
    AnchorSet.read_solution("../data/data/"+name+".fasta", anchors)
    rows = AnchorSet.build_alignment_rows(anchors)
    if len(rows) > 0:
        if args.minrow == -1:
            minrow = AnchorSet.sample_min_row(rows)
        else:
            minrow = args.minrow
        rows = [r for r in rows if len(r) >= minrow]
        anchors_row_contraction = AnchorSet.row_contraction(instance, anchors, rows, minrow)
        anchors_row_contraction = AnchorSet.kBestAnchors(instance, anchors_row_contraction, args.a)
        anchors_row_contraction.to_file("../data/anchors_"+str(args.r)+"_"+str(threshold)+"_"+str(args.a)+"/"+name)
        # BUGFIX: this accumulation used to sit outside the len(rows) > 0
        # branch, raising NameError on the first alignment without rows (and
        # silently re-counting a stale anchor set afterwards).
        if anchors_row_contraction.loaded:
            av_sol_sum += np.sum(anchors_row_contraction.solution)/len(anchors_row_contraction.solution)
            av_num_edge_sum += anchors_row_contraction.anchor_data.shape[0]
    else:
        print("No fitting rows found: ", name)
| 34.90411 | 119 | 0.67033 |
7e3ef36405ea0346006c01bd14fdcfa0dc1b6896 | 674 | py | Python | manage.py | Hegelim/twitterweather | fb509da7413878d6088d7545fef870e0e721e87a | [
"BSD-3-Clause"
] | null | null | null | manage.py | Hegelim/twitterweather | fb509da7413878d6088d7545fef870e0e721e87a | [
"BSD-3-Clause"
] | null | null | null | manage.py | Hegelim/twitterweather | fb509da7413878d6088d7545fef870e0e721e87a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'twitterweathersite.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Standard Django manage.py entry point: dispatch only when run as a script.
    main()
| 29.304348 | 82 | 0.683976 |
bedfe1aef388638dd63038b36f4300035f9c04e2 | 3,118 | py | Python | tests/integration_test.py | andreasbossard/deutschland | 6f561256c707e21f81b54b139b9acb745b901298 | [
"Apache-2.0"
] | 445 | 2021-07-26T22:00:26.000Z | 2022-03-31T08:31:08.000Z | tests/integration_test.py | andreasbossard/deutschland | 6f561256c707e21f81b54b139b9acb745b901298 | [
"Apache-2.0"
] | 30 | 2021-07-27T15:42:23.000Z | 2022-03-26T16:14:11.000Z | tests/integration_test.py | andreasbossard/deutschland | 6f561256c707e21f81b54b139b9acb745b901298 | [
"Apache-2.0"
] | 28 | 2021-07-27T10:48:43.000Z | 2022-03-26T14:31:30.000Z | import datetime
from deutschland.bundesanzeiger import Bundesanzeiger
from deutschland.bundesnetzagentur import Rufzeichen
from deutschland.bundeswahlleiter import Bundeswahlleiter
from deutschland.handelsregister import Handelsregister
from deutschland.handelsregister.registrations import Registrations
def test_for_no_data_deutsche_bahn_ag():
ba = Bundesanzeiger()
data = ba.get_reports("Deutsche Bahn AG")
assert len(data.keys()) > 0, "Found no reports for Deutsche Bahn AG"
def test_for_no_data_handelsregister():
hr = Handelsregister()
data = hr.search(keywords="foobar", keyword_match_option=3)
assert (
len(data) == 0
), "Found registered companies for 'foobar' although none were expected."
def test_fetching_handelsregister_data_for_deutsche_bahn_ag():
hr = Handelsregister()
data = hr.search(
keywords="Deutsche Bahn Aktiengesellschaft", keyword_match_option=3
)
assert (
len(data) > 0
), "Found no data for 'Deutsche Bahn Aktiengesellschaft' although it should exist."
def test_fetching_handelsregister_data_for_deutsche_bahn_ag_with_raw_params():
r = Registrations()
data = r.search_with_raw_params(
{"schlagwoerter": "Deutsche Bahn Aktiengesellschaft", "schlagwortOptionen": 3}
)
assert (
len(data) > 0
), "Found no data for 'Deutsche Bahn Aktiengesellschaft' although it should exist."
def test_fetching_publications_for_deutsche_bank():
hr = Handelsregister()
data = hr.search_publications(
company_name="Deutsche Bank",
county_code="he",
court_code="M1201",
court_name="Frankfurt am Main",
detailed_search=True,
)
assert len(data) > 0, "Found no data for 'Deutsche Bank' although it should exist."
def test_fetching_publication_detail():
hr = Handelsregister()
data = hr.get_publication_detail(publication_id="896236", county_code="bw")
assert data, "Found no publication detail data although it should exist."
assert data["court"] == "Freiburg"
assert data["registration_type"] == "HRB"
assert data["registration_number"] == "719927"
assert data["decided_on"] == datetime.datetime(2021, 8, 6, 0, 0)
assert data["published_at"] == datetime.datetime(2021, 8, 6, 9, 45)
assert data["publication_type"] == "Löschungen"
assert data["publication_text"].startswith("HRB 719927:")
def test_no_data_for_publication_detail():
hr = Handelsregister()
data = hr.get_publication_detail(publication_id="9999999999999", county_code="bw")
assert data is None
def test_callsign():
rz = Rufzeichen()
data = rz.get("DL*MIC")
assert data["klasse"] == "A", "No valid callsign data returned"
def test_bundeswahlleiter():
bwl = Bundeswahlleiter()
results1998 = bwl.load_results(1998)
results2017 = bwl.load_results(2017)
results2021 = bwl.load_results(2021)
# results contain rows for each Wahlkreis, Bundesland and the Bund
assert len(results1998) == 328 + 16 + 1
assert len(results2017) == 299 + 16 + 1
assert len(results2021) == 299 + 16 + 1
| 34.644444 | 87 | 0.714561 |
c58dbc51ab6e4ad6ae107937d77db814e335b0bf | 5,505 | py | Python | lp/ui/marc.py | edsu/launchpad | 7524b4ec0850b19f058cb325749a35f8a1acb194 | [
"MIT"
] | null | null | null | lp/ui/marc.py | edsu/launchpad | 7524b4ec0850b19f058cb325749a35f8a1acb194 | [
"MIT"
] | null | null | null | lp/ui/marc.py | edsu/launchpad | 7524b4ec0850b19f058cb325749a35f8a1acb194 | [
"MIT"
] | null | null | null | """
Extracts selected MARC data to a friendly Python dictionary.
"""
import os
import re
import json
# turn the 043 codes into human readable strings based on the table list at
# http://www.loc.gov/standards/codelists/gacs.xml
# Path is resolved relative to this module so the lookup works from any CWD.
gacs_file = os.path.join(os.path.dirname(__file__), "gacs.json")
# json.load on a context-managed handle; the original used
# json.loads(open(...).read()) and never closed the file object.
with open(gacs_file) as _gacs_fh:
    gacs_dict = json.load(_gacs_fh)
def gacs(field):
    """Map a field's $a geographic area codes to human-readable names."""
    names = []
    for code, value in field:
        # only subfield a carries the geographic area code
        if code != 'a':
            continue
        # strip trailing dash padding before the table lookup
        trimmed = re.sub(r"-+$", "", value)
        # fall back to the raw code when no human-readable name is known
        names.append(gacs_dict.get(trimmed, trimmed))
    return names
# a machine readable version of
# https://github.com/gwu-libraries/launchpad/wiki/MARC-Extraction
# note: the order of each rule controls the display order
mapping = (
    # Each rule is (KEY, display name, field specs). extract() accepts three
    # spec shapes: a plain MARC tag string, a (tag, ind1, ind2, subfields)
    # tuple, or a (tag, function) pair.
    ('STANDARD_TITLE', 'Standard Title', ['240']),
    ('OTHER_TITLE', 'Other Title', ['130', '242', '246', '730', '740', '247']),
    ('OTHER_AUTHORS', 'Other Authors', [('700', None, None, 'a,d'), '710', '711']),
    ('EARLIER_TITLE', 'Earlier Title', ['247', '780']),
    ('TITLE_CHANGED_TO', 'Title Changed To', ['785']),
    ('SUBJECTS', 'Subjects', ['650', '600', '610', '630', '651']),
    ('SERIES', 'Series', ['440', '800', '810', '811', '830']),
    ('DESCRIPTION', 'Description', ['300', '351', '516', '344', '345', '346', '347']),
    ('COPYRIGHT_DATE', 'Copyright Date', [('264', None, None, 'c')]),
    ('NOTES', 'Notes', ['500', '501', '504', '507', '521', '530', '546', '547',
                        '550', '586', '590', '541']),
    ('SUMMARY', 'Summary', ['520']),
    ('BIOGRAPHICAL NOTES', 'Biographical Notes', ['545']),
    ('CURRENT_FREQUENCY', 'Current Frequency', ['310', '321']),
    ('PUBLICATION_HISTORY', 'Publication History', ['362']),
    ('IN_COLLECTION', 'In Collection', [
        ('773', None, None, 'abdghikmnopqrstuwxyz')
    ]),
    ('THESIS_DISSERTATION', 'Thesis/Dissertation', ['502']),
    ('CONTENTS', 'Contents', ['505', '990']),
    ('PRODUCTION_CREDITS', 'Production Credits', ['508']),
    ('CITATION', 'Citation', ['510']),
    ('PERFORMERS', 'Performers', ['511']),
    ('REPRODUCTION', 'Reproduction', ['533']),
    ('ORIGINAL_VERSION', 'Original Version', ['534']),
    ('FUNDING_SPONSORS', 'Funding Sponsors', ['536']),
    ('SYSTEM_REQUIREMENTS', 'System Requirements', ['538']),
    ('TERMS_OF_USAGE', 'Terms of Usage', ['540']),
    ('COPYRIGHT', 'Copyright', ['542']),
    ('FINDING_AIDS', 'Finding Aids', ['555']),
    ('TITLE_HISTORY', 'Title History', ['580']),
    ('SOURCE_DESCRIPTION', 'Source Description', ['588']),
    ('MANUFACTURE_NUMBERS', 'Manufacture Numbers', ['028']),
    ('GENRE', 'Genre', [('655', None, None, 'a')]),
    ('OTHER_STANDARD_IDENTIFIER', 'Other Identifiers', ['024']),
    ('PUBLISHER_NUMBER', 'Publisher Numbers', ['028']),
    ('GEOGRAPHIC_AREA', 'Geographic Area', [('043', gacs)]),
)
def extract(record, d=None):
    """
    Takes a pymarc.Record object and returns extracted information as a
    dictionary. If you pass in a dictionary the extracted information will
    be folded into it.
    """
    # BUGFIX: the default used to be the mutable literal d={}, so every call
    # without an explicit dict shared (and returned) the same object.
    if d is None:
        d = {}
    for name, display_name, specs in mapping:
        d[name] = []
        for spec in specs:
            # simple field specification: a bare MARC tag string
            if type(spec) == str:
                for field in record.get_fields(spec):
                    if field.is_subject_field():
                        d[name].append(subject(field))
                    else:
                        d[name].append(field.format_field())
            # complex field specification: (tag, ind1, ind2, subfields)
            elif len(spec) == 4:
                tag, ind1, ind2, subfields = spec
                for field in record.get_fields(tag):
                    if ind(ind1, field.indicator1) and ind(ind2,
                                                          field.indicator2):
                        parts = []
                        for code, value in field:
                            # TODO: we purposefully ignore $6 for now since
                            # it is used for linking alternate script
                            # representations. Ideally some day we could
                            # have a way to layer them into our data
                            # representation, or simply using the original
                            # character set as the default since our
                            # web browsers can easily display them now.
                            if code != '6' and code in subfields:
                                parts.append(value)
                        if len(parts) > 0:
                            d[name].append(' '.join(parts))
            # function based specification: (tag, callable)
            elif len(spec) == 2:
                tag, func = spec
                for field in record.get_fields(tag):
                    d[name].extend(func(field))
            # uhoh, the field specification looks bad
            else:
                raise Exception("invalid mapping for %s" % name)
    return d
def ind(expected, found):
    "Tests an indicator rule"
    # A rule of None is a wildcard; otherwise the indicator must match exactly.
    return expected is None or expected == found
def subject(f):
    """Render a subject field for display, joining v/x/y/z subdivisions with ' -- '."""
    # Numeric control subfields are skipped.  NOTE(review): the original range
    # covers only $0-$8, so a $9 subfield is rendered — confirm that is intended.
    numeric_codes = tuple(str(n) for n in range(0, 9))
    rendered = ''
    for code, value in f:
        if code in numeric_codes:
            continue
        joiner = ' -- ' if code in ('v', 'x', 'y', 'z') else ' '
        rendered += joiner + value
    return rendered.strip()
| 37.965517 | 86 | 0.534242 |
a986fdb47a6e65bec96ec2750de60d735e3595e6 | 6,275 | py | Python | synapse/api/constants.py | warricksothr/synapse | 1de26b346796ec8d6b51b4395017f8107f640c47 | [
"Apache-2.0"
] | null | null | null | synapse/api/constants.py | warricksothr/synapse | 1de26b346796ec8d6b51b4395017f8107f640c47 | [
"Apache-2.0"
] | null | null | null | synapse/api/constants.py | warricksothr/synapse | 1de26b346796ec8d6b51b4395017f8107f640c47 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains constants from the specification."""
# the max size of a (canonical-json-encoded) event
MAX_PDU_SIZE = 65536
# the "depth" field on events is limited to 2**63 - 1
MAX_DEPTH = 2 ** 63 - 1
# the maximum length for a room alias is 255 characters
MAX_ALIAS_LENGTH = 255
# the maximum length for a user id is 255 characters
MAX_USERID_LENGTH = 255
# The maximum length for a group id is 255 characters
MAX_GROUPID_LENGTH = 255
MAX_GROUP_CATEGORYID_LENGTH = 255
MAX_GROUP_ROLEID_LENGTH = 255
class Membership:
    """Represents the membership states of a user in a room."""

    INVITE = "invite"
    JOIN = "join"
    KNOCK = "knock"
    LEAVE = "leave"
    BAN = "ban"
    # All valid membership values, useful for validation/iteration.
    LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)
class PresenceState:
    """Represents the presence state of a user."""

    OFFLINE = "offline"
    UNAVAILABLE = "unavailable"
    ONLINE = "online"
    # Unstable prefix: busy state from MSC3026.
    BUSY = "org.matrix.msc3026.busy"
class JoinRules:
    """Values of the m.room.join_rules `join_rule` field."""

    PUBLIC = "public"
    KNOCK = "knock"
    INVITE = "invite"
    PRIVATE = "private"
    # As defined for MSC3083.
    MSC3083_RESTRICTED = "restricted"
class RestrictedJoinRuleTypes:
    """Understood types for the allow rules in restricted join rules."""

    ROOM_MEMBERSHIP = "m.room_membership"
class LoginType:
    """Authentication type identifiers used during login/registration flows."""

    PASSWORD = "m.login.password"
    EMAIL_IDENTITY = "m.login.email.identity"
    MSISDN = "m.login.msisdn"
    RECAPTCHA = "m.login.recaptcha"
    TERMS = "m.login.terms"
    SSO = "m.login.sso"
    DUMMY = "m.login.dummy"

    # This is used in the `type` parameter for /register when called by
    # an appservice to register a new user.
    APP_SERVICE_REGISTRATION_TYPE = "m.login.application_service"
class EventTypes:
    """Event type identifiers (spec'd plus org.matrix unstable extensions)."""

    Member = "m.room.member"
    Create = "m.room.create"
    Tombstone = "m.room.tombstone"
    JoinRules = "m.room.join_rules"
    PowerLevels = "m.room.power_levels"
    Aliases = "m.room.aliases"
    Redaction = "m.room.redaction"
    ThirdPartyInvite = "m.room.third_party_invite"
    RelatedGroups = "m.room.related_groups"

    RoomHistoryVisibility = "m.room.history_visibility"
    CanonicalAlias = "m.room.canonical_alias"
    Encrypted = "m.room.encrypted"
    RoomAvatar = "m.room.avatar"
    RoomEncryption = "m.room.encryption"
    GuestAccess = "m.room.guest_access"

    # These are used for validation
    Message = "m.room.message"
    Topic = "m.room.topic"
    Name = "m.room.name"

    ServerACL = "m.room.server_acl"
    Pinned = "m.room.pinned_events"

    Retention = "m.room.retention"

    Dummy = "org.matrix.dummy_event"

    SpaceChild = "m.space.child"
    SpaceParent = "m.space.parent"

    # Unstable identifiers from MSC2716 (historical batch import).
    MSC2716_INSERTION = "org.matrix.msc2716.insertion"
    MSC2716_CHUNK = "org.matrix.msc2716.chunk"
    MSC2716_MARKER = "org.matrix.msc2716.marker"
class ToDeviceEventTypes:
    """Event types sent over to-device messaging."""

    RoomKeyRequest = "m.room_key_request"
class DeviceKeyAlgorithms:
    """Spec'd algorithms for the generation of per-device keys"""

    # Algorithm identifiers as they appear on the wire in key uploads.
    ED25519 = "ed25519"
    CURVE25519 = "curve25519"
    SIGNED_CURVE25519 = "signed_curve25519"
class EduTypes:
    """Ephemeral data unit (EDU) type identifiers."""

    Presence = "m.presence"
class RejectedReason:
    """Reasons an event can be marked as rejected."""

    AUTH_ERROR = "auth_error"
class RoomCreationPreset:
    """Preset names accepted by the room-creation API."""

    PRIVATE_CHAT = "private_chat"
    PUBLIC_CHAT = "public_chat"
    TRUSTED_PRIVATE_CHAT = "trusted_private_chat"
class ThirdPartyEntityKind:
    """Kinds of third-party network entities that can be looked up."""

    USER = "user"
    LOCATION = "location"
# Message type and sub-type used for server-notice events.
ServerNoticeMsgType = "m.server_notice"
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
class UserTypes:
    """Allows for user type specific behaviour. With the benefit of hindsight
    'admin' and 'guest' users should also be UserTypes. Normal users are type None
    """

    SUPPORT = "support"
    BOT = "bot"
    # All non-None user types, useful for validation.
    ALL_USER_TYPES = (SUPPORT, BOT)
class RelationTypes:
    """The types of relations known to this server."""

    ANNOTATION = "m.annotation"
    REPLACE = "m.replace"
    REFERENCE = "m.reference"
class LimitBlockingTypes:
    """Reasons that a server may be blocked"""

    MONTHLY_ACTIVE_USER = "monthly_active_user"
    HS_DISABLED = "hs_disabled"
class EventContentFields:
    """Fields found in events' content, regardless of type."""

    # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326
    LABELS = "org.matrix.labels"

    # Timestamp to delete the event after
    # cf https://github.com/matrix-org/matrix-doc/pull/2228
    SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"

    # cf https://github.com/matrix-org/matrix-doc/pull/1772
    ROOM_TYPE = "type"

    # Used on normal messages to indicate they were historically imported after the fact
    MSC2716_HISTORICAL = "org.matrix.msc2716.historical"
    # For "insertion" events to indicate what the next chunk ID should be in
    # order to connect to it
    MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id"
    # Used on "chunk" events to indicate which insertion event it connects to
    MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id"
    # For "marker" events
    MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion"
class RoomTypes:
    """Understood values of the room_type field of m.room.create events."""

    SPACE = "m.space"
class RoomEncryptionAlgorithms:
    """Encryption algorithm identifiers accepted for room encryption."""

    MEGOLM_V1_AES_SHA2 = "m.megolm.v1.aes-sha2"
    DEFAULT = MEGOLM_V1_AES_SHA2
class AccountDataTypes:
    """Well-known account-data event types."""

    DIRECT = "m.direct"
    IGNORED_USER_LIST = "m.ignored_user_list"
class HistoryVisibility:
    """Values of the m.room.history_visibility `history_visibility` field."""

    INVITED = "invited"
    JOINED = "joined"
    SHARED = "shared"
    WORLD_READABLE = "world_readable"
class ReadReceiptEventFields:
    """Fields used in read-receipt event content (MSC2285 hidden receipts)."""

    MSC2285_HIDDEN = "org.matrix.msc2285.hidden"
| 26.588983 | 88 | 0.709641 |
4164affe0236b1b972f280fededd756d1333220a | 7,052 | py | Python | profiling/timing.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
] | null | null | null | profiling/timing.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
] | 1 | 2019-03-02T00:29:12.000Z | 2019-03-02T04:59:54.000Z | profiling/timing.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
] | null | null | null |
import copy
import numpy as np
import time
import matplotlib.pyplot as plt
import memory_profiler
from floris.simulation import Floris
from conftest import SampleInputs
def time_profile(input_dict):
    """Build a Floris model and return the wall-clock seconds for one solve."""
    solver = Floris.from_dict(input_dict.floris)
    t0 = time.perf_counter()
    solver.steady_state_atmospheric_condition()
    t1 = time.perf_counter()
    return t1 - t0
def internal_probe(input_dict):
    """Run one solve and return whatever steady_state_atmospheric_condition() yields.

    NOTE(review): constructs via Floris(input_dict=...) while time_profile uses
    Floris.from_dict(...) — confirm both constructor forms are still supported.
    """
    floris = Floris(input_dict=input_dict.floris)
    internal_quantity = floris.steady_state_atmospheric_condition()
    return internal_quantity
def memory_profile(input_dict):
    """Measure memory usage of one steady-state solve via memory_profiler.

    NOTE(review): with max_usage=True, memory_profiler reports the peak usage
    (in MiB) of the profiled call — verify against the installed version's API.
    Also uses Floris(input_dict=...) rather than Floris.from_dict; see
    internal_probe.
    """
    floris = Floris(input_dict=input_dict.floris)
    mem_usage = memory_profiler.memory_usage(
        (floris.steady_state_atmospheric_condition, (), {}),
        max_usage=True
    )
    return mem_usage
if __name__=="__main__":
sample_inputs = SampleInputs()
TURBINE_DIAMETER = sample_inputs.floris["turbine"]["rotor_diameter"]
# Use Gauss models
sample_inputs.floris["wake"]["model_strings"] = {
"velocity_model": "gauss",
"deflection_model": "gauss",
"combination_model": None,
"turbulence_model": None,
}
### Time scaling
# N = 30
# wd_calc_time = np.zeros(N)
# wd_size = np.zeros(N)
# wind_direction_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# wind_direction_scaling_inputs.floris["flow_field"]["wind_directions"] = factor * [270.0]
# wind_direction_scaling_inputs.floris["flow_field"]["wind_speeds"] = [8.0]
# wd_calc_time[i] = time_profile(copy.deepcopy(wind_direction_scaling_inputs))
# wd_size[i] = factor
# print("wind direction", i, wd_calc_time[i])
# ws_calc_time = np.zeros(N)
# ws_size = np.zeros(N)
# wind_speed_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# wind_speed_scaling_inputs.floris["flow_field"]["wind_directions"] = [270.0]
# wind_speed_scaling_inputs.floris["flow_field"]["wind_speeds"] = factor * [8.0]
# ws_calc_time[i] = time_profile(copy.deepcopy(wind_speed_scaling_inputs))
# ws_size[i] = factor
# print("wind speed", i, ws_calc_time[i])
# turb_calc_time = np.zeros(N)
# turb_size = np.zeros(N)
# turbine_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 3
# turbine_scaling_inputs.floris["farm"]["layout_x"] = [5 * TURBINE_DIAMETER * j for j in range(factor)]
# turbine_scaling_inputs.floris["farm"]["layout_y"] = factor * [0.0]
# turb_calc_time[i] = time_profile(copy.deepcopy(turbine_scaling_inputs))
# turb_size[i] = factor
# print("n turbine", i, turb_calc_time[i])
# internal_quantity = np.zeros(N)
# scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(5):
# factor = (i+1) * 2
# scaling_inputs.floris["farm"]["layout_x"] = [5 * TURBINE_DIAMETER * j for j in range(factor)]
# scaling_inputs.floris["farm"]["layout_y"] = factor * [0.0]
# factor = (i+1) * 20
# scaling_inputs.floris["flow_field"]["wind_directions"] = factor * [270.0]
# scaling_inputs.floris["flow_field"]["wind_speeds"] = factor * [8.0]
# internal_quantity[i] = time_profile(scaling_inputs)
# print("n turbine", i, internal_quantity[i])
# plt.figure()
# plt.plot(wd_size, wd_calc_time, 'b+-', label='wind direction')
# plt.plot(ws_size, ws_calc_time, 'g+-', label='wind speed')
# plt.plot(turb_size, turb_calc_time, 'r+-', label='n turbine')
# # plt.plot(simulation_size, internal_quantity, 'b+-', label='internal quantity')
# plt.legend(loc="upper left")
# plt.grid(True)
### Timing larger sizes in each dimension
n_wind_directions = 1
n_wind_speeds = 1
n_turbines = 3
sample_inputs.floris["wake"]["model_strings"] = {
# "velocity_model": "jensen",
# "deflection_model": "jimenez",
"velocity_model": "cc",
"deflection_model": "gauss",
"combination_model": None,
"turbulence_model": None,
}
sample_inputs.floris["solver"] = {
"type": "turbine_grid",
"turbine_grid_points": 5
}
# sample_inputs.floris["wake"]["enable_transverse_velocities"] = False
# sample_inputs.floris["wake"]["enable_secondary_steering"] = False
# sample_inputs.floris["wake"]["enable_yaw_added_recovery"] = False
sample_inputs.floris["flow_field"]["wind_directions"] = n_wind_directions * [270.0]
sample_inputs.floris["flow_field"]["wind_speeds"] = n_wind_speeds * [8.0]
sample_inputs.floris["farm"]["layout_x"] = [5 * TURBINE_DIAMETER * j for j in range(n_turbines)]
sample_inputs.floris["farm"]["layout_y"] = n_turbines * [0.0]
N = 1
times = np.zeros(N)
for i in range(N):
print(f"Iteration {i}")
times[i] = time_profile(copy.deepcopy(sample_inputs))
print(f" {times[i]}")
print(f"Total time: {np.sum(times)}")
print(f"Average per iteration: { np.sum(times) / N }")
### Memory scaling
# N = 6
# simulation_size = np.arange(N)
# wd_space = np.zeros(N)
# wind_direction_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# wind_direction_scaling_inputs.floris["farm"]["wind_directions"] = factor * [270.0]
# wind_direction_scaling_inputs.floris["farm"]["wind_speeds"] = [8.0]
# wd_space[i] = memory_profile(wind_direction_scaling_inputs)
# print("wind direction", i, wd_space[i])
# ws_space = np.zeros(N)
# wind_speed_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# wind_speed_scaling_inputs.floris["farm"]["wind_directions"] = [270.0]
# wind_speed_scaling_inputs.floris["farm"]["wind_speeds"] = factor * [8.0]
# ws_space[i] = memory_profile(wind_speed_scaling_inputs)
# print("wind speed", i, ws_space[i])
# turb_space = np.zeros(N)
# turbine_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# turbine_scaling_inputs.floris["farm"]["layout_x"] = [5 * TURBINE_DIAMETER * j for j in range(factor)]
# turbine_scaling_inputs.floris["farm"]["layout_y"] = factor * [0.0]
# turb_space[i] = memory_profile(turbine_scaling_inputs)
# print("n turbine", turb_space[i])
# # Remove the min from each test so that each starts at 0
# wd_space = wd_space - min(wd_space)
# ws_space = ws_space - min(ws_space)
# turb_space = turb_space - min(turb_space)
# plt.figure()
# plt.plot(simulation_size, wd_space, 'b+-', label='wind direction')
# plt.plot(simulation_size, ws_space, 'g+-', label='wind speed')
# plt.plot(simulation_size, turb_space, 'r+-', label='n turbine')
# plt.legend(loc="upper left")
# plt.grid(True)
### Show plots
# plt.show()
| 35.616162 | 111 | 0.643931 |
4fba405c92cb77187f7a85319721c9abf6cce343 | 263 | py | Python | ifitwala_ed/asset/doctype/stock_ledger_entry/stock_ledger_entry.py | nbhatti/ifitwala_ed | 3e38ebb94c9e7d551b5404344076d6053f2fee21 | [
"MIT"
] | null | null | null | ifitwala_ed/asset/doctype/stock_ledger_entry/stock_ledger_entry.py | nbhatti/ifitwala_ed | 3e38ebb94c9e7d551b5404344076d6053f2fee21 | [
"MIT"
] | null | null | null | ifitwala_ed/asset/doctype/stock_ledger_entry/stock_ledger_entry.py | nbhatti/ifitwala_ed | 3e38ebb94c9e7d551b5404344076d6053f2fee21 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, ifitwala and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class StockLedgerEntry(Document):
    """Frappe DocType controller for Stock Ledger Entry; no custom behaviour yet."""
    pass
| 23.909091 | 49 | 0.779468 |
aaac6f4a6e0ead0ed207492cbe1c6a1061d32166 | 14,508 | py | Python | sdks/python/apache_beam/typehints/trivial_inference_test.py | harrydrippin/beam | 4b413bbb5f8807b0f7a284fd818f2772f036fe55 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2022-01-11T19:43:12.000Z | 2022-01-15T15:45:20.000Z | sdks/python/apache_beam/typehints/trivial_inference_test.py | harrydrippin/beam | 4b413bbb5f8807b0f7a284fd818f2772f036fe55 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 7 | 2022-01-04T21:44:54.000Z | 2022-03-19T12:42:37.000Z | sdks/python/apache_beam/typehints/trivial_inference_test.py | harrydrippin/beam | 4b413bbb5f8807b0f7a284fd818f2772f036fe55 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 17 | 2021-12-15T19:31:54.000Z | 2022-01-31T18:54:23.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apache_beam.typehints.trivial_inference."""
# pytype: skip-file
import types
import unittest
import apache_beam as beam
from apache_beam.typehints import row_type
from apache_beam.typehints import trivial_inference
from apache_beam.typehints import typehints
# Module-level value; presumably exercised by inference tests over globals
# later in the file (not visible in this chunk) — confirm before removing.
global_int = 1
class TrivialInferenceTest(unittest.TestCase):
def assertReturnType(self, expected, f, inputs=(), depth=5):
  """Assert trivial inference infers `expected` as f's return type for `inputs`."""
  self.assertEqual(
      expected,
      trivial_inference.infer_return_type(f, inputs, debug=True, depth=depth))
def testBuildListUnpack(self):
  # Lambda uses BUILD_LIST_UNPACK opcode in Python 3.
  self.assertReturnType(
      typehints.List[int],
      lambda _list: [*_list, *_list, *_list], [typehints.List[int]])
def testBuildTupleUnpack(self):
  # Lambda uses BUILD_TUPLE_UNPACK opcode in Python 3.
  # Mixed element types collapse to a variable-length Tuple of their Union.
  # yapf: disable
  self.assertReturnType(
      typehints.Tuple[typehints.Union[int, str], ...],
      lambda _list1, _list2: (*_list1, *_list2, *_list2),
      [typehints.List[int], typehints.List[str]])
  # yapf: enable
def testBuildSetUnpackOrUpdate(self):
  # Set literal built by * unpacking two differently-typed iterables.
  self.assertReturnType(
      typehints.Set[typehints.Union[int, str]],
      lambda _list1, _list2: {*_list1, *_list2, *_list2},
      [typehints.List[int], typehints.List[str]])
def testBuildMapUnpackOrUpdate(self):
  # Dict literal built by merging three mappings with ** unpacking; note the
  # third input is a list of pairs, contributing float to the value union.
  self.assertReturnType(
      typehints.Dict[str, typehints.Union[int, str, float]],
      lambda a, b, c: {**a, **b, **c},
      [
          typehints.Dict[str, int],
          typehints.Dict[str, str],
          typehints.List[typehints.Tuple[str, float]]
      ])
def testIdentity(self):
  # The identity lambda must propagate its input type unchanged.
  self.assertReturnType(int, lambda x: x, [int])
def testIndexing(self):
  # Constant-index subscripts select the positional type from a Tuple,
  # and the element type from a List.
  self.assertReturnType(int, lambda x: x[0], [typehints.Tuple[int, str]])
  self.assertReturnType(str, lambda x: x[1], [typehints.Tuple[int, str]])
  self.assertReturnType(str, lambda x: x[1], [typehints.List[str]])
def testTuples(self):
  # Tuple construction, including the empty tuple and literal constants.
  self.assertReturnType(
      typehints.Tuple[typehints.Tuple[()], int], lambda x: ((), x), [int])
  self.assertReturnType(
      typehints.Tuple[str, int, float], lambda x: (x, 0, 1.0), [str])
def testGetItem(self):
  # Negative and positive constant indexing through a named helper.
  def reverse(ab):
    return ab[-1], ab[0]
  self.assertReturnType(
      typehints.Tuple[typehints.Any, typehints.Any], reverse, [typehints.Any])
  self.assertReturnType(
      typehints.Tuple[int, float], reverse, [typehints.Tuple[float, int]])
  self.assertReturnType(
      typehints.Tuple[int, str], reverse, [typehints.Tuple[str, float, int]])
  self.assertReturnType(
      typehints.Tuple[int, int], reverse, [typehints.List[int]])
def testGetItemSlice(self):
  # Slicing preserves the container type for lists/tuples/strings,
  # and degrades to Any for Any/object inputs.
  self.assertReturnType(
      typehints.List[int], lambda v: v[::-1], [typehints.List[int]])
  self.assertReturnType(
      typehints.Tuple[int], lambda v: v[::-1], [typehints.Tuple[int]])
  self.assertReturnType(str, lambda v: v[::-1], [str])
  self.assertReturnType(typehints.Any, lambda v: v[::-1], [typehints.Any])
  self.assertReturnType(typehints.Any, lambda v: v[::-1], [object])
  # Test binary_subscr on a slice of a Const.
  test_list = ['a', 'b']
  self.assertReturnType(typehints.List[str], lambda: test_list[:], [])
def testUnpack(self):
  # Tuple unpacking assignment: (a, b) = a_b.
  def reverse(a_b):
    (a, b) = a_b
    return b, a
  any_tuple = typehints.Tuple[typehints.Any, typehints.Any]
  self.assertReturnType(
      typehints.Tuple[int, float], reverse, [typehints.Tuple[float, int]])
  self.assertReturnType(
      typehints.Tuple[int, int], reverse, [typehints.Tuple[int, ...]])
  self.assertReturnType(
      typehints.Tuple[int, int], reverse, [typehints.List[int]])
  # Unpacking a 3-tuple into 2 names unions the member types.
  self.assertReturnType(
      typehints.Tuple[typehints.Union[int, float, str],
                      typehints.Union[int, float, str]],
      reverse, [typehints.Tuple[int, float, str]])
  self.assertReturnType(any_tuple, reverse, [typehints.Any])
  # Const inputs carry concrete element types.
  self.assertReturnType(
      typehints.Tuple[int, float],
      reverse, [trivial_inference.Const((1.0, 1))])
  self.assertReturnType(
      any_tuple, reverse, [trivial_inference.Const((1, 2, 3))])
  def testBuildMap(self):
    # Dict displays: an empty literal gives Dict[Any, Any]; constant string
    # keys and repeated int keys union their value types.
    self.assertReturnType(
        typehints.Dict[typehints.Any, typehints.Any],
        lambda k,
        v: {}, [int, float])
    self.assertReturnType(
        typehints.Dict[int, float], lambda k, v: {k: v}, [int, float])
    self.assertReturnType(
        typehints.Tuple[str, typehints.Dict[int, float]],
        lambda k,
        v: ('s', {
            k: v
        }), [int, float])
    self.assertReturnType(
        typehints.Dict[int, typehints.Union[float, str]],
        lambda k1,
        v1,
        k2,
        v2: {
            k1: v1, k2: v2
        }, [int, float, int, str])
    # Constant map.
    self.assertReturnType(
        typehints.Dict[str, typehints.Union[int, float]],
        lambda a,
        b: {
            'a': a, 'b': b
        }, [int, float])
    self.assertReturnType(
        typehints.Tuple[int, typehints.Dict[str, typehints.Union[int, float]]],
        lambda a,
        b: (4, {
            'a': a, 'b': b
        }), [int, float])
  def testNoneReturn(self):
    # A path that falls through to `return None` adds NoneType to the Union.
    def func(a):
      if a == 5:
        return a
      return None
    self.assertReturnType(typehints.Union[int, type(None)], func, [int])
  def testSimpleList(self):
    # List displays of constants infer List[int]; calling the disallowed
    # builtin list() degrades the element type to Any.
    self.assertReturnType(
        typehints.List[int], lambda xs: [1, 2], [typehints.Tuple[int, ...]])
    self.assertReturnType(
        typehints.List[typehints.Any],
        lambda xs: list(xs), # List is a disallowed builtin
        [typehints.Tuple[int, ...]])
  def testListComprehension(self):
    # A comprehension over a homogeneous tuple infers List of the element type.
    self.assertReturnType(
        typehints.List[int],
        lambda xs: [x for x in xs], [typehints.Tuple[int, ...]])
  def testTupleListComprehension(self):
    # Comprehensions over fixed-size tuples union the member types; nested
    # tuple structure in the produced elements is preserved.
    self.assertReturnType(
        typehints.List[int],
        lambda xs: [x for x in xs], [typehints.Tuple[int, int, int]])
    self.assertReturnType(
        typehints.List[typehints.Union[int, float]],
        lambda xs: [x for x in xs], [typehints.Tuple[int, float]])
    expected = typehints.List[typehints.Tuple[str, int]]
    self.assertReturnType(
        expected,
        lambda kvs: [(kvs[0], v) for v in kvs[1]],
        [typehints.Tuple[str, typehints.Iterable[int]]])
    self.assertReturnType(
        typehints.List[typehints.Tuple[str, typehints.Union[str, int], int]],
        lambda L: [(a, a or b, b) for a, b in L],
        [typehints.Iterable[typehints.Tuple[str, int]]])
  def testGenerator(self):
    # Generator functions infer Iterable of the union of all yielded types.
    def foo(x, y):
      yield x
      yield y
    self.assertReturnType(typehints.Iterable[int], foo, [int, int])
    self.assertReturnType(
        typehints.Iterable[typehints.Union[int, float]], foo, [int, float])
  def testGeneratorComprehension(self):
    # A generator expression infers Iterable of the source element type.
    self.assertReturnType(
        typehints.Iterable[int],
        lambda xs: (x for x in xs), [typehints.Tuple[int, ...]])
  def testBinOp(self):
    # `+` keeps int for ints, degrades to Any with an Any operand, and unions
    # element types when concatenating lists.
    self.assertReturnType(int, lambda a, b: a + b, [int, int])
    self.assertReturnType(int, lambda a: a + 1, [int])
    self.assertReturnType(
        typehints.Any, lambda a, b: a + b, [int, typehints.Any])
    self.assertReturnType(
        typehints.List[typehints.Union[int, str]],
        lambda a,
        b: a + b, [typehints.List[int], typehints.List[str]])
  def testCall(self):
    # Calls through a locally-defined lambda propagate the argument type;
    # keyword-only calls degrade to Any.
    f = lambda x, *args: x
    self.assertReturnType(
        typehints.Tuple[int, float], lambda: (f(1), f(2.0, 3)))
    # We could do better here, but this is at least correct.
    self.assertReturnType(
        typehints.Tuple[int, typehints.Any], lambda: (1, f(x=1.0)))
  def testClosure(self):
    # Free variables closed over from the enclosing scope keep their types.
    x = 1
    y = 1.0
    self.assertReturnType(typehints.Tuple[int, float], lambda: (x, y))
  def testGlobals(self):
    # Module-level globals resolve to the type of their value
    # (global_int is presumably defined elsewhere in this module).
    self.assertReturnType(int, lambda: global_int)
  def testBuiltins(self):
    # Known builtins such as len() map to their return type.
    self.assertReturnType(int, lambda x: len(x), [typehints.Any])
  def testGetAttr(self):
    # Attribute access: a known str-valued attribute (__doc__) types as str,
    # an unknown attribute as Any.
    self.assertReturnType(
        typehints.Tuple[str, typehints.Any],
        lambda: (typehints.__doc__, typehints.fake))
  def testMethod(self):
    # Bound (A().m) and unbound (A.m) method calls propagate the argument type.
    class A(object):
      def m(self, x):
        return x
    self.assertReturnType(int, lambda: A().m(3))
    self.assertReturnType(float, lambda: A.m(A(), 3.0))
  def testCallFunctionOnAny(self):
    # Tests inference when CALL_FUNCTION/CALL_METHOD's function argument is Any.
    # The function cannot be called but inference should continue. Also tests
    # that LOAD_ATTR/LOAD_METHOD implementations don't load builtin functions,
    # which also break inference since they don't disassemble.
    # Regression guard: a failed simulated call must not poison the result.
    def call_function_on_any(s):
      # str.split is a builtin so opcodes.load_attr (load_method in Py3.7+)
      # should put Any on the stack.
      # If infer_return_type_func raises while trying to simulate CALL_FUNCTION
      # on Any, the result will be Any instead of int.
      s.split()
      return 0
    self.assertReturnType(int, call_function_on_any, [str])
  def testAlwaysReturnsEarly(self):
    # Every path returns an int, so no NoneType appears in the result.
    def some_fn(v):
      if v:
        return 1
      return 2
    self.assertReturnType(int, some_fn)
  def testDict(self):
    # An empty dict literal infers Dict[Any, Any].
    self.assertReturnType(
        typehints.Dict[typehints.Any, typehints.Any], lambda: {})
# yapf: disable
  def testDictComprehension(self):
    # With untyped keys the dict comprehension falls back to Dict[Any, Any].
    fields = []
    expected_type = typehints.Dict[typehints.Any, typehints.Any]
    self.assertReturnType(
        expected_type, lambda row: {f: row[f] for f in fields}, [typehints.Any])
  def testDictComprehensionSimple(self):
    # Constant key and value give a concrete Dict[str, int].
    self.assertReturnType(
        typehints.Dict[str, int], lambda _list: {'a': 1 for _ in _list}, [])
  def testSet(self):
    # Set comprehensions: an empty source yields Set[Union[()]], a typed
    # source the element type.
    self.assertReturnType(
        typehints.Set[typehints.Union[()]], lambda: {x for x in ()})
    self.assertReturnType(
        typehints.Set[int], lambda xs: {x for x in xs}, [typehints.List[int]])
# yapf: enable
  def testDepthFunction(self):
    # `depth` bounds how many levels of user function calls are analyzed;
    # depth=0 yields Any.
    def f(i):
      return i
    self.assertReturnType(typehints.Any, lambda i: f(i), [int], depth=0)
    self.assertReturnType(int, lambda i: f(i), [int], depth=1)
  def testDepthMethod(self):
    # The same `depth` limit applies to bound and unbound method calls.
    class A(object):
      def m(self, x):
        return x
    self.assertReturnType(typehints.Any, lambda: A().m(3), depth=0)
    self.assertReturnType(int, lambda: A().m(3), depth=1)
    self.assertReturnType(typehints.Any, lambda: A.m(A(), 3.0), depth=0)
    self.assertReturnType(float, lambda: A.m(A(), 3.0), depth=1)
  def testBuildTupleUnpackWithCall(self):
    # Lambda uses BUILD_TUPLE_UNPACK_WITH_CALL opcode in Python 3.6, 3.7.
    # *args unpacking folds the unpacked element type into each parameter's
    # Union (first case); without unpackable contents the list type itself is
    # folded in (second case).
    def fn(x1, x2, *unused_args):
      return x1, x2
    self.assertReturnType(
        typehints.Tuple[typehints.Union[str, float, int],
                        typehints.Union[str, float, int]],
        lambda x1,
        x2,
        _list: fn(x1, x2, *_list), [str, float, typehints.List[int]])
    # No *args
    self.assertReturnType(
        typehints.Tuple[typehints.Union[str, typehints.List[int]],
                        typehints.Union[str, typehints.List[int]]],
        lambda x1,
        x2,
        _list: fn(x1, x2, *_list), [str, typehints.List[int]])
  def testCallFunctionEx(self):
    # Test when fn arguments are built using BUiLD_LIST.
    # fn(*[x1, x2]) types the varargs result as a List of the union of the
    # unpacked element types.
    def fn(*args):
      return args
    self.assertReturnType(
        typehints.List[typehints.Union[str, float]],
        lambda x1,
        x2: fn(*[x1, x2]), [str, float])
  def testCallFunctionExKwargs(self):
    # **kwargs through CALL_FUNCTION_EX is unsupported, so inference degrades
    # the result to Any.
    def fn(x1, x2, **unused_kwargs):
      return x1, x2
    # Keyword args are currently unsupported for CALL_FUNCTION_EX.
    self.assertReturnType(
        typehints.Any,
        lambda x1,
        x2,
        _dict: fn(x1, x2, **_dict), [str, float, typehints.List[int]])
  def testInstanceToType(self):
    # Table-driven check that trivial_inference.instance_to_type maps concrete
    # instances (containers, classes, methods, beam.Row) to type hints.
    class MyClass(object):
      def method(self):
        pass
    test_cases = [
        (typehints.Dict[str, int], {
            'a': 1
        }),
        (typehints.Dict[str, typehints.Union[str, int]], {
            'a': 1, 'b': 'c'
        }),
        (typehints.Dict[typehints.Any, typehints.Any], {}),
        (typehints.Set[str], {'a'}),
        (typehints.Set[typehints.Union[str, float]], {'a', 0.4}),
        (typehints.Set[typehints.Any], set()),
        (typehints.FrozenSet[str], frozenset(['a'])),
        (
            typehints.FrozenSet[typehints.Union[str, float]],
            frozenset(['a', 0.4])),
        (typehints.FrozenSet[typehints.Any], frozenset()),
        (typehints.Tuple[int], (1, )),
        (typehints.Tuple[int, int, str], (1, 2, '3')),
        (typehints.Tuple[()], ()),
        (typehints.List[int], [1]),
        (typehints.List[typehints.Union[int, str]], [1, 'a']),
        (typehints.List[typehints.Any], []),
        (type(None), None),
        (type(MyClass), MyClass),
        (MyClass, MyClass()),
        (type(MyClass.method), MyClass.method),
        (types.MethodType, MyClass().method),
        (row_type.RowTypeConstraint([('x', int)]), beam.Row(x=37)),
    ]
    for expected_type, instance in test_cases:
      self.assertEqual(
          expected_type,
          trivial_inference.instance_to_type(instance),
          msg=instance)
  def testRow(self):
    # beam.Row(**kwargs) construction infers a RowTypeConstraint carrying the
    # field names and their inferred types.
    self.assertReturnType(
        row_type.RowTypeConstraint([('x', int), ('y', str)]),
        lambda x,
        y: beam.Row(x=x + 1, y=y), [int, str])
    self.assertReturnType(
        row_type.RowTypeConstraint([('x', int), ('y', str)]),
        lambda x: beam.Row(x=x, y=str(x)), [int])
  def testRowAttr(self):
    # Row field types resolve both via direct attribute access and via
    # getattr with a constant name.
    self.assertReturnType(
        typehints.Tuple[int, str],
        lambda row: (row.x, getattr(row, 'y')),
        [row_type.RowTypeConstraint([('x', int), ('y', str)])])
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 33.739535 | 80 | 0.628136 |
6e65894e2c5b9c175719865cffeb5db73645c7d7 | 647 | py | Python | python/federatedml/nn/backend/pytorch/custom/loss.py | hubert-he/FATE | 6758e150bd7ca7d6f788f9a7a8c8aea7e6500363 | [
"Apache-2.0"
] | 3,787 | 2019-08-30T04:55:10.000Z | 2022-03-31T23:30:07.000Z | python/federatedml/nn/backend/pytorch/custom/loss.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | 1,439 | 2019-08-29T16:35:52.000Z | 2022-03-31T11:55:31.000Z | python/federatedml/nn/backend/pytorch/custom/loss.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | 1,179 | 2019-08-29T16:18:32.000Z | 2022-03-31T12:55:38.000Z | # Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
import custom loss here
"""
| 34.052632 | 75 | 0.741886 |
133831a0cc29df6a38b856484df9cdb29c102e75 | 1,525 | py | Python | pcat2py/class/26f3b76c-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/26f3b76c-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/26f3b76c-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
################################################################################
# 26f3b76c-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """Compliance finding for Outlook's published-calendar access policy.

    ``check`` reads the ``RestrictedAccessOnly`` registry DWORD through the
    supplied CLI helper; ``fix`` creates the key hierarchy and forces the
    value to 1.
    """

    # Registry key that holds the RestrictedAccessOnly policy value.
    _KEY_PATH = r'HKCU:\Software\Policies\Microsoft\Office\14.0\outlook\options\pubcal'

    def __init__(self):
        # Human-readable lines describing the last check, for reporting.
        self.output = []
        # Result of the most recent check().
        self.is_compliant = False
        # Stable identifier for this finding.
        self.uuid = "26f3b76c-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Return True when RestrictedAccessOnly equals 1; record the output lines."""
        self.is_compliant = False
        value = cli.get_reg_dword(self._KEY_PATH, 'RestrictedAccessOnly')
        self.output = [self._KEY_PATH, 'RestrictedAccessOnly=' + str(value)]
        self.is_compliant = value == 1
        return self.is_compliant

    def fix(self, cli):
        """Create the key path (parents first) and set RestrictedAccessOnly to 1."""
        for key in (r'HKCU:\Software\Policies\Microsoft\Office\14.0\outlook',
                    r'HKCU:\Software\Policies\Microsoft\Office\14.0\outlook\options',
                    r'HKCU:\Software\Policies\Microsoft\Office\14.0\outlook\options\pubcal'):
            cli.powershell("New-Item -path '%s'" % key)
        cli.powershell("Set-ItemProperty -path '%s' -name 'RestrictedAccessOnly' -value 1 -Type DWord" % self._KEY_PATH)
98a055ff70c4510f671df57eeae314b5c92fbe86 | 2,241 | py | Python | src/lgr_manage/views/reference_lgr.py | GuillaumeBlanchet/lgr-django | 429ca5ddb9311cfb1a7ddc906b32d57780585f40 | [
"BSD-3-Clause"
] | null | null | null | src/lgr_manage/views/reference_lgr.py | GuillaumeBlanchet/lgr-django | 429ca5ddb9311cfb1a7ddc906b32d57780585f40 | [
"BSD-3-Clause"
] | null | null | null | src/lgr_manage/views/reference_lgr.py | GuillaumeBlanchet/lgr-django | 429ca5ddb9311cfb1a7ddc906b32d57780585f40 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django import views
from django.contrib import messages
from django.http import HttpResponse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic.detail import SingleObjectMixin
from lgr_models.models import RefLgr
from lgr_manage.forms import RefLgrCreateForm
from lgr_manage.views.common import BaseListAdminView, BaseAdminView
class RefLgrListView(BaseListAdminView):
    """List all reference LGRs and expose an empty creation form in context."""
    model = RefLgr
    template_name = 'lgr_idn_table_review_admin/ref_lgr.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(form=RefLgrCreateForm())
        return ctx
class RefLgrCreateView(BaseAdminView, views.generic.CreateView):
    """Create a reference LGR, reporting success or failure via messages."""
    model = RefLgr
    form_class = RefLgrCreateForm
    template_name = 'lgr_idn_table_review_admin/ref_lgr.html'
    success_url = reverse_lazy('lgr_idn_admin_ref_lgr')

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # The creation template also renders the existing objects.
        ctx['object_list'] = RefLgrListView.model._default_manager.all()
        return ctx

    def form_valid(self, form):
        # messages.success is the shortcut for add_message(..., SUCCESS, ...).
        messages.success(self.request, _('New Reference LGR created'))
        return super().form_valid(form)

    def form_invalid(self, form):
        messages.error(self.request, _('Failed to create Reference LGR'))
        return super().form_invalid(form)
class RefLgrView(BaseAdminView, views.View):
    """Dispatch GET requests to the list view and POST requests to the create view."""

    def get(self, request, *args, **kwargs):
        return RefLgrListView.as_view()(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        return RefLgrCreateView.as_view()(request, *args, **kwargs)
class RefLgrDeleteView(BaseAdminView, views.generic.DeleteView):
    """Delete the reference LGR identified by the 'lgr_id' URL keyword argument."""
    model = RefLgr
    success_url = reverse_lazy('lgr_idn_admin_ref_lgr')
    pk_url_kwarg = 'lgr_id'
class DisplayRefLgrView(SingleObjectMixin, views.View):
    """Return the raw XML content of the reference LGR file as text/xml."""
    pk_url_kwarg = 'lgr_id'
    model = RefLgr

    def get(self, request, *args, **kwargs):
        return HttpResponse(
            self.get_object().file.read(), content_type='text/xml', charset='UTF-8')
| 32.955882 | 95 | 0.722445 |
74f783628f7105df40698a9cce9839a701a1fc4d | 10,513 | py | Python | skylink/skylink.py | enourbakhsh/SkyLink | 3fd7d919145344515cc9d8ede90518a234421d51 | [
"MIT"
] | null | null | null | skylink/skylink.py | enourbakhsh/SkyLink | 3fd7d919145344515cc9d8ede90518a234421d51 | [
"MIT"
] | null | null | null | skylink/skylink.py | enourbakhsh/SkyLink | 3fd7d919145344515cc9d8ede90518a234421d51 | [
"MIT"
] | null | null | null | """
SkyLink
"""
import numpy as np
from astropy.table import Table, vstack
from astropy.coordinates import SkyCoord
from .fof import fastmatch
from busypal import BusyPal
import pandas as pd
import pickle
import os
import sys
import subprocess
import inspect
import time
import colored as cl
import datetime
fof_path = inspect.getfile(fastmatch)
"""
Important notes!
TODO: I still have some functions and lines of code in this python file shamelessly borrowed from
FoFCatalogMatching since I wanted to be able to ingest the input catalogs exactly the same way that
FoFCatalogMatching does and use it as a benchmark to verify the results.
That's why I adopted some codes and also the style of the outputs from the aforementioned package, at least for now.
TODO: do not allow users to use nprocs more than the number of their processors
`linking_length` as a dictionary has not been fully tested but it outputs the results.
"""
__all__ = ['match']
# MPI
# from mpi4py import MPI
# # comm = MPI.COMM_WORLD
# rank = comm.Get_rank()
# nprocs = comm.Get_size()
# comm = MPI.COMM_SELF.Spawn(sys.executable, args=[fof_path], maxprocs=8)
# also https://www.endpoint.com/blog/2015/01/28/getting-realtime-output-using-python
# modified from https://stackoverflow.com/questions/18421757/live-output-from-subprocess-command
# to add stderr to stdout
def _run_command(cmd,points,points_path,group_ids_path):
    """Run an external (MPI) FoF worker, streaming its output live.

    Pickles ``points`` to ``points_path``, runs ``cmd`` through the shell
    (``cmd`` must therefore be a single command string), echoes the child's
    combined stdout/stderr while it runs, then unpickles and returns the
    group ids the worker wrote to ``group_ids_path``.

    Raises RuntimeError when the child exits with a non-zero return code.
    """
    # - remove the old results just in case
    if os.path.exists(group_ids_path):
        os.remove(group_ids_path)
    with open(points_path, 'wb') as h:
        pickle.dump(points, h)
    # stderr is merged into stdout so progress bars and errors interleave
    # in arrival order; universal_newlines gives us text lines.
    process = subprocess.Popen(cmd, shell=True,
                               stdin=subprocess.DEVNULL,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT, bufsize=1,
                               universal_newlines=True)
    while True: #process.poll() is None: #.stdout.readable():
        line = process.stdout.readline()
        # if not line:
        #     print("\r\r" + str(line), end='')
        #     sys.stdout.flush()
        # sys.stdout.write(f'{line}') # and whatever you want to do...
        # print(f'\r {line} \r', end='', flush=True)
        # time.sleep(1)
        # # break
        # print(line.strip())
        # line = line.replace('\n', '')
        if '%|' in line: # tqdm line
            # Rewrite tqdm progress lines in place with a carriage return.
            print(f'\r{line.rstrip()}', end='', flush=True)
        else:
            print(f'{line.rstrip()}') #, end='\r', flush=True)
        # if '100%|' in line:
        #     print('\n')
        if not line: # EOF
            returncode = process.poll()
            if returncode is not None:
                break
            sys.stdout.flush()
            time.sleep(0.02) # cmd closed stdout, but not exited yet
    return_code = process.poll()
    if return_code!=0:
        raise RuntimeError(f"Something went wrong in '{fof_path}' with the return code {return_code}")
    # Clean up the input cache and read back the worker's result.
    if os.path.exists(points_path):
        os.remove(points_path)
    with open(group_ids_path, 'rb') as h:
        group_id = pickle.load(h)
    os.remove(group_ids_path)
    return group_id
def _check_max_count(count):
if count is not None:
count = int(count)
if count < 1:
raise ValueError('`count` must be None or a positive integer.')
return count
def match(catalog_dict, linking_lengths=None,
          ra_label='ra', dec_label='dec',
          ra_unit='deg', dec_unit='deg',
          catalog_len_getter=len,
          mpi=False, mpi_path='mpirun', graph_lib='networkit', num_threads=None,
          nprocs=2, overlap=1.0, cache_root=os.getcwd(), sort=True,
          return_pandas=False, storekdtree=True, use_linked_mask=True, verbose=1,
          show_progress=True, silent=False, **tqdm_kwargs):
    """
    Match multiple catalogs with a friends-of-friends (FoF) algorithm.

    Ruturns an astropy Table (or a pandas DataFrame when `return_pandas` is
    True) that has a group id and a row id in each catalog.

    Parameters
    ----------
    catalog_dict : dict
        Catalogs to match.
        In the format of {'cat_a': catalog_table_a, 'cat_b': catalog_table_b, }
    linking_lengths : dict or float
        FoF linking length. Assuming the unit of arcsecond.
        Can specify multiple values with the maximal allowed numbers in each group.
        Use `None` to mean no constraint.
        Example: {5.0: 5, 4.0: 5, 3.0: 4, 2.0: 3, 1.0: None}
    ra_label : str, optional, default: 'ra'
    dec_label : str, optional, default: 'dec'
    ra_unit : str or astropy.units.Unit, optional, default: 'deg'
    dec_unit : str or astropy.units.Unit, optional, default: 'deg'
    catalog_len_getter : callable, optional, default: len

    Returns
    -------
    matched_catalog : astropy.table.Table or pandas.DataFrame
    """
    t0 = datetime.datetime.now()

    # Validate `nprocs` unconditionally (previously this check was only
    # reachable when `verbose` was truthy, silently accepting bad values).
    if nprocs < 1:
        raise ValueError('illegal `nproc`')
    if verbose:
        if nprocs > 1:
            print(cl.stylize('✔', cl.fg('green')+cl.attr('bold'))+f' Running {nprocs} parallel jobs')
        else:
            print(cl.stylize('✔', cl.fg('green')+cl.attr('bold'))+f' Running without parallelization')

    # BusyPal skip levels used below: 0 = show spinner, 1 = skip, 2 = silent.
    skip_busypal = 0 if show_progress else 1
    if silent:
        verbose = 0
        skip_busypal = 2

    if mpi:
        # Points and results are exchanged with the MPI worker via cache files.
        if cache_root and not cache_root.endswith('/'):
            cache_root += '/'
        points_path = cache_root + 'points.cache'
        group_ids_path = cache_root + 'group_ids.cache'

    # Normalize `linking_lengths` to a list of (length_arcsec, max_count)
    # pairs, processed from the largest linking length to the smallest.
    if isinstance(linking_lengths, dict):
        linking_lengths = [(float(k), _check_max_count(linking_lengths[k]))
                           for k in sorted(linking_lengths, key=float, reverse=True)]
    else:
        linking_lengths = [(float(linking_lengths), None)]

    # Stack all catalogs into one table, remembering each row's origin.
    xstacked_catalog = []
    for catalog_key, catalog in catalog_dict.items():
        if catalog is None:
            continue
        n_rows = catalog_len_getter(catalog)
        xstacked_catalog.append(Table({
            'ra': catalog[ra_label],
            'dec': catalog[dec_label],
            'row_index': np.arange(n_rows),
            'catalog_key': np.repeat(catalog_key, n_rows),
        }))

    if not xstacked_catalog:
        raise ValueError('No catalogs to merge!!')

    stacked_catalog = vstack(xstacked_catalog, 'exact', 'error')
    points = SkyCoord(stacked_catalog['ra'], stacked_catalog['dec'], unit=(ra_unit, dec_unit))

    # TODO: faster non-internal match, i.e. when a full FoF is not needed.
    coords1 = None
    coords2 = None

    del stacked_catalog['ra'], stacked_catalog['dec']

    group_id = regroup_mask = group_id_shift = None

    for linking_length_arcsec, max_count in linking_lengths:
        if group_id is None:
            # First pass: match the full point set.
            if mpi:
                # NOTE: `_run_command` runs with shell=True, so the command
                # must be a single string (a list would only execute its
                # first element).
                cmd = f'{mpi_path} -n {nprocs} {sys.executable} {fof_path} --points_path={points_path} --linking_length={linking_length_arcsec} --group_ids_path={group_ids_path} --tqdm_kwargs={tqdm_kwargs}'
                print(f'Running the command: {cmd}')
                group_id = _run_command(cmd, points, points_path, group_ids_path)
            else:
                group_id = fastmatch(
                    coords=points, coords1=coords1, coords2=coords2,
                    linking_length=linking_length_arcsec,
                    reassign_group_indices=False, graph_lib=graph_lib,
                    num_threads=num_threads, storekdtree=storekdtree,
                    use_linked_mask=use_linked_mask, njobs=nprocs,
                    verbose=verbose, show_progress=show_progress,
                    silent=silent, **tqdm_kwargs)
        else:
            # Refinement pass: re-match only the rows of oversized groups.
            if mpi:
                # Fixed: the command used to be built as a list (broken with
                # shell=True), and the result overwrote the whole `group_id`
                # array instead of just the regrouped subset.
                cmd = f'{mpi_path} -n {nprocs} {sys.executable} {fof_path} --points_path={points_path} --linking_length={linking_length_arcsec} --group_ids_path={group_ids_path} --tqdm_kwargs={tqdm_kwargs}'
                group_id[regroup_mask] = _run_command(cmd, points[regroup_mask], points_path, group_ids_path)
            else:
                # Fixed: `fastmatch` receives its point set via `coords`
                # (see the first pass above), not `points`.
                group_id[regroup_mask] = fastmatch(
                    coords=points[regroup_mask],
                    linking_length=linking_length_arcsec,
                    reassign_group_indices=False)
            # Shift the fresh sub-group ids past all existing ids so they
            # cannot collide with groups that were not re-matched.
            group_id[regroup_mask] += group_id_shift

        if max_count is None:
            _, group_id = np.unique(group_id, return_inverse=True)
            break

        with BusyPal('Reassigning group ids with consecutive numbers', fmt='{spinner} {message}', skip=skip_busypal, verbose=verbose):
            _, group_id, counts = np.unique(group_id, return_inverse=True, return_counts=True)

        group_id_shift = group_id.max() + 1
        # Rows belonging to groups larger than `max_count` get re-matched
        # with the next (smaller) linking length.
        regroup_mask = (counts[group_id] > max_count)
        del counts

        if not regroup_mask.any():
            break

    group_id = pd.factorize(group_id)[0]  # very fast!
    stacked_catalog['group_id'] = group_id

    if sort:
        with BusyPal('Sorting', fmt='{spinner} {message}', skip=skip_busypal, verbose=verbose):
            stacked_catalog = stacked_catalog.group_by(['group_id','row_index'])

    if return_pandas:
        if verbose:
            print(cl.stylize('✔ Success!', cl.fg('green')+cl.attr('bold'))+f' Took {str(datetime.timedelta(seconds=round((datetime.datetime.now()-t0).seconds)))} hms.')
        return stacked_catalog.to_pandas()
    else:
        if verbose:
            print(cl.stylize(f'✔ Success! Took {str(datetime.timedelta(seconds=round((datetime.datetime.now()-t0).seconds)))} to execute.', cl.attr('bold')+cl.fg('green')))
        return stacked_catalog
a988f522ca866b5f32b089d7ea997f7f4cd5be60 | 737 | py | Python | leetCode/P4_MedianofTwoSortedArrays.py | itsvinayak/cracking_the_codeing_interview | 7347f7e831b306c4c4314bd2d41809a5b5741497 | [
"MIT"
] | 4 | 2020-07-19T03:49:43.000Z | 2021-06-29T07:13:39.000Z | leetCode/P4_MedianofTwoSortedArrays.py | itsvinayak/cracking_the_codeing_interview | 7347f7e831b306c4c4314bd2d41809a5b5741497 | [
"MIT"
] | 1 | 2020-04-01T06:40:45.000Z | 2020-04-01T06:41:22.000Z | leetCode/P4_MedianofTwoSortedArrays.py | itsvinayak/cracking_the_codeing_interview | 7347f7e831b306c4c4314bd2d41809a5b5741497 | [
"MIT"
] | 1 | 2020-08-14T18:14:04.000Z | 2020-08-14T18:14:04.000Z | class Solution:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
tempArr = []
i = 0
j = 0
m = len(nums1)
n = len(nums2)
while i < m and j < n:
if nums1[i] < nums2[j]:
tempArr.append(nums1[i])
i += 1
else:
tempArr.append(nums2[j])
j += 1
while i < m:
tempArr.append(nums1[i])
i += 1
while j < n:
tempArr.append(nums2[j])
j += 1
mid = (m + n) // 2
if (m + n) % 2 == 0:
return (tempArr[mid - 1] + tempArr[mid]) / 2
return float(tempArr[mid])
| 27.296296 | 83 | 0.39213 |
600702bf4a6a35d30d76aabd6c3b2554d5d13a4b | 11,859 | py | Python | src/azure-cli/azure/cli/command_modules/botservice/bot_json_formatter.py | xaliciayang/azure-cli | 38c80c875e8a79d08d06a2f42ec82fd54934343e | [
"MIT"
] | 4 | 2016-08-23T06:19:01.000Z | 2018-03-20T22:47:15.000Z | src/azure-cli/azure/cli/command_modules/botservice/bot_json_formatter.py | xaliciayang/azure-cli | 38c80c875e8a79d08d06a2f42ec82fd54934343e | [
"MIT"
] | 120 | 2018-03-27T19:14:40.000Z | 2020-12-10T23:53:35.000Z | src/azure-cli/azure/cli/command_modules/botservice/bot_json_formatter.py | xaliciayang/azure-cli | 38c80c875e8a79d08d06a2f42ec82fd54934343e | [
"MIT"
] | 11 | 2018-08-23T21:31:06.000Z | 2020-09-03T21:39:51.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
from collections import Counter
import sys
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.command_modules.botservice.web_app_operations import WebAppOperations
from azure.cli.command_modules.botservice.kudu_client import KuduClient
class BotJsonFormatter: # pylint:disable=too-few-public-methods
    @staticmethod
    def create_bot_json(cmd, client, resource_group_name, resource_name, logger, app_password=None,  # pylint:disable=too-many-locals
                        raw_bot_properties=None, password_only=True):
        """Assemble a JSON-able summary of a bot, resolving its MSA app password when possible.

        :param cmd: CLI command context.
        :param client: Azure Bot Service client.
        :param resource_group_name: Resource group containing the bot.
        :param resource_name: Name of the bot resource.
        :param logger: Logger used for user-facing warnings.
        :param app_password: Known MSA app password; when None it is looked up
            in the web app's application settings and, failing that, decrypted
            from the deployed .bot file fetched via Kudu.
        :param raw_bot_properties: Pre-fetched bot resource; fetched through
            `client` when None.
        :param password_only: Forwarded to the .bot-file decryption helper.
        :return: Dictionary
        """
        if not raw_bot_properties:
            raw_bot_properties = client.bots.get(
                resource_group_name=resource_group_name,
                resource_name=resource_name
            )
        # Initialize names bot_file and secret to capture botFilePath and botFileSecret values from the application's
        # settings.
        bot_file = None
        bot_file_secret = None
        profile = Profile(cli_ctx=cmd.cli_ctx)
        if not app_password:
            # No password supplied: probe the bot's web app settings for it.
            site_name = WebAppOperations.get_bot_site_name(raw_bot_properties.properties.endpoint)
            app_settings = WebAppOperations.get_app_settings(
                cmd=cmd,
                resource_group_name=resource_group_name,
                name=site_name
            )
            app_password_values = [item['value'] for item in app_settings if item['name'] == 'MicrosoftAppPassword']
            app_password = app_password_values[0] if app_password_values else None
            if not app_password:
                # Fall back to the .bot file location and its encryption secret.
                bot_file_values = [item['value'] for item in app_settings if item['name'] == 'botFilePath']
                bot_file = bot_file_values[0] if bot_file_values else None
                bot_file_secret_values = [item['value'] for item in app_settings if item['name'] == 'botFileSecret']
                bot_file_secret = bot_file_secret_values[0] if bot_file_secret_values else None
        if not bot_file and not app_password:
            # Nothing found: point the user at the portal and Kudu instead.
            bot_site_name = WebAppOperations.get_bot_site_name(raw_bot_properties.properties.endpoint)
            scm_url = WebAppOperations.get_scm_url(cmd,
                                                   resource_group_name,
                                                   bot_site_name,
                                                   None)
            # TODO: Reevaluate "Public-or-Gov" Azure logic.
            is_public_azure = ('azurewebsites.net' in raw_bot_properties.properties.endpoint or
                               '.net' in raw_bot_properties.properties.endpoint or
                               '.com' in raw_bot_properties.properties.endpoint)
            host = 'https://portal.azure.com/' if is_public_azure else 'https://portal.azure.us/'
            subscription_id = get_subscription_id(cmd.cli_ctx)
            tenant_id = profile.get_subscription(subscription=client.config.subscription_id)['tenantId']
            settings_url = host + '#@{}/resource/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/app_settings'.format(tenant_id, subscription_id, resource_group_name, resource_name)  # pylint: disable=line-too-long
            logger.warning('"MicrosoftAppPassword" and "botFilePath" not found in application settings')
            logger.warning('To see your bot\'s application settings, visit %s' % settings_url)
            logger.warning('To visit your deployed bot\'s code on Azure, visit Kudu for your bot at %s' % scm_url)
        elif not app_password and bot_file:
            # We have the information we need to obtain the MSA App app password via bot file data from Kudu.
            kudu_client = KuduClient(cmd, resource_group_name, resource_name, raw_bot_properties, logger)
            bot_file_data = kudu_client.get_bot_file(bot_file)
            app_password = BotJsonFormatter.__decrypt_bot_file(bot_file_data, bot_file_secret, logger, password_only)
        return {
            'type': 'abs',
            'id': raw_bot_properties.name,
            'name': raw_bot_properties.properties.display_name,
            'appId': raw_bot_properties.properties.msa_app_id,
            'appPassword': app_password,
            'endpoint': raw_bot_properties.properties.endpoint,
            'resourceGroup': str(resource_group_name),
            'tenantId': profile.get_subscription(subscription=client.config.subscription_id)['tenantId'],
            'subscriptionId': client.config.subscription_id,
            'serviceName': resource_name
        }
    @staticmethod
    def __decrypt_bot_file(bot_file_data, bot_file_secret, logger, password_only=True):
        """Decrypt .bot file retrieved from Kudu.

        :param bot_file_data: Parsed .bot file content (must contain a 'services' list).
        :param bot_file_secret: Base64-encoded key the file's secrets were encrypted with.
        :param logger: Logger used for user-facing info/warnings.
        :param password_only: When True, return only the bot's MSA app password
            ('' if none or several distinct passwords are found); otherwise
            return the services list with every known secret field decrypted
            in place.
        :return: str or list
        """
        services = bot_file_data['services']
        # Pick the decryption routine matching the running interpreter.
        if sys.version_info.major >= 3:
            decrypt = BotJsonFormatter.__decrypt_py3
        else:
            decrypt = BotJsonFormatter.__decrypt_py2
        if password_only:
            # Get all endpoints that have potentially valid appPassword values
            endpoints = [service for service in services
                         if service.get('type') == 'endpoint' and service.get('appPassword')]
            # Reduce the retrieved endpoints to just their passwords
            app_passwords = [e['appPassword'] for e in endpoints]
            if len(app_passwords) == 1:
                return decrypt(bot_file_secret, app_passwords[0], logger)
            if len(app_passwords) > 1:
                logger.info('More than one Microsoft App Password found in bot file. Evaluating if more than one '
                            'unique App Password exists.')
                app_passwords = [decrypt(bot_file_secret, pw, logger) for pw in app_passwords]
                # Counter keys are the distinct decrypted passwords.
                unique_passwords = list(Counter(app_passwords))
                if len(unique_passwords) == 1:
                    logger.info('One unique Microsoft App Password found, returning password.')
                    return unique_passwords[0]
                logger.warning('More than one unique Microsoft App Password found in the bot file, please '
                               'manually retrieve your bot file from Kudu to retrieve this information.')
                logger.warning('No Microsoft App Password returned.')
                return ''
            logger.warning('No Microsoft App Passwords found in bot file.')
            return ''
        # Full mode: decrypt every known secret field of every service in place.
        for service in services:
            # For Azure Blob Storage
            if service.get('connectionString'):
                service['connectionString'] = decrypt(bot_file_secret, service['connectionString'], logger)
            # For LUIS and Dispatch
            if service.get('authoringKey'):
                service['authoringKey'] = decrypt(bot_file_secret, service['authoringKey'], logger)
            # For LUIS and QnA Maker
            if service.get('subscriptionKey'):
                service['subscriptionKey'] = decrypt(bot_file_secret, service['subscriptionKey'], logger)
            # For QnA Maker
            if service.get('endpointKey'):
                service['endpointKey'] = decrypt(bot_file_secret, service['endpointKey'], logger)
            # For connecting to the bot
            if service.get('appPassword'):
                service['appPassword'] = decrypt(bot_file_secret, service['appPassword'], logger)
            # For Application Insights
            if service.get('instrumentationKey'):
                service['instrumentationKey'] = decrypt(bot_file_secret, service['instrumentationKey'], logger)
            if service.get('apiKeys'):
                for apiKey in service['apiKeys']:
                    service['apiKeys'][apiKey] = decrypt(bot_file_secret, service['apiKeys'][apiKey], logger)
            # For Cosmos DB
            if service.get('key'):
                service['key'] = decrypt(bot_file_secret, service['key'], logger)
            # For generic services
            if service.get('configuration') and isinstance(service.get('configuration'), dict):
                for key in service['configuration']:
                    service['configuration'][key] = decrypt(bot_file_secret, service['configuration'][key], logger)
        return services
@staticmethod
def __decrypt_py3(secret, encrypted_value, logger):
# If the string length is 0 or no secret was passed in, return the empty string.
if not encrypted_value or not secret:
return encrypted_value
parts = encrypted_value.split("!")
if len(parts) != 2:
logger.warn('Encrypted value "%s" not in standard encrypted format, decryption skipped.' % encrypted_value)
return encrypted_value
iv_text = parts[0]
encrypted_text = parts[1]
iv_bytes = base64.standard_b64decode(str.encode(iv_text))
secret_bytes = base64.standard_b64decode(str.encode(secret))
if len(iv_bytes) != 16:
logger.warn('Initialization Vector for "%s" not valid, decryption skipped.' % encrypted_value)
return encrypted_value
if len(secret_bytes) != 32:
logger.warn('Passed in secret length is invalid, decryption skipped.')
return encrypted_value
cipher = Cipher(algorithms.AES(secret_bytes), modes.CBC(iv_bytes), backend=default_backend())
decryptor = cipher.decryptor()
decrypted_bytes = decryptor.update(base64.standard_b64decode(str.encode(encrypted_text))) + decryptor.finalize()
decrypted_string = decrypted_bytes.decode('utf-8')
return ''.join([char for char in decrypted_string if ord(char) > 31])
@staticmethod
def __decrypt_py2(secret, encrypted_value, logger):
# If the string length is 0 or no secret was passed in, return the empty string.
if not encrypted_value or not secret:
return encrypted_value
parts = encrypted_value.split("!")
if len(parts) != 2:
logger.warn('Encrypted value "%s" not in standard encrypted format, decryption skipped.' % encrypted_value)
return encrypted_value
iv_text = parts[0]
encrypted_text = parts[1]
iv_bytes = base64.standard_b64decode(iv_text)
secret_bytes = base64.standard_b64decode(secret)
if len(iv_bytes) != 16:
logger.warn('Initialization Vector for "%s" not valid, decryption skipped.' % encrypted_value)
return encrypted_value
if len(secret_bytes) != 32:
logger.warn('Passed in secret length is invalid, decryption skipped.')
return encrypted_value
cipher = Cipher(algorithms.AES(secret_bytes), modes.CBC(iv_bytes), backend=default_backend())
decryptor = cipher.decryptor()
decrypted_bytes = decryptor.update(base64.standard_b64decode(encrypted_text)) + decryptor.finalize()
decrypted_string = decrypted_bytes.encode('utf-8')
return ''.join([char for char in decrypted_string if ord(char) > 31])
| 51.116379 | 247 | 0.633865 |
b91bf33544ae4ababd27303f149a1d0fd53396d7 | 3,823 | py | Python | tests/examples/market_maker/test_on_chain_market_maker.py | ehanoc/vyper | 26403f41bc714d3de32dbab5eacb70ccdaffa2d5 | [
"MIT"
] | 1 | 2019-02-21T09:49:52.000Z | 2019-02-21T09:49:52.000Z | tests/examples/market_maker/test_on_chain_market_maker.py | LayerXcom/vyper | 26403f41bc714d3de32dbab5eacb70ccdaffa2d5 | [
"MIT"
] | 1 | 2019-02-22T23:21:51.000Z | 2019-02-23T00:46:17.000Z | tests/examples/market_maker/test_on_chain_market_maker.py | LayerXcom/vyper | 26403f41bc714d3de32dbab5eacb70ccdaffa2d5 | [
"MIT"
] | 1 | 2019-02-18T18:50:53.000Z | 2019-02-18T18:50:53.000Z | import pytest
@pytest.fixture
def market_maker(get_contract):
    """Deploy the on-chain market maker example contract."""
    with open('examples/market_maker/on_chain_market_maker.vy') as source_file:
        source = source_file.read()
    return get_contract(source)
# Deployment parameters for the example ERC20 token used by these tests.
TOKEN_NAME = "Vypercoin"
TOKEN_SYMBOL = "FANG"
TOKEN_DECIMALS = 18
# Initial supply in whole tokens; the total supply is scaled by the decimals.
TOKEN_INITIAL_SUPPLY = (21 * 10 ** 6)
TOKEN_TOTAL_SUPPLY = TOKEN_INITIAL_SUPPLY * (10 ** TOKEN_DECIMALS)
@pytest.fixture
def erc20(get_contract):
    """Deploy the example ERC20 token with the module-level parameters."""
    with open('examples/tokens/ERC20.vy') as source_file:
        source = source_file.read()
    return get_contract(
        source, *[TOKEN_NAME, TOKEN_SYMBOL, TOKEN_DECIMALS, TOKEN_INITIAL_SUPPLY])
def test_initial_statet(market_maker):
    # A freshly deployed market maker holds nothing and has no owner yet.
    for quantity_getter in (
        market_maker.totalEthQty,
        market_maker.totalTokenQty,
        market_maker.invariant,
    ):
        assert quantity_getter() == 0
    assert market_maker.owner() is None
def test_initiate(w3, market_maker, erc20, assert_tx_failed):
    """Funding the market maker records balances, invariant and owner."""
    deployer = w3.eth.accounts[0]
    one_token = 1 * 10 ** 18
    two_eth = 2 * 10 ** 18
    erc20.approve(market_maker.address, two_eth, transact={})
    market_maker.initiate(erc20.address, one_token, transact={'value': two_eth})
    assert market_maker.totalEthQty() == two_eth
    assert market_maker.totalTokenQty() == one_token
    assert market_maker.invariant() == 2 * 10 ** 36
    assert market_maker.owner() == deployer
    assert erc20.name() == TOKEN_NAME
    assert erc20.decimals() == TOKEN_DECIMALS
    # Initiate cannot be called twice
    assert_tx_failed(lambda: market_maker.initiate(erc20.address, one_token, transact={'value': two_eth}))
def test_eth_to_tokens(w3, market_maker, erc20):
    """Buying tokens with ETH moves tokens out of and ETH into the pool."""
    buyer = w3.eth.accounts[1]
    erc20.approve(market_maker.address, 2 * 10 ** 18, transact={})
    market_maker.initiate(erc20.address, 1 * 10 ** 18, transact={'value': 2 * 10 ** 18})
    # Pool state before the trade.
    assert erc20.balanceOf(market_maker.address) == 1000000000000000000
    assert erc20.balanceOf(buyer) == 0
    assert market_maker.totalTokenQty() == 1000000000000000000
    assert market_maker.totalEthQty() == 2000000000000000000
    market_maker.ethToTokens(transact={'value': 100, 'from': buyer})
    # 100 wei buys 50 token units at the current pool ratio.
    assert erc20.balanceOf(market_maker.address) == 999999999999999950
    assert erc20.balanceOf(buyer) == 50
    assert market_maker.totalTokenQty() == 999999999999999950
    assert market_maker.totalEthQty() == 2000000000000000100
def test_tokens_to_eth(w3, tester, market_maker, erc20):
    """Selling tokens back to the pool returns ETH to the seller."""
    seller = w3.eth.accounts[1]
    erc20.transfer(seller, 2 * 10 ** 18, transact={})
    erc20.approve(market_maker.address, 2 * 10 ** 18, transact={'from': seller})
    market_maker.initiate(erc20.address, 1 * 10 ** 18, transact={'value': 2 * 10 ** 18, 'from': seller})
    # Pool and seller balances right after initiation.
    assert w3.eth.getBalance(market_maker.address) == 2000000000000000000
    assert w3.eth.getBalance(seller) == 999997999999999999999900
    assert market_maker.totalTokenQty() == 1000000000000000000
    erc20.approve(market_maker.address, 1 * 10 ** 18, transact={'from': seller})
    market_maker.tokensToEth(1 * 10 ** 18, transact={'from': seller})
    # One ETH flows back to the seller; the pool now holds the tokens.
    assert w3.eth.getBalance(market_maker.address) == 1000000000000000000
    assert w3.eth.getBalance(seller) == 999998999999999999999900
    assert market_maker.totalTokenQty() == 2000000000000000000
    assert market_maker.totalEthQty() == 1000000000000000000
def test_owner_withdraw(w3, tester, market_maker, erc20, assert_tx_failed):
    """Only the owner may drain the pool, receiving both ETH and tokens."""
    owner, outsider = w3.eth.accounts[:2]
    erc20.approve(market_maker.address, 2 * 10 ** 18, transact={})
    market_maker.initiate(erc20.address, 1 * 10 ** 18, transact={'value': 2 * 10 ** 18})
    assert w3.eth.getBalance(owner) == 999994000000000000000000
    assert erc20.balanceOf(owner) == 20999999000000000000000000
    # Only owner can call ownerWithdraw
    assert_tx_failed(lambda: market_maker.ownerWithdraw(transact={'from': outsider}))
    market_maker.ownerWithdraw(transact={})
    assert w3.eth.getBalance(owner) == 999996000000000000000000
    assert erc20.balanceOf(owner) == 21000000000000000000000000
| 41.554348 | 110 | 0.723516 |
0f26685d2f63fb290c2569e2a99fd96e88ef0d18 | 5,829 | py | Python | transfer/client.py | IsaPeter/PythonProjects | 62885fa6d4180e7b2c83fbb67541dc3fc3e29489 | [
"Apache-2.0"
] | null | null | null | transfer/client.py | IsaPeter/PythonProjects | 62885fa6d4180e7b2c83fbb67541dc3fc3e29489 | [
"Apache-2.0"
] | null | null | null | transfer/client.py | IsaPeter/PythonProjects | 62885fa6d4180e7b2c83fbb67541dc3fc3e29489 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/enc python3
import socket, os, json, sys, argparse
target_address = '127.0.0.1'
target_port = 9999
file_data = {'name':'','size':'','method':''}
file_upload = False
file_download = False
file_name = ""
out_fname = ""
list_files = False
recv_len = 1024
def parsing_arguments():
global target_address, target_port, file_upload, file_download, file_name, out_fname, list_files
parser = argparse.ArgumentParser()
parser.add_argument('-t','--target',help='The target host server')
parser.add_argument('-p','--port',help='The target host port')
parser.add_argument('-U','--upload',action='store_true',help='Upload a file')
parser.add_argument('-D','--download',action='store_true',help='Download a file')
parser.add_argument('-f','--file',help='file to upload of download')
parser.add_argument('-o','--out-file',dest='outfile',help='Output file name')
parser.add_argument('-L','--list',dest='listfiles',action='store_true',help='List Remote Files')
args = parser.parse_args()
if args.target: target_address = args.target
if args.port : target_port = int(args.port)
if args.upload: file_upload = True
if args.download: file_download = True
if args.file: file_name = args.file
if args.outfile: out_fname = args.outfile
if args.listfiles: list_files = args.listfiles
def list_remote_files(client):
    """Ask the server for its file listing and print one name per line.

    Sends a JSON header with method 'list', then reads the JSON-encoded list
    of file names from the socket.
    """
    global recv_len, file_data
    try:
        file_data['method'] = 'list'
        header = json.dumps(file_data)
        client.send(header.encode())
        # Accumulate the whole reply; a short read marks the final chunk.
        response = b''
        while True:
            data = client.recv(recv_len)
            if not data:
                break
            response += data
            if len(data) < recv_len:
                break
        # BUG FIX: the original decoded only the LAST chunk (``data``) instead
        # of the accumulated ``response``, losing everything but the final
        # read, and it also zeroed the global ``recv_len``, which broke every
        # subsequent transfer in the same process.
        files = json.loads(response.decode())
        if len(files) > 0:
            for f in files:
                print(f)
    except Exception as x:
        print("Failed to list remote host")
        print(x)
def download(sock, filename):
    """Request *filename* from the server and save it locally.

    Sends a JSON 'download' request header, waits for the server's JSON status
    reply, then streams the announced number of bytes into ``out_fname``
    (which defaults to *filename* when not set on the command line).  Exits
    the process when the server rejects the request.
    """
    global file_name, out_fname, file_data
    try:
        # sending download request
        file_data['name'] = filename
        file_data['method'] = 'download'
        if out_fname == "":
            out_fname = filename
        header = json.dumps(file_data)
        sock.send(header.encode())
        resp = sock.recv(1024).decode()
        resp_fd = json.loads(resp)
        if resp_fd['status'] == 'download ok':
            size = int(resp_fd['size'])
            received = 0
            with open(out_fname, 'wb') as f:
                while received < size:
                    # BUG FIX: recv() may return fewer bytes than requested;
                    # the original assumed full 1024-byte reads and could
                    # over-count, truncating the file.  Trust len(chunk).
                    chunk = sock.recv(min(1024, size - received))
                    if not chunk:
                        # Connection closed early: stop instead of spinning.
                        break
                    f.write(chunk)
                    received += len(chunk)
                    # BUG FIX: the progress message said "uploading" here.
                    print("file downloading {total}/{current}\r".format(
                        total=str(size), current=str(received)), end='')
            print()
            print("Download Successful!")
            sock.close()
        else:
            print(resp_fd['status'])
            sys.exit(1)
    except Exception as x:
        print("Download Failed")
        print(x)
def upload(sock, filename):
    """Send the local file *filename* to the server.

    Sends a JSON 'upload' header containing the file size, waits for the
    server's "upload ok" acknowledgement, then streams the file contents in
    1024-byte chunks, printing progress as it goes.
    """
    try:
        file_data['size'] = os.path.getsize(filename)
        file_data['name'] = filename
        file_data['method'] = 'upload'
        header = json.dumps(file_data)
        with open(filename, 'rb') as f:
            data = f.read()
        sock.send(header.encode())
        ok = sock.recv(10).decode()
        if ok.lower() != "upload ok":
            sys.exit(1)
        total = int(file_data['size'])
        sent = 0
        while sent < total:
            chunk = data[sent:sent + 1024]
            # BUG FIX: use sendall(); plain send() may transmit only part of
            # the chunk, silently corrupting the transfer.  This also replaces
            # the original three-pointer slicing arithmetic with one offset.
            sock.sendall(chunk)
            sent += len(chunk)
            print("{total}/{current}\r".format(total=str(total), current=str(sent)), end='')
        print()
        print("Upload OK")
    except Exception as x:
        print("Upload Failed")
        print(x)
def main():
    """Entry point: parse options, connect, and run the requested operation."""
    global target_address, target_port, file_upload, file_download, file_name, list_files
    parsing_arguments()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((target_address, target_port))
    # Listing is exclusive: print the remote files and exit immediately.
    if list_files:
        list_remote_files(sock)
        sys.exit(0)
    # Upload and download are independent flags and may both run.
    if file_upload:
        upload(sock, file_name)
    if file_download:
        download(sock, file_name)


main()
98dd29ea6a3331b8d79ec8164497be4c9b166f1a | 3,386 | py | Python | metrics/fvd/score.py | MLIA/srvp | 05661faf767cdb33d40fc328679bbe50c3a1f938 | [
"Apache-2.0"
] | 64 | 2020-02-24T03:17:39.000Z | 2022-03-11T07:40:26.000Z | metrics/fvd/score.py | MLIA/srvp | 05661faf767cdb33d40fc328679bbe50c3a1f938 | [
"Apache-2.0"
] | 12 | 2020-06-15T07:17:09.000Z | 2021-08-23T12:41:51.000Z | metrics/fvd/score.py | MLIA/srvp | 05661faf767cdb33d40fc328679bbe50c3a1f938 | [
"Apache-2.0"
] | 17 | 2020-02-25T13:01:11.000Z | 2022-01-19T04:42:43.000Z | # Copyright 2020 Mickael Chen, Edouard Delasalles, Jean-Yves Franceschi, Patrick Gallinari, Sylvain Lamprier
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import numpy as np
import tensorflow as tf
from metrics.fvd.fvd import calculate_fvd, create_id3_embedding, preprocess
def compute_embedding(x):
    """
    Computes FVD embeddings of the input video.
    """
    # Build the embedding op on a fresh graph, then evaluate it in a session
    # bound to that graph.
    graph = tf.Graph()
    with graph.as_default():
        video_tensor = tf.convert_to_tensor(x)
        embedding_op = create_id3_embedding(preprocess(video_tensor, (224, 224)))
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            return sess.run(embedding_op)
def fvd(real, fake):
    """
    Computes the FVD score of pair of input real samples (true data) and fake samples (generated by a model).
    Parameters
    ----------
    real : torch.*.Tensor
        CPU tensor representing samples from the real distribution of shape (length, batch, channels, width, height)
        with values in [0, 1].
    fake : torch.*.Tensor
        CPU tensor representing samples from the fake distribution of shape (length, batch, channels, width, height)
        with values in [0, 1].
    """
    tf.enable_eager_execution()
    # Custom preprocess
    n_ex = real.shape[1]
    # The embedding network is fed batches of exactly 16 videos below.
    assert n_ex >= 16
    # Single-channel (grayscale) videos are tiled to three channels.
    if real.shape[2] == 1:
        real = real.repeat(1, 1, 3, 1, 1)
        fake = fake.repeat(1, 1, 3, 1, 1)
    # (length, batch, c, w, h) -> (batch, length, w, h, c), values scaled to [0, 255].
    real = real.permute(1, 0, 3, 4, 2).contiguous() * 255
    fake = fake.permute(1, 0, 3, 4, 2).contiguous() * 255
    # Split data in chunks of size 16 and compute embeddings
    embedding_real = []
    embedding_fake = []
    for k in range(int(math.ceil(n_ex / 16))):
        # Select a chunk of size 16
        start = k * 16
        stop = min(n_ex, (k + 1) * 16)
        n_k = stop - start
        real_k = real[start:stop]
        fake_k = fake[start:stop]
        if n_k < 16:
            # If we are in the last chunk, pad it back up to 16 items with
            # data from the start of the batch.
            real_k = torch.cat([real_k, real[:16 - n_k]], 0)
            fake_k = torch.cat([fake_k, fake[:16 - n_k]], 0)
        # compute embeddings
        emb_real_k = compute_embedding(real_k)
        emb_fake_k = compute_embedding(fake_k)
        if n_k < 16:
            # Keep only the embeddings of the true (non-padding) items.
            emb_real_k = emb_real_k[:n_k]
            emb_fake_k = emb_fake_k[:n_k]
        embedding_real.append(emb_real_k)
        embedding_fake.append(emb_fake_k)
    embedding_real = np.concatenate(embedding_real, 0)
    embedding_fake = np.concatenate(embedding_fake, 0)
    # Compute FVD
    with tf.Graph().as_default():
        embedding_real = tf.convert_to_tensor(embedding_real)
        embedding_fake = tf.convert_to_tensor(embedding_fake)
        result = calculate_fvd(embedding_real, embedding_fake)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            return sess.run(result)
| 37.208791 | 116 | 0.654164 |
a0570fd1126205a0eb670cf64e479e0d3b0a02d4 | 19,457 | py | Python | superset/connectors/elastic/models.py | zuxqoj/incubator-superset | de5972610998d8faf1dfe2036aee07a2ffbc4509 | [
"Apache-2.0"
] | null | null | null | superset/connectors/elastic/models.py | zuxqoj/incubator-superset | de5972610998d8faf1dfe2036aee07a2ffbc4509 | [
"Apache-2.0"
] | null | null | null | superset/connectors/elastic/models.py | zuxqoj/incubator-superset | de5972610998d8faf1dfe2036aee07a2ffbc4509 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=C,R,W
# pylint: disable=invalid-unary-operand-type
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import json
import logging
from elasticsearch import Elasticsearch
from flask import escape, Markup
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
import pandas as pd
from six import string_types
import sqlalchemy as sa
from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Integer, String,
Text)
from sqlalchemy.orm import backref, relationship
from superset import db, import_util, security_manager, utils
from superset.connectors.base.models import (BaseColumn, BaseDatasource,
BaseMetric)
from superset.models.helpers import AuditMixinNullable, QueryResult, set_perm
from superset.utils import flasher
class ElasticCluster(Model, AuditMixinNullable):
    """ORM object referencing the Elastic clusters"""
    __tablename__ = 'elastic_clusters'
    type = 'elastic'
    id = Column(Integer, primary_key=True)
    cluster_name = Column(String(250), unique=True)
    # JSON-encoded list of host definitions handed directly to Elasticsearch().
    hosts_json = Column(Text)
    metadata_last_refreshed = Column(DateTime)
    cache_timeout = Column(Integer)
    def __repr__(self):
        return self.cluster_name
    @property
    def data(self):
        # Summary dict consumed by the frontend to describe this cluster.
        return {
            'name': self.cluster_name,
            'backend': 'elastic',
        }
    @property
    def hosts(self):
        # Decoded form of ``hosts_json``.
        return json.loads(self.hosts_json)
    def get_client(self):
        """Return a new Elasticsearch client bound to this cluster's hosts."""
        return Elasticsearch(self.hosts)
    def get_mappings(self):
        """Fetch the field mappings of every index in the cluster."""
        client = self.get_client()
        return client.indices.get_mapping()
    def refresh_datasources(self, datasource_name=None, merge_flag=False):
        """Refresh metadata of all datasources in the cluster
        If ``datasource_name`` is specified, only that datasource is updated
        """
        # NOTE(review): ``datasource_name`` and ``merge_flag`` are currently
        # ignored by the loop below — every mapping is always synced.
        for index_name, index_metadata in self.get_mappings().items():
            for name, mapping_metadata in index_metadata.get('mappings').items():
                ElasticDatasource.sync_to_db(
                    '{}.{}'.format(index_name, name), mapping_metadata, self)
    @property
    def perm(self):
        # Permission string consumed by the security manager.
        return '[{obj.cluster_name}].(id:{obj.id})'.format(obj=self)
    def get_perm(self):
        return self.perm
    @property
    def name(self):
        return self.cluster_name
    @property
    def unique_name(self):
        return self.cluster_name
class ElasticColumn(Model, BaseColumn):
    """ORM model for storing Elastic datasource column metadata"""
    __tablename__ = 'elastic_columns'
    datasource_name = Column(
        String(255),
        ForeignKey('elastic_datasources.datasource_name'))
    # Setting enable_typechecks=False disables polymorphic inheritance.
    datasource = relationship(
        'ElasticDatasource',
        backref=backref('columns', cascade='all, delete-orphan'),
        enable_typechecks=False)
    json = Column(Text)
    export_fields = (
        'datasource_name', 'column_name', 'is_active', 'type', 'groupby',
        'count_distinct', 'sum', 'avg', 'max', 'min', 'filterable',
        'description',
    )
    @property
    def expression(self):
        return self.json
    def __repr__(self):
        return self.column_name
    def generate_metrics(self):
        """Generate metrics based on the column metadata"""
        M = ElasticMetric  # noqa
        metrics = []
        # A plain row count is always available.
        metrics.append(ElasticMetric(
            metric_name='count',
            verbose_name='COUNT(*)',
            metric_type='count',
            json=json.dumps({'type': 'count', 'name': 'count'}),
        ))
        if self.sum and self.is_num:
            name = 'sum__' + self.column_name
            metrics.append(ElasticMetric(
                metric_name=name,
                metric_type='sum',
                verbose_name='SUM({})'.format(self.column_name),
                json=json.dumps({'sum': {'field': self.column_name}}),
            ))
        if self.avg and self.is_num:
            name = 'avg__' + self.column_name
            metrics.append(ElasticMetric(
                metric_name=name,
                metric_type='avg',
                verbose_name='AVG({})'.format(self.column_name),
                json=json.dumps({'avg': {'field': self.column_name}}),
            ))
        if self.min and self.is_num:
            name = 'min__' + self.column_name
            metrics.append(ElasticMetric(
                metric_name=name,
                metric_type='min',
                verbose_name='MIN({})'.format(self.column_name),
                json=json.dumps({'min': {'field': self.column_name}}),
            ))
        if self.max and self.is_num:
            name = 'max__' + self.column_name
            metrics.append(ElasticMetric(
                metric_name=name,
                metric_type='max',
                verbose_name='MAX({})'.format(self.column_name),
                json=json.dumps({'max': {'field': self.column_name}}),
            ))
        if self.count_distinct:
            # BUG FIX: the original reused the stale ``name`` left over from a
            # previous branch (and raised NameError when no numeric branch ran
            # first); the distinct-count metric gets its own name.
            name = 'count_distinct__' + self.column_name
            metrics.append(ElasticMetric(
                metric_name=name,
                verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
                metric_type='count_distinct',
                json=json.dumps({'cardinality': {'field': self.column_name}}),
            ))
        # Persist only the metrics that do not already exist for this
        # datasource / cluster combination.
        session = db.session
        for metric in metrics:
            m = (
                session.query(M)
                .filter(M.metric_name == metric.metric_name)
                .filter(M.datasource_name == self.datasource_name)
                .filter(ElasticCluster.cluster_name == self.datasource.cluster_name)
                .first()
            )
            metric.datasource_name = self.datasource_name
            if not m:
                session.add(metric)
                session.flush()
    @classmethod
    def import_obj(cls, i_column):
        """Import a column, reusing an existing row with the same identity."""
        def lookup_obj(lookup_column):
            return db.session.query(ElasticColumn).filter(
                ElasticColumn.datasource_name == lookup_column.datasource_name,
                ElasticColumn.column_name == lookup_column.column_name).first()
        return import_util.import_simple_obj(db.session, i_column, lookup_obj)
class ElasticMetric(Model, BaseMetric):
    """ORM object referencing Elastic metrics for a datasource"""
    __tablename__ = 'elastic_metrics'
    datasource_name = Column(
        String(255),
        ForeignKey('elastic_datasources.datasource_name'))
    # Setting enable_typechecks=False disables polymorphic inheritance.
    datasource = relationship(
        'ElasticDatasource',
        backref=backref('metrics', cascade='all, delete-orphan'),
        enable_typechecks=False)
    # JSON definition of the aggregation this metric performs.
    json = Column(Text)
    export_fields = (
        'metric_name', 'verbose_name', 'metric_type', 'datasource_name',
        'json', 'description', 'is_restricted', 'd3format',
    )
    @property
    def expression(self):
        return self.json
    @property
    def json_obj(self):
        # Parsed form of ``json``; falls back to an empty dict on bad JSON.
        try:
            obj = json.loads(self.json)
        except Exception:
            obj = {}
        return obj
    @property
    def perm(self):
        # Permission string; None when the metric is not attached to a datasource.
        return (
            '{parent_name}.[{obj.metric_name}](id:{obj.id})'
        ).format(obj=self,
                 parent_name=self.datasource.full_name,
                 ) if self.datasource else None
    @classmethod
    def import_obj(cls, i_metric):
        """Import a metric, reusing an existing row with the same identity."""
        def lookup_obj(lookup_metric):
            return db.session.query(ElasticMetric).filter(
                ElasticMetric.datasource_name == lookup_metric.datasource_name,
                ElasticMetric.metric_name == lookup_metric.metric_name).first()
        return import_util.import_simple_obj(db.session, i_metric, lookup_obj)
class ElasticDatasource(Model, BaseDatasource):
    """ORM object referencing Elastic datasources (tables)"""
    __tablename__ = 'elastic_datasources'
    type = 'elastic'
    # NOTE(review): attribute name looks like a typo for ``query_language`` but
    # is kept because other modules may reference it by this exact name.
    query_langtage = 'json'
    cluster_class = ElasticCluster
    metric_class = ElasticMetric
    column_class = ElasticColumn
    baselink = 'elasticdatasourcemodelview'
    # Columns
    datasource_name = Column(String(255), unique=True)
    is_hidden = Column(Boolean, default=False)
    fetch_values_from = Column(String(100))
    cluster_name = Column(
        String(250), ForeignKey('elastic_clusters.cluster_name'))
    cluster = relationship(
        'ElasticCluster', backref='datasources', foreign_keys=[cluster_name])
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    owner = relationship(
        security_manager.user_model,
        backref=backref('elastic_datasources', cascade='all, delete-orphan'),
        foreign_keys=[user_id])
    export_fields = (
        'datasource_name', 'is_hidden', 'description', 'default_endpoint',
        'cluster_name', 'offset', 'cache_timeout', 'params',
    )
    slices = relationship(
        'Slice',
        primaryjoin=(
            'ElasticDatasource.id == foreign(Slice.datasource_id) and '
            'Slice.datasource_type == "elastic"'))
    @property
    def database(self):
        return self.cluster
    @property
    def num_cols(self):
        # Names of the numeric columns only.
        return [c.column_name for c in self.columns if c.is_num]
    @property
    def name(self):
        return self.datasource_name
    @property
    def schema(self):
        # The part before the first dot of ``<index>.<mapping>``, if any.
        ds_name = self.datasource_name or ''
        name_pieces = ds_name.split('.')
        if len(name_pieces) > 1:
            return name_pieces[0]
        else:
            return None
    @property
    def schema_perm(self):
        """Returns schema permission if present, cluster one otherwise."""
        return security_manager.get_schema_perm(self.cluster, self.schema)
    def get_perm(self):
        return (
            '[{obj.cluster_name}].[{obj.datasource_name}]'
            '(id:{obj.id})').format(obj=self)
    @property
    def link(self):
        # HTML anchor pointing to this datasource's default endpoint.
        name = escape(self.datasource_name)
        return Markup('<a href="{self.url}">{name}</a>').format(**locals())
    @property
    def full_name(self):
        return utils.get_datasource_full_name(
            self.cluster_name, self.datasource_name)
    @property
    def time_column_grains(self):
        # Time options exposed to the explore UI.
        return {
            'time_columns': [
                'all', '5 seconds', '30 seconds', '1 minute',
                '5 minutes', '1 hour', '6 hour', '1 day', '7 days',
                'week', 'week_starting_sunday', 'week_ending_saturday',
                'month',
            ],
            'time_grains': ['now'],
        }
    def __repr__(self):
        return self.datasource_name
    @renders('datasource_name')
    def datasource_link(self):
        # HTML anchor pointing to the explore view for this datasource.
        url = '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
        name = escape(self.datasource_name)
        return Markup('<a href="{url}">{name}</a>'.format(**locals()))
    def get_metric_obj(self, metric_name):
        # Raises IndexError when no metric matches ``metric_name``.
        return [
            m.json_obj for m in self.metrics
            if m.metric_name == metric_name
        ][0]
    @classmethod
    def import_obj(cls, i_datasource, import_time=None):
        """Imports the datasource from the object to the database.
        Metrics and columns and datasource will be overridden if exists.
        This function can be used to import/export dashboards between multiple
        superset instances. Audit metadata isn't copied over.
        """
        def lookup_datasource(d):
            return db.session.query(ElasticDatasource).join(ElasticCluster).filter(
                ElasticDatasource.datasource_name == d.datasource_name,
                ElasticCluster.cluster_name == d.cluster_name,
            ).first()
        def lookup_cluster(d):
            return db.session.query(ElasticCluster).filter_by(
                cluster_name=d.cluster_name).one()
        return import_util.import_datasource(
            db.session, i_datasource, lookup_cluster, lookup_datasource,
            import_time)
    @staticmethod
    def version_higher(v1, v2):
        """is v1 higher than v2
        >>> ElasticDatasource.version_higher('0.8.2', '0.9.1')
        False
        >>> ElasticDatasource.version_higher('0.8.2', '0.6.1')
        True
        >>> ElasticDatasource.version_higher('0.8.2', '0.8.2')
        False
        >>> ElasticDatasource.version_higher('0.8.2', '0.9.BETA')
        False
        >>> ElasticDatasource.version_higher('0.8.2', '0.9')
        False
        """
        def int_or_0(v):
            # Non-numeric version components compare as 0.
            try:
                v = int(v)
            except (TypeError, ValueError):
                v = 0
            return v
        v1nums = [int_or_0(n) for n in v1.split('.')]
        v2nums = [int_or_0(n) for n in v2.split('.')]
        # Pad/truncate to exactly (major, minor, patch).
        v1nums = (v1nums + [0, 0, 0])[:3]
        v2nums = (v2nums + [0, 0, 0])[:3]
        return v1nums[0] > v2nums[0] or \
            (v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or \
            (v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and v1nums[2] > v2nums[2])
    def generate_metrics(self):
        # Delegate metric generation to each column.
        for col in self.columns:
            col.generate_metrics()
    def query_str(self):
        # Placeholder textual representation of the query.
        d = {'query': None}
        return json.dumps(d)
    @classmethod
    def sync_to_db(cls, name, metadata, cluster):
        """Fetches metadata for that datasource and merges the Superset db"""
        logging.info('Syncing Elastic datasource [{}]'.format(name))
        session = db.session
        datasource = session.query(cls).filter_by(datasource_name=name).first()
        if not datasource:
            datasource = cls(datasource_name=name)
            session.add(datasource)
            flasher('Adding new datasource [{}]'.format(name), 'success')
        else:
            flasher('Refreshing datasource [{}]'.format(name), 'info')
        session.flush()
        datasource.cluster = cluster
        session.flush()
        for col_name, col_metadata in metadata.get('properties').items():
            cls.merge_column(col_name, col_metadata, datasource, session)
    @classmethod
    def merge_column(cls, col_name, col_metadata, datasource, sesh):
        """Create or update the column row matching one mapping field."""
        col_obj = (
            sesh
            .query(ElasticColumn)
            .filter_by(
                datasource_name=datasource.datasource_name,
                column_name=col_name)
            .first()
        )
        datatype = col_metadata.get('type')
        if not col_obj:
            col_obj = ElasticColumn(
                datasource_name=datasource.datasource_name,
                column_name=col_name)
            sesh.add(col_obj)
        if datatype == 'string':
            col_obj.groupby = True
            col_obj.filterable = True
        if col_obj.is_num:
            col_obj.sum = True
        if col_obj:
            col_obj.type = datatype
            sesh.flush()
            col_obj.datasource = datasource
            col_obj.generate_metrics()
            sesh.flush()
    @staticmethod
    def time_offset(granularity):
        if granularity == 'week_ending_saturday':
            return 6 * 24 * 3600 * 1000  # 6 days
        return 0
    # uses https://en.wikipedia.org/wiki/ISO_8601
    # http://elastic.io/docs/0.8.0/querying/granularities.html
    # TODO: pass origin from the UI
    @staticmethod
    def granularity(period_name, timezone=None, origin=None):
        if not period_name or period_name == 'all':
            return 'all'
        iso_8601_dict = {
            '5 seconds': 'PT5S',
            '30 seconds': 'PT30S',
            '1 minute': 'PT1M',
            '5 minutes': 'PT5M',
            '1 hour': 'PT1H',
            '6 hour': 'PT6H',
            'one_day': 'P1D',
            '1 day': 'P1D',
            '7 days': 'P7D',
            'week': 'P1W',
            'week_starting_sunday': 'P1W',
            'week_ending_saturday': 'P1W',
            'month': 'P1M',
        }
        granularity = {'type': 'period'}
        if timezone:
            granularity['timeZone'] = timezone
        if origin:
            dttm = utils.parse_human_datetime(origin)
            granularity['origin'] = dttm.isoformat()
        if period_name in iso_8601_dict:
            granularity['period'] = iso_8601_dict[period_name]
            if period_name in ('week_ending_saturday', 'week_starting_sunday'):
                # use Sunday as start of the week
                granularity['origin'] = '2016-01-03T00:00:00'
        elif not isinstance(period_name, string_types):
            granularity['type'] = 'duration'
            granularity['duration'] = period_name
        elif period_name.startswith('P'):
            # identify if the string is the iso_8601 period
            granularity['period'] = period_name
        else:
            granularity['type'] = 'duration'
            granularity['duration'] = utils.parse_human_timedelta(
                period_name).total_seconds() * 1000
        return granularity
    def values_for_column(self,
                          column_name,
                          limit=10000):
        """Retrieve some values for the given column"""
        # TODO
    def get_query_str(self, query_obj, phase=1, client=None):
        return self.run_query(client=client, phase=phase, **query_obj)
    def run_query(  # noqa / elastic
            self,
            groupby, metrics,
            granularity,
            from_dttm, to_dttm,
            filter=None,  # noqa
            is_timeseries=True,
            timeseries_limit=None,
            timeseries_limit_metric=None,
            row_limit=None,
            inner_from_dttm=None, inner_to_dttm=None,
            orderby=None,
            extras=None,  # noqa
            select=None,  # noqa
            columns=None, phase=2, client=None, form_data=None):
        """Runs a query against Elastic and returns a dataframe.
        """
        pass
    @property
    def index(self):
        # BUG FIX: the original computed this expression but never returned
        # it, so ``self.index`` always evaluated to None and ``query`` hit no
        # index.
        return self.datasource_name.split('.')[0]
    def query(self, query_obj):
        """Run ``query_obj`` against the cluster and wrap the result."""
        client = self.cluster.get_client()
        equery = {}
        # Aggregations: include only the metrics requested by the query.
        equery['aggregations'] = {}
        for m in self.metrics:
            if m.metric_name in query_obj.get('metrics'):
                equery['aggregations'][m.metric_name] = m.json_obj
        # (debug ``print`` statements removed from this method)
        data = client.search(index=self.index, body=equery)
        data = data['hits']['hits']
        data = [k['_source'] for k in data]
        query_str = self.query_str()
        qry_start_dttm = datetime.now()
        df = pd.DataFrame(data)
        return QueryResult(
            df=df,
            query=query_str,
            duration=datetime.now() - qry_start_dttm)
    def get_filters(self, raw_filters):  # noqa
        # Filter translation is not implemented yet.
        return
    @classmethod
    def query_datasources_by_name(
            cls, session, database, datasource_name, schema=None):
        # NOTE(review): this filters ``cluster_name`` against ``database.id``
        # (a name column vs. an integer id) — looks inconsistent; confirm
        # against callers before changing.
        return (
            session.query(cls)
            .filter_by(cluster_name=database.id)
            .filter_by(datasource_name=datasource_name)
            .all()
        )
# Keep security permissions in sync whenever an ElasticDatasource row is
# inserted or updated.
sa.event.listen(ElasticDatasource, 'after_insert', set_perm)
sa.event.listen(ElasticDatasource, 'after_update', set_perm)
| 33.316781 | 89 | 0.593668 |
12014f7ed1c19585faa1d319d55dadb538d7ca54 | 4,344 | py | Python | airflow/providers/amazon/aws/example_dags/example_eks_with_fargate_profile.py | pyerbiz/airflow | 5216e9cbab29edda3d7510c5b7faea7add4ce08e | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | airflow/providers/amazon/aws/example_dags/example_eks_with_fargate_profile.py | pyerbiz/airflow | 5216e9cbab29edda3d7510c5b7faea7add4ce08e | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | airflow/providers/amazon/aws/example_dags/example_eks_with_fargate_profile.py | pyerbiz/airflow | 5216e9cbab29edda3d7510c5b7faea7add4ce08e | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from os import environ
from airflow.models.dag import DAG
from airflow.providers.amazon.aws.hooks.eks import ClusterStates, FargateProfileStates
from airflow.providers.amazon.aws.operators.eks import (
EKSCreateClusterOperator,
EKSCreateFargateProfileOperator,
EKSDeleteClusterOperator,
EKSDeleteFargateProfileOperator,
EKSPodOperator,
)
from airflow.providers.amazon.aws.sensors.eks import EKSClusterStateSensor, EKSFargateProfileStateSensor
# Names for the demo EKS cluster and its Fargate profile.
CLUSTER_NAME = 'fargate-demo'
FARGATE_PROFILE_NAME = f'{CLUSTER_NAME}-profile'
# Pod selectors for the Fargate profile.
# NOTE(review): if FARGATE_SELECTORS is set in the environment, environ.get
# returns a *string*, not a list of dicts like the default — confirm callers
# expect that, or document the required env-var format.
SELECTORS = environ.get('FARGATE_SELECTORS', [{'namespace': 'default'}])
# IAM role assumed by both the cluster control plane and the pod executor.
ROLE_ARN = environ.get('EKS_DEMO_ROLE_ARN', 'arn:aws:iam::123456789012:role/role_name')
# Space-separated subnet IDs, split into a list for the VPC config below.
SUBNETS = environ.get('EKS_DEMO_SUBNETS', 'subnet-12345ab subnet-67890cd').split(' ')
VPC_CONFIG = {
    'subnetIds': SUBNETS,
    'endpointPublicAccess': True,
    'endpointPrivateAccess': False,
}
with DAG(
    dag_id='example_eks_with_fargate_profile_dag',
    # cluster_name is injected into every task below via default_args.
    default_args={'cluster_name': CLUSTER_NAME},
    schedule_interval=None,
    start_date=datetime(2021, 1, 1),
    max_active_runs=1,
    tags=['example'],
) as dag:

    # Create an Amazon EKS Cluster control plane without attaching a compute service.
    create_cluster = EKSCreateClusterOperator(
        task_id='create_eks_cluster',
        cluster_role_arn=ROLE_ARN,
        resources_vpc_config=VPC_CONFIG,
        compute=None,
    )

    # Block until the control plane reports ACTIVE.
    await_create_cluster = EKSClusterStateSensor(
        task_id='wait_for_create_cluster',
        target_state=ClusterStates.ACTIVE,
    )

    # [START howto_operator_eks_create_fargate_profile]
    create_fargate_profile = EKSCreateFargateProfileOperator(
        task_id='create_eks_fargate_profile',
        pod_execution_role_arn=ROLE_ARN,
        fargate_profile_name=FARGATE_PROFILE_NAME,
        selectors=SELECTORS,
    )
    # [END howto_operator_eks_create_fargate_profile]

    await_create_fargate_profile = EKSFargateProfileStateSensor(
        task_id='wait_for_create_fargate_profile',
        fargate_profile_name=FARGATE_PROFILE_NAME,
        target_state=FargateProfileStates.ACTIVE,
    )

    # Run a short-lived pod on the Fargate-backed cluster to prove it works.
    start_pod = EKSPodOperator(
        task_id="run_pod",
        pod_name="run_pod",
        image="amazon/aws-cli:latest",
        cmds=["sh", "-c", "echo Test Airflow; date"],
        labels={"demo": "hello_world"},
        get_logs=True,
        # Delete the pod when it reaches its final state, or the execution is interrupted.
        is_delete_operator_pod=True,
    )

    # [START howto_operator_eks_delete_fargate_profile]
    delete_fargate_profile = EKSDeleteFargateProfileOperator(
        task_id='delete_eks_fargate_profile',
        fargate_profile_name=FARGATE_PROFILE_NAME,
    )
    # [END howto_operator_eks_delete_fargate_profile]

    await_delete_fargate_profile = EKSFargateProfileStateSensor(
        task_id='wait_for_delete_fargate_profile',
        fargate_profile_name=FARGATE_PROFILE_NAME,
        target_state=FargateProfileStates.NONEXISTENT,
    )

    delete_cluster = EKSDeleteClusterOperator(task_id='delete_eks_cluster')

    await_delete_cluster = EKSClusterStateSensor(
        task_id='wait_for_delete_cluster',
        target_state=ClusterStates.NONEXISTENT,
    )

    # Linear pipeline: create, verify with a pod, then tear everything down.
    (
        create_cluster
        >> await_create_cluster
        >> create_fargate_profile
        >> await_create_fargate_profile
        >> start_pod
        >> delete_fargate_profile
        >> await_delete_fargate_profile
        >> delete_cluster
        >> await_delete_cluster
    )
| 35.317073 | 104 | 0.738029 |
509d77c00473377e4236c16b919d664f9651f9b4 | 4,264 | py | Python | python/ray/tune/config_parser.py | songqing/ray | 166000b089ee15d44635ebca00f12320f51ce587 | [
"Apache-2.0"
] | 1 | 2018-06-25T08:00:51.000Z | 2018-06-25T08:00:51.000Z | python/ray/tune/config_parser.py | songqing/ray | 166000b089ee15d44635ebca00f12320f51ce587 | [
"Apache-2.0"
] | 1 | 2018-01-26T05:11:04.000Z | 2018-01-26T05:11:04.000Z | python/ray/tune/config_parser.py | songqing/ray | 166000b089ee15d44635ebca00f12320f51ce587 | [
"Apache-2.0"
] | 1 | 2020-10-16T08:42:32.000Z | 2020-10-16T08:42:32.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
from ray.tune import TuneError
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.trial import Resources
def json_to_resources(data):
    """Parses a resource spec (JSON string or dict) into a ``Resources`` tuple.

    Args:
        data: ``None``, the literal string ``"null"``, a JSON-encoded object
            string, or an already-decoded dict whose keys are drawn from
            ``Resources._fields``.

    Returns:
        A ``Resources`` namedtuple, or ``None`` when no spec was given.

    Raises:
        TuneError: If a deprecated or unknown resource key is present.
    """
    if data is None or data == "null":
        return None
    # Accept either a raw JSON string (e.g. straight from the CLI) or an
    # already-parsed dict. isinstance() is the idiomatic type check here
    # (``type(x) is str`` rejects str subclasses for no reason).
    if isinstance(data, str):
        data = json.loads(data)
    for k in data:
        if k in ("driver_cpu_limit", "driver_gpu_limit"):
            raise TuneError(
                "The field `{}` is no longer supported. Use `extra_cpu` "
                "or `extra_gpu` instead.".format(k))
        if k not in Resources._fields:
            raise TuneError(
                "Unknown resource type {}, must be one of {}".format(
                    k, Resources._fields))
    # Missing keys fall back to the defaults: 1 CPU, everything else 0.
    return Resources(
        data.get("cpu", 1), data.get("gpu", 0), data.get("extra_cpu", 0),
        data.get("extra_gpu", 0))
def resources_to_json(resources):
    """Serializes a ``Resources`` tuple into a plain dict (or ``None``)."""
    if resources is None:
        return None
    return {
        field: getattr(resources, field)
        for field in ("cpu", "gpu", "extra_cpu", "extra_gpu")
    }
def _tune_error(msg):
    # Helper that raises TuneError from a callable context (``raise`` is a
    # statement, so it cannot appear inside an expression such as a lambda).
    raise TuneError(msg)
def make_parser(**kwargs):
    """Returns a base argument parser for the ray.tune tool."""
    parser = argparse.ArgumentParser(**kwargs)

    # Declarative table of CLI options: (flag, add_argument keyword args).
    # Note: keep this in sync with rllib/train.py
    option_table = [
        ("--run", {
            "default": None,
            "type": str,
            "help": "The algorithm or model to train. This may refer to the name "
                    "of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
                    "user-defined trainable function or class registered in the "
                    "tune registry.",
        }),
        ("--stop", {
            "default": "{}",
            "type": json.loads,
            "help": "The stopping criteria, specified in JSON. The keys may be any "
                    "field in TrainingResult, e.g. "
                    "'{\"time_total_s\": 600, \"timesteps_total\": 100000}' to stop "
                    "after 600 seconds or 100k timesteps, whichever is reached "
                    "first.",
        }),
        ("--config", {
            "default": "{}",
            "type": json.loads,
            "help": "Algorithm-specific configuration (e.g. env, hyperparams), "
                    "specified in JSON.",
        }),
        ("--trial-resources", {
            "default": None,
            "type": json_to_resources,
            "help": "Override the machine resources to allocate per trial, e.g. "
                    "'{\"cpu\": 64, \"gpu\": 8}'. Note that GPUs will not be "
                    "assigned unless you specify them here. For RLlib, you "
                    "probably want to "
                    "leave this alone and use RLlib configs to control parallelism.",
        }),
        ("--repeat", {
            "default": 1,
            "type": int,
            "help": "Number of times to repeat each trial.",
        }),
        ("--local-dir", {
            "default": DEFAULT_RESULTS_DIR,
            "type": str,
            "help": "Local dir to save training results to. Defaults to "
                    "'{}'.".format(DEFAULT_RESULTS_DIR),
        }),
        ("--upload-dir", {
            "default": "",
            "type": str,
            "help": "Optional URI to sync training results to (e.g. s3://bucket).",
        }),
        ("--checkpoint-freq", {
            "default": 0,
            "type": int,
            "help": "How many training iterations between checkpoints. "
                    "A value of 0 (default) disables checkpointing.",
        }),
        ("--max-failures", {
            "default": 3,
            "type": int,
            "help": "Try to recover a trial from its last checkpoint at least "
                    "this many times. Only applies if checkpointing is enabled.",
        }),
        ("--scheduler", {
            "default": "FIFO",
            "type": str,
            "help": "FIFO (default), MedianStopping, AsyncHyperBand, "
                    "HyperBand, or HyperOpt.",
        }),
        ("--scheduler-config", {
            "default": "{}",
            "type": json.loads,
            "help": "Config options to pass to the scheduler.",
        }),
        # Note: this currently only makes sense when running a single trial
        ("--restore", {
            "default": None,
            "type": str,
            "help": "If specified, restore from this checkpoint.",
        }),
    ]

    for flag, options in option_table:
        parser.add_argument(flag, **options)

    return parser
| 32.549618 | 79 | 0.605535 |
fd5a25effc5388a0487d34942b144b6890ac6ad3 | 5,801 | py | Python | lib/tests/streamlit/ReportSession_test.py | rajvijay68/streamlit | b94473302f77980ff090ab81fb8a7022388e593e | [
"Apache-2.0"
] | 1 | 2020-03-26T11:38:20.000Z | 2020-03-26T11:38:20.000Z | lib/tests/streamlit/ReportSession_test.py | rubmu/streamlit | 7f15c0f2cb8711a128d1671d73ff297af45f07c0 | [
"Apache-2.0"
] | null | null | null | lib/tests/streamlit/ReportSession_test.py | rubmu/streamlit | 7f15c0f2cb8711a128d1671d73ff297af45f07c0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import tornado.gen
import tornado.testing
from mock import MagicMock, patch
from streamlit.ReportSession import ReportSession
from streamlit.ScriptRunner import ScriptRunner
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.StaticManifest_pb2 import StaticManifest
from tests.MockStorage import MockStorage
class ReportSessionTest(unittest.TestCase):
    # NOTE: mock.patch decorators are applied bottom-up, so the injected mocks
    # arrive in reverse order: LocalSourcesWatcher -> _1, Report -> _2,
    # config -> patched_config.
    @patch("streamlit.ReportSession.config")
    @patch("streamlit.ReportSession.Report")
    @patch("streamlit.ReportSession.LocalSourcesWatcher")
    def test_enqueue_without_tracer(self, _1, _2, patched_config):
        """Make sure we try to handle execution control requests.
        """

        def get_option(name):
            if name == "server.runOnSave":
                # Just to avoid starting the watcher for no reason.
                return False
            if name == "client.displayEnabled":
                return True
            if name == "runner.installTracer":
                return False
            raise RuntimeError("Unexpected argument to get_option: %s" % name)

        patched_config.get_option.side_effect = get_option

        rs = ReportSession(None, "", "")
        mock_script_runner = MagicMock()
        # Use the real tracer-installation logic on the mocked runner so the
        # code path under test matches production behavior.
        mock_script_runner._install_tracer = ScriptRunner._install_tracer
        rs._scriptrunner = mock_script_runner

        rs.enqueue({"dontcare": 123})

        func = mock_script_runner.maybe_handle_execution_control_request

        # Expect func to be called only once, inside enqueue().
        func.assert_called_once()

    @patch("streamlit.ReportSession.config")
    @patch("streamlit.ReportSession.Report")
    @patch("streamlit.ReportSession.LocalSourcesWatcher")
    def test_enqueue_with_tracer(self, _1, _2, patched_config):
        """Make sure there is no lock contention when tracer is on.

        When the tracer is set up, we want
        maybe_handle_execution_control_request to be executed only once. There
        was a bug in the past where it was called twice: once from the tracer
        and once from the enqueue function. This caused a lock contention.
        """

        def get_option(name):
            if name == "server.runOnSave":
                # Just to avoid starting the watcher for no reason.
                return False
            if name == "client.displayEnabled":
                return True
            if name == "runner.installTracer":
                return True
            raise RuntimeError("Unexpected argument to get_option: %s" % name)

        patched_config.get_option.side_effect = get_option

        rs = ReportSession(None, "", "")
        mock_script_runner = MagicMock()
        rs._scriptrunner = mock_script_runner

        rs.enqueue({"dontcare": 123})

        func = mock_script_runner.maybe_handle_execution_control_request

        # In reality, outside of a testing environment func should be called
        # once. But in this test we're actually not installing a tracer here,
        # since Report is mocked. So the correct behavior here is for func to
        # never be called. If you ever see it being called once here it's
        # likely because there's a bug in the enqueue function (which should
        # skip func when installTracer is on).
        func.assert_not_called()
def _create_mock_websocket():
    """Builds a MagicMock websocket whose write_message coroutine is a no-op."""

    @tornado.gen.coroutine
    def _async_noop(*args, **kwargs):
        raise tornado.gen.Return(None)

    mock_ws = MagicMock()
    mock_ws.write_message.side_effect = _async_noop
    return mock_ws
class ReportSessionSerializationTest(tornado.testing.AsyncTestCase):
    @patch("streamlit.ReportSession.LocalSourcesWatcher")
    # gen_test drives the generator below on this test case's IOLoop, so the
    # `yield` of handle_save_request runs to completion before assertions.
    @tornado.testing.gen_test
    def test_handle_save_request(self, _1):
        """Test that handle_save_request serializes files correctly."""
        # Create a ReportSession with some mocked bits
        rs = ReportSession(self.io_loop, "mock_report.py", "")
        rs._report.report_id = "TestReportID"

        rs._scriptrunner = MagicMock()

        # Capture uploads in memory instead of hitting real storage.
        storage = MockStorage()
        rs._storage = storage

        # Send two deltas: empty and markdown
        rs._main_dg.empty()
        rs._main_dg.markdown("Text!")

        yield rs.handle_save_request(_create_mock_websocket())

        # Check the order of the received files. Manifest should be last.
        # Two delta messages plus one manifest = three files total.
        self.assertEqual(3, len(storage.files))
        self.assertEqual("reports/TestReportID/0.pb", storage.get_filename(0))
        self.assertEqual("reports/TestReportID/1.pb", storage.get_filename(1))
        self.assertEqual("reports/TestReportID/manifest.pb", storage.get_filename(2))

        # Check the manifest
        manifest = storage.get_message(2, StaticManifest)
        self.assertEqual("mock_report", manifest.name)
        self.assertEqual(2, manifest.num_messages)
        self.assertEqual(StaticManifest.DONE, manifest.server_status)

        # Check that the deltas we sent match messages in storage
        sent_messages = rs._report._master_queue._queue
        received_messages = [
            storage.get_message(0, ForwardMsg),
            storage.get_message(1, ForwardMsg),
        ]
        self.assertEqual(sent_messages, received_messages)
| 38.417219 | 85 | 0.686433 |
862948610d114c504ce24b421b7f184206baa0b5 | 8,318 | py | Python | tensorflow/python/kernel_tests/bias_op_test.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 101 | 2016-12-03T11:40:52.000Z | 2017-12-23T02:02:03.000Z | tensorflow/python/kernel_tests/bias_op_test.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 9 | 2016-12-14T03:27:46.000Z | 2017-09-13T02:29:07.000Z | tensorflow/python/kernel_tests/bias_op_test.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 47 | 2016-12-04T12:37:24.000Z | 2018-01-14T18:13:07.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BiasAdd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def GetTestConfigs():
  """Get all the valid tests configs to run.

  Returns:
    all the valid test configs as tuples of data_format and use_gpu.
  """
  configs = [("NHWC", False), ("NHWC", True)]
  # "NCHW" format is not currently supported on CPU.
  if tf.test.is_gpu_available():
    configs.append(("NCHW", True))
  return configs
class BiasAddTest(tf.test.TestCase):
  """Tests tf.nn.bias_add against a NumPy reference, on CPU/GPU, NHWC/NCHW."""

  def _npBias(self, inputs, bias):
    """NumPy reference implementation of bias_add (channels-last layout)."""
    # Fix: removed leftover debug print() calls that spammed stdout on every
    # test case run through this helper.
    assert len(bias.shape) == 1
    assert inputs.shape[-1] == bias.shape[0]
    # Reshape bias to (1, ..., 1, C) so it broadcasts over all leading dims.
    return inputs + bias.reshape(([1] * (len(inputs.shape) - 1))
                                 + [bias.shape[0]])

  def testNpBias(self):
    """Sanity-checks the NumPy reference implementation itself."""
    self.assertAllClose(np.array([[11, 22, 33], [41, 52, 63]]),
                        self._npBias(np.array([[10, 20, 30], [40, 50, 60]]),
                                     np.array([1, 2, 3])))

  def _testBias(self, np_inputs, np_bias, use_gpu=False):
    """Compares tf.nn.bias_add (default NHWC) against the NumPy reference."""
    np_val = self._npBias(np_inputs, np_bias)
    with self.test_session(use_gpu=use_gpu):
      tf_val = tf.nn.bias_add(np_inputs, np_bias).eval()
    self.assertAllCloseAccordingToType(np_val, tf_val)

  def _AtLeast3d(self, np_value):
    """Pads np_value with leading singleton dims until it is at least 3-D."""
    if np_value.ndim < 3:
      return np.reshape(np_value, (1,) * (3 - np_value.ndim) + np_value.shape)
    return np_value

  def _NHWCToNCHW(self, np_value):
    """Transposes a channels-last array to channels-third-from-last."""
    # fill the input value to at least 3-dimension
    np_value = self._AtLeast3d(np_value)
    # move the last dimension to third-to-last
    np_dim = list(range(np_value.ndim))
    np_dim_new = list(np_dim[0:-3]) + list(np_dim[-1:]) + list(np_dim[-3:-1])
    return np.transpose(np_value, np_dim_new)

  def _NCHWToNHWC(self, np_value):
    """Inverse of _NHWCToNCHW: moves the channel dimension back to last."""
    assert len(np_value.shape) >= 3
    np_dim = list(range(np_value.ndim))
    # move the third-to-last dimension to the last
    np_dim_new = list(np_dim[0:-3]) + list(np_dim[-2:]) + list(np_dim[-3:-2])
    return np.transpose(np_value, np_dim_new)

  def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
    """Compares bias_add with data_format='NCHW' against the reference."""
    np_val = self._npBias(np_inputs, np_bias)
    np_inputs = self._NHWCToNCHW(np_inputs)
    with self.test_session(use_gpu=use_gpu):
      tf_val = tf.nn.bias_add(np_inputs, np_bias, data_format="NCHW").eval()
    tf_val = self._NCHWToNHWC(tf_val)
    self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)

  def _testAll(self, np_inputs, np_bias):
    """Runs the CPU variant always, plus GPU and NCHW variants for floats."""
    self._testBias(np_inputs, np_bias, use_gpu=False)
    if np_inputs.dtype in [np.float16, np.float32, np.float64]:
      self._testBias(np_inputs, np_bias, use_gpu=True)
      if tf.test.is_gpu_available():
        self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)

  def testInputDims(self):
    """A 1-D input is rejected: bias_add requires at least 2-D input."""
    with self.assertRaises(ValueError):
      tf.nn.bias_add([1, 2], [1])

  def testBiasVec(self):
    """A 2-D bias is rejected: the bias must be 1-D."""
    with self.assertRaises(ValueError):
      tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
                     tf.reshape([1, 2], shape=[1, 2]))

  def testBiasInputsMatch(self):
    """A bias whose length differs from the input's last dim is rejected."""
    with self.assertRaises(ValueError):
      tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
                     tf.reshape([1], shape=[1]))

  def testIntTypes(self):
    for t in [np.int8, np.int16, np.int32, np.int64]:
      self._testAll(np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
                    np.array([1, 2, 3]).astype(t))

  def testFloatTypes(self):
    for t in [np.float16, np.float32, np.float64]:
      self._testAll(np.random.rand(4, 3, 3).astype(t),
                    np.random.rand(3).astype(t))

  def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
    """Numerically checks the gradients of bias_add and of BiasAddGrad."""
    with self.test_session(use_gpu=use_gpu):
      if data_format == "NCHW":
        np_input = self._NHWCToNCHW(np_input)
      input_tensor = tf.constant(np_input, shape=np_input.shape, dtype=dtype)
      bias_tensor = tf.constant(bias, shape=bias.shape, dtype=dtype)
      output_tensor = tf.nn.bias_add(input_tensor, bias_tensor,
                                     data_format=data_format)
      tensor_jacob_t, tensor_jacob_n = tf.test.compute_gradient(
          input_tensor, np_input.shape, output_tensor, np_input.shape)
      bias_jacob_t, bias_jacob_n = tf.test.compute_gradient(
          bias_tensor, bias.shape, output_tensor, np_input.shape)

      # Test gradient of BiasAddGrad
      bias_add_grad = tf.gradients(tf.nn.l2_loss(output_tensor),
                                   bias_tensor)[0]
      grad_jacob_t, grad_jacob_n = tf.test.compute_gradient(
          output_tensor, np_input.shape, bias_add_grad, bias.shape)

      if dtype == np.float16:
        # Compare fp16 theoretical gradients to fp32 numerical gradients,
        # since fp16 numerical gradients are too imprecise unless great
        # care is taken with choosing the inputs and the delta. This is
        # a weaker check (in particular, it does not test the op itself,
        # only its gradient), but it's much better than nothing.
        input_tensor = tf.constant(np_input, shape=np_input.shape,
                                   dtype=np.float32)
        bias_tensor = tf.constant(bias, shape=bias.shape, dtype=np.float32)
        output_tensor = tf.nn.bias_add(input_tensor, bias_tensor,
                                       data_format=data_format)
        _, tensor_jacob_n = tf.test.compute_gradient(
            input_tensor, np_input.shape, output_tensor, np_input.shape)
        _, bias_jacob_n = tf.test.compute_gradient(
            bias_tensor, bias.shape, output_tensor, np_input.shape)
        bias_add_grad = tf.gradients(tf.nn.l2_loss(output_tensor),
                                     bias_tensor)[0]
        _, grad_jacob_n = tf.test.compute_gradient(
            output_tensor, np_input.shape, bias_add_grad, bias.shape)

      # float64 gradients should be near-exact; other dtypes get slack.
      threshold = 2e-3
      if dtype == tf.float64:
        threshold = 1e-10
      self.assertAllClose(tensor_jacob_t, tensor_jacob_n, threshold, threshold)
      self.assertAllClose(bias_jacob_t, bias_jacob_n, threshold, threshold)
      self.assertAllClose(grad_jacob_t, grad_jacob_n, threshold, threshold)

  def testGradientTensor(self):
    for (data_format, use_gpu) in GetTestConfigs():
      for dtype in (tf.float16, tf.float32, tf.float64):
        np_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
                            dtype=dtype.as_numpy_dtype).reshape(3, 2)
        bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
        self._testGradient(np_input, bias, dtype, data_format, use_gpu)

  def testGradientTensor4D(self):
    for (data_format, use_gpu) in GetTestConfigs():
      for dtype in (tf.float16, tf.float32, tf.float64):
        np_input = np.arange(1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape(
            [2, 3, 4, 2]).astype(np.float32)
        bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
        self._testGradient(np_input, bias, dtype, data_format, use_gpu)

  def testEmpty(self):
    # Zero-sized dimensions must not crash the op.
    np.random.seed(7)
    for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
      self._testAll(np.random.randn(*shape), np.random.randn(shape[-1]))

  def testEmptyGradient(self):
    for data_format, use_gpu in GetTestConfigs():
      for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
        self._testGradient(np.random.randn(*shape), np.random.randn(shape[-1]),
                           tf.float64, data_format, use_gpu)
if __name__ == "__main__":
  # Run all test cases in this module via the TensorFlow test runner.
  tf.test.main()
| 42.438776 | 80 | 0.644265 |
93cd6219fc824ed7050e241badb44b61669b0bb1 | 9,972 | py | Python | mac/google-cloud-sdk/lib/surface/compute/diagnose/export_logs.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | null | null | null | mac/google-cloud-sdk/lib/surface/compute/diagnose/export_logs.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | null | null | null | mac/google-cloud-sdk/lib/surface/compute/diagnose/export_logs.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Triggers instance to gather logs and upload them to a GCS Bucket."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import base64
import datetime
import json
import time
from apitools.base.py.exceptions import HttpError
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.diagnose import diagnose_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.command_lib.util import time_util
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
import six
_DIAGNOSTICS_METADATA_KEY = 'diagnostics'
_SERVICE_ACCOUNT_NAME = 'gce-diagnostics-extract-logs'
_GCS_LOGS_BUCKET_PREFIX = 'diagnostics_logs_project'
_SUCCESS_MSG = """Log collection has begun.
It may take several minutes for this operation to complete.
Logs will be made available shortly at:
gs://{0}/{1}"""
DETAILED_HELP = {
'EXAMPLES':
"""\
To export logs and upload them to a Cloud Storage Bucket, run:
$ {command} example-instance --zone=us-central1
""",
}
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class ExportLogs(base_classes.BaseCommand):
  """Triggers instance to gather logs and upload them to a Cloud Storage Bucket.

  Gathers logs from a running Compute Engine VM and exports them to a Google
  Cloud Storage Bucket. Outputs a path to the logs within the Bucket.
  """

  detailed_help = DETAILED_HELP

  @classmethod
  def Args(cls, parser):
    """See base class."""
    instance_flags.INSTANCE_ARG.AddArgument(parser)
    parser.add_argument(
        '--collect-process-traces',
        action='store_true',
        help=('Collect a 10 minute trace of the running system. On Windows, '
              'this utilizes Windows Performance Recorder. It records CPU, '
              'disk, file, and network activity during that time.'))
    parser.display_info.AddFormat('none')
    return

  def Run(self, args):
    """See base class."""
    self._diagnose_client = diagnose_utils.DiagnoseClient()
    instance_ref = self._ResolveInstance(args)
    project = properties.VALUES.core.project.Get(required=True)
    # Ensure a signing-capable service account exists, then build a signed
    # URL the VM can use to upload its log archive without credentials.
    service_account = self._GetDiagnosticsServiceAccount(project)
    expiration_time = self._GetSignedUrlExpiration()
    bucket = self._GetLogBucket(project)
    log_path = self._GetLogPath(instance_ref.instance)
    url = self._CreateResumableSignedUrl(service_account, expiration_time,
                                         bucket, log_path)
    # Writing this metadata entry is what requests log collection from the VM.
    diagnostics_entry = self._ConstructDiagnosticsKeyEntry(
        url, args.collect_process_traces)
    self._diagnose_client.UpdateMetadata(
        project, instance_ref, _DIAGNOSTICS_METADATA_KEY, diagnostics_entry)
    log.Print(_SUCCESS_MSG.format(bucket, log_path))
    return {'bucket': bucket, 'logPath': log_path, 'signedUrl': url}

  def _CreateResumableSignedUrl(self, service_account, expiration, bucket,
                                filepath):
    """Make a resumable signed url using the SignBlob API of a Service Account.

    This creates a signed url that can be used by another program to upload a
    single file to the specified bucket with the specified file name.

    Args:
      service_account: The email of a service account that has permissions to
        sign a blob and create files within GCS Buckets.
      expiration: The time at which the returned signed url becomes invalid,
        measured in seconds since the epoch.
      bucket: The name of the bucket the signed url will point to.
      filepath: The name or relative path the file will have within the bucket
        once uploaded.

    Returns:
      A string url that can be used until its expiration to upload a file.
    """
    # String-to-sign for a resumable upload initiation (V2-style signed URL):
    # verb, blank Content-MD5/Content-Type, expiry, the x-goog-resumable
    # extension header, and the canonicalized resource path.
    url_data = six.ensure_binary(
        'POST\n\n\n{0}\nx-goog-resumable:start\n/{1}/{2}'.format(
            expiration, bucket, filepath))
    signature = six.ensure_binary(
        self._diagnose_client.SignBlob(service_account, url_data))
    encoded_sig = base64.b64encode(signature)
    url = ('https://storage.googleapis.com/'
           '{0}/{1}?GoogleAccessId={2}&Expires={3}&Signature={4}')
    # The base64 signature may contain '+' and '/', so it must be URL-escaped.
    url_suffix = six.moves.urllib.parse.quote_plus(encoded_sig)
    return url.format(bucket, filepath, service_account, expiration, url_suffix)

  def _GetDiagnosticsServiceAccount(self, project):
    """Locates or creates a service account with the correct permissions.

    Attempts to locate the service account meant for creating the signed url.
    If not found, it will subsequently create the service account. It will then
    give the service account the correct IAM permissions to create a signed url
    to a GCS Bucket.

    Args:
      project: The project to search for the service account in.

    Returns:
      A string email of the service account to use.
    """
    # Search for service account by name.
    service_account = None
    for account in self._diagnose_client.ListServiceAccounts(project):
      if account.email.startswith('{}@'.format(_SERVICE_ACCOUNT_NAME)):
        service_account = account.email
    if service_account is None:
      service_account = self._diagnose_client.CreateServiceAccount(
          project, _SERVICE_ACCOUNT_NAME)
    # We can apply the correct IAM permissions for accessing the GCS Bucket
    # regardless of whether or not the account already has them.
    # (AddIamPolicyBinding is treated as idempotent here.)
    project_ref = projects_util.ParseProject(project)
    service_account_ref = 'serviceAccount:{}'.format(service_account)
    projects_api.AddIamPolicyBinding(project_ref, service_account_ref,
                                     'roles/storage.objectCreator')
    projects_api.AddIamPolicyBinding(project_ref, service_account_ref,
                                     'roles/storage.objectViewer')
    return service_account

  def _GetSignedUrlExpiration(self, hours=1):
    """Generate a string expiration time based on some hours in the future.

    Args:
      hours: The number of hours in the future for your timestamp to represent

    Returns:
      A string timestamp measured in seconds since the epoch.
    """
    # NOTE(review): uses local time via datetime.now()/time.mktime, which is
    # fine for epoch math but worth confirming against the signing backend.
    expiration = datetime.datetime.now() + datetime.timedelta(hours=hours)
    expiration_seconds = time.mktime(expiration.timetuple())
    return six.text_type(int(expiration_seconds))

  def _GetLogBucket(self, project_id):
    """Locates or creates the GCS Bucket for logs associated with the project.

    Args:
      project_id: The id number of the project the bucket is associated with.

    Returns:
      The name of the GCS Bucket.
    """
    project_number = self._GetProjectNumber(project_id)
    bucket_name = '{}_{}'.format(_GCS_LOGS_BUCKET_PREFIX, project_number)
    bucket = self._diagnose_client.FindBucket(project_id, bucket_name)
    if bucket is None:
      # Logs are transient; objects expire automatically after 10 days.
      bucket = self._diagnose_client.CreateBucketWithLifecycle(days_to_live=10)
      bucket.name = bucket_name
      suffix = 0
      # We can't guarantee that our chosen bucket name isn't already taken, so
      # we may have to try multiple suffixes before we generate a unique name.
      bucket_name_taken = True
      while bucket_name_taken:
        try:
          self._diagnose_client.InsertBucket(project_id, bucket)
          bucket_name_taken = False
        except HttpError as e:
          # Error 409 means that bucket name already exists.
          if e.status_code != 409:
            raise e
          bucket.name = '{}_{}'.format(bucket_name, suffix)
          suffix += 1
    return bucket.name

  def _GetProjectNumber(self, project_id):
    """Converts a project id to a project number."""
    project_ref = projects_util.ParseProject(project_id)
    project = projects_api.Get(project_ref)
    return project.projectNumber

  def _GetLogPath(self, instance):
    """Creates a timestamped filename that should be realistically unique."""
    # Microsecond precision makes collisions for the same instance unlikely.
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')
    return '{}-logs-{}.zip'.format(instance, timestamp)

  def _ResolveInstance(self, args):
    """Resolves the arguments into an instance.

    Args:
      args: The command line arguments.

    Returns:
      An instance reference to a VM.
    """
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    compute_client = holder.client
    resources = holder.resources
    instance_ref = instance_flags.INSTANCE_ARG.ResolveAsResource(
        args,
        resources,
        scope_lister=instance_flags.GetInstanceZoneScopeLister(compute_client))
    return instance_ref

  def _ConstructDiagnosticsKeyEntry(self, signed_url, trace):
    """Generates a JSON String that is a command for the VM to extract the logs.

    Args:
      signed_url: The url where the logs can be uploaded.
      trace: Whether or not to take a 10 minute trace on the VM.

    Returns:
      A JSON String that can be written to the metadata server to trigger the
      extraction of logs.
    """
    # The request itself expires after 300 seconds if the VM never acts on it.
    expire_str = time_util.CalculateExpiration(300)
    diagnostics_key_data = {
        'signedUrl': signed_url,
        'trace': trace,
        'expireOn': expire_str
    }
    # sort_keys keeps the serialized entry deterministic.
    return json.dumps(diagnostics_key_data, sort_keys=True)
| 38.061069 | 80 | 0.717308 |
35d54dd2df8af7d48f462f758fa1bc5e22645121 | 2,416 | py | Python | src/test/python/programmingtheiot/part03/integration/app/DeviceDataManagerWithMqttClientOnly.py | NULishengZhang/piot-python-components | 006674bc42443bb2a843bfd7dfa5b55be9843961 | [
"MIT"
] | 6 | 2021-06-15T20:30:53.000Z | 2022-01-20T20:09:41.000Z | src/test/python/programmingtheiot/part03/integration/app/DeviceDataManagerWithMqttClientOnly.py | NULishengZhang/piot-python-components | 006674bc42443bb2a843bfd7dfa5b55be9843961 | [
"MIT"
] | null | null | null | src/test/python/programmingtheiot/part03/integration/app/DeviceDataManagerWithMqttClientOnly.py | NULishengZhang/piot-python-components | 006674bc42443bb2a843bfd7dfa5b55be9843961 | [
"MIT"
] | 9 | 2020-11-19T20:05:44.000Z | 2022-02-25T05:17:31.000Z | #####
#
# This class is part of the Programming the Internet of Things
# project, and is available via the MIT License, which can be
# found in the LICENSE file at the top level of this repository.
#
# Copyright (c) 2020 by Andrew D. King
#
import logging
import unittest
from time import sleep
from programmingtheiot.cda.app.DeviceDataManager import DeviceDataManager
from programmingtheiot.cda.connection.MqttClientConnector import MqttClientConnector
from programmingtheiot.common.ResourceNameEnum import ResourceNameEnum
from programmingtheiot.data.DataUtil import DataUtil
from programmingtheiot.data.ActuatorData import ActuatorData
class DeviceDataManagerWithCommsTest(unittest.TestCase):
    """
    This test case class contains very basic integration tests for
    DeviceDataManager. It should not be considered complete,
    but serve as a starting point for the student implementing
    additional functionality within their Programming the IoT
    environment.

    NOTE: This test MAY require the sense_emu_gui to be running,
    depending on whether or not the 'enableEmulator' flag is
    True within the ConstraineDevice section of PiotConfig.props.
    If so, it must have access to the underlying libraries that
    support the pisense module. On Windows, one way to do
    this is by installing pisense and sense-emu within the
    Bash on Ubuntu on Windows environment and then execute this
    test case from the command line, as it will likely fail
    if run within an IDE in native Windows.
    """

    @classmethod
    def setUpClass(self):
        logging.basicConfig(format = '%(asctime)s:%(module)s:%(levelname)s:%(message)s', level = logging.DEBUG)
        logging.info("Testing DeviceDataManager class...")

    def setUp(self):
        pass

    def tearDown(self):
        pass

    #@unittest.skip("Ignore for now.")
    def testStartAndStopManagerWithMqtt(self):
        """
        NOTE: Be sure to enable CoAP by setting the following flag to True
        within PiotConfig.props

        enableMqttClient = True
        enableCoapClient = False
        """
        ddMgr = DeviceDataManager()
        ddMgr.startManager()

        mqttClient = MqttClientConnector()
        mqttClient.connectClient()

        # Publish a single actuation command (command value 1) as JSON on the
        # CDA actuator-command topic at QoS 1.
        ad = ActuatorData()
        ad.setCommand(1)

        adJson = DataUtil().actuatorDataToJson(ad)

        mqttClient.publishMessage(ResourceNameEnum.CDA_ACTUATOR_CMD_RESOURCE, msg = adJson, qos = 1)

        # Give the asynchronous MQTT round-trip time to complete before teardown.
        sleep(10)

        mqttClient.disconnectClient()
        ddMgr.stopManager()
# Script entry point: run this test case via unittest's CLI runner.
if __name__ == "__main__":
    unittest.main()
| 29.463415 | 105 | 0.772351 |
70fd40d70910570216d7a4b381607a86284a5a89 | 6,500 | py | Python | pykt/plugins.py | div72/py2many | 60277bc13597bd32d078b88a7390715568115fc6 | [
"MIT"
] | 1 | 2021-05-14T00:40:10.000Z | 2021-05-14T00:40:10.000Z | pykt/plugins.py | div72/py2many | 60277bc13597bd32d078b88a7390715568115fc6 | [
"MIT"
] | 1 | 2021-07-07T05:29:15.000Z | 2021-07-07T05:29:15.000Z | pykt/plugins.py | div72/py2many | 60277bc13597bd32d078b88a7390715568115fc6 | [
"MIT"
] | null | null | null | import io
import os
import ast
import functools
import re
import sys
import textwrap
from tempfile import NamedTemporaryFile
from typing import Callable, Dict, List, Tuple, Union
try:
from argparse_dataclass import dataclass as ap_dataclass
from argparse_dataclass import ArgumentParser
except:
ArgumentParser = "ArgumentParser"
ap_dataclass = "ap_dataclass"
class KotlinTranspilerPlugins:
    """Per-construct code-generation hooks used by the Kotlin transpiler.

    Each ``visit_*`` method receives the AST node plus the already-visited
    argument strings (``vargs``) and returns the Kotlin source text to emit,
    or ``None`` when the construct is unsupported.

    NOTE(review): visit_argparse_dataclass, visit_open and
    visit_named_temp_file still emit Rust-flavored output (structopt,
    OpenOptions, File::open) -- apparently copied from the Rust backend and
    not yet ported to Kotlin. Their output is deliberately left unchanged.
    """

    def visit_argparse_dataclass(self, node):
        """Render an argparse dataclass as a (Rust-style) structopt struct.

        Returns None when any field has an unknown type, signalling that the
        class cannot be rendered.
        """
        fields = []
        for (
            declaration,
            typename_with_default,
        ) in node.declarations_with_defaults.items():
            typename, default_value = typename_with_default
            if typename is None:
                return None
            if default_value is not None and typename != "bool":
                default_value = self.visit(default_value)
                default_value = f', default_value = "{default_value}"'
            else:
                default_value = ""
            fields.append(
                f"#[structopt(short, long{default_value})]\npub {declaration}: {typename},"
            )
        fields = "\n".join(fields)
        self._usings.add("structopt::StructOpt")
        clsdef = "\n" + textwrap.dedent(
            f"""\
            #[derive(Debug, StructOpt)]
            #[structopt(name = "{self._module}", about = "Placeholder")]
            struct {node.name} {{
            {fields}
            }}
            """
        )
        return clsdef

    def visit_open(self, node, vargs):
        """Translate open(path[, mode]) calls (currently Rust-style output)."""
        self._usings.add("std::fs::File")
        if len(vargs) > 1:
            self._usings.add("std::fs::OpenOptions")
            mode = vargs[1]
            opts = "OpenOptions::new()"
            is_binary = "b" in mode
            for c in mode:
                if c == "w":
                    if not is_binary:
                        self._usings.add("pylib::FileWriteString")
                    opts += ".write(true)"
                if c == "r":
                    if not is_binary:
                        self._usings.add("pylib::FileReadString")
                    opts += ".read(true)"
                if c == "a":
                    opts += ".append(true)"
                if c == "+":
                    opts += ".read(true).write(true)"
            node.result_type = True
            return f"{opts}.open({vargs[0]})"
        node.result_type = True
        return f"File::open({vargs[0]})"

    def visit_named_temp_file(self, node, vargs):
        """Translate tempfile.NamedTemporaryFile() construction."""
        node.annotation = ast.Name(id="tempfile._TemporaryFileWrapper")
        node.result_type = True
        return "NamedTempFile::new()"

    def visit_textio_read(self, node, vargs):
        # TODO: not implemented yet.
        return None

    def visit_textio_write(self, node, vargs):
        # TODO: not implemented yet.
        return None

    def visit_ap_dataclass(self, cls):
        # Do whatever transformation the decorator does to cls here.
        return cls

    def visit_range(self, node, vargs: List[str]) -> str:
        """Map range(...) onto a Kotlin integer range.

        Kotlin ranges are inclusive at both ends, hence the trailing ``-1``.
        """
        if len(node.args) == 1:
            return "(0..{}-1)".format(vargs[0])
        elif len(node.args) == 2:
            return "({}..{}-1)".format(vargs[0], vargs[1])
        elif len(node.args) == 3:
            return "({}..{}-1 step {})".format(vargs[0], vargs[1], vargs[2])
        raise Exception(
            "encountered range() call with unknown parameters: range({})".format(vargs)
        )

    def visit_print(self, node, vargs: List[str]) -> str:
        """Map print(...) onto println using Kotlin string templates."""

        def _format(arg):
            # Numeric and quoted literals are inlined verbatim; any other
            # expression becomes a $-interpolated template entry.
            if arg.isdigit():
                return arg
            if re.match(r"'.*'", arg) or re.match(r'".*"', arg):
                return arg[1:-1]
            else:
                return f"${arg}"

        vargs_str = " ".join([f"{_format(arg)}" for arg in vargs])
        return f'println("{vargs_str}")'

    def visit_min_max(self, node, vargs, is_max: bool) -> str:
        """Map min()/max() onto kotlin.math.min/max or the collection
        minOf/maxOf extension when the argument is a container.

        BUG FIX: the container branch previously emitted ``maxOf`` even for
        ``min()``; it now honors ``is_max``.
        """
        min_max = "max" if is_max else "min"
        self._usings.add(f"kotlin.math.{min_max}")
        self._typename_from_annotation(node.args[0])
        if hasattr(node.args[0], "container_type"):
            return f"{min_max}Of({vargs[0]})"
        else:
            all_vargs = ", ".join(vargs)
            return f"{min_max}({all_vargs})"

    def visit_floor(self, node, vargs) -> str:
        """Map math.floor(x) onto floor(x).toInt()."""
        self._usings.add("kotlin.math.floor")
        return f"floor({vargs[0]}).toInt()"
# Small one-liners are inlined here as lambdas.
SMALL_DISPATCH_MAP = {
    "str": lambda n, vargs: f"{vargs[0]}.toString()",
    # TODO: strings use .length
    "len": lambda n, vargs: f"{vargs[0]}.size",
    "int": lambda n, vargs: f"{vargs[0]}.toInt()",
    "bool": lambda n, vargs: f"({vargs[0]} != 0)",
    "reversed": lambda n, vargs: f"{vargs[0]}.reversed()",
}

SMALL_USINGS_MAP: Dict[str, str] = {}

# Name-based dispatch for builtins handled by KotlinTranspilerPlugins methods.
DISPATCH_MAP = {
    "max": functools.partial(KotlinTranspilerPlugins.visit_min_max, is_max=True),
    "min": functools.partial(KotlinTranspilerPlugins.visit_min_max, is_max=False),
    "range": KotlinTranspilerPlugins.visit_range,
    "xrange": KotlinTranspilerPlugins.visit_range,
    "print": KotlinTranspilerPlugins.visit_print,
    "floor": KotlinTranspilerPlugins.visit_floor,
}

MODULE_DISPATCH_TABLE: Dict[str, str] = {}

DECORATOR_DISPATCH_TABLE = {ap_dataclass: KotlinTranspilerPlugins.visit_ap_dataclass}

CLASS_DISPATCH_TABLE = {ap_dataclass: KotlinTranspilerPlugins.visit_argparse_dataclass}

ATTR_DISPATCH_TABLE = {
    "temp_file.name": lambda self, node, value, attr: f"{value}.path()",
}

FuncType = Union[Callable, str]

FUNC_DISPATCH_TABLE: Dict[FuncType, Tuple[Callable, bool]] = {
    # Uncomment after upstream uploads a new version
    # ArgumentParser.parse_args: lambda node: "Opts::parse_args()",
    # HACKs: remove all string based dispatch here, once we replace them with type based
    "parse_args": (lambda self, node, vargs: "::from_args()", False),
    "f.read": (lambda self, node, vargs: "f.read_string()", True),
    "f.write": (lambda self, node, vargs: f"f.write_string({vargs[0]})", True),
    "f.close": (lambda self, node, vargs: "drop(f)", False),
    open: (KotlinTranspilerPlugins.visit_open, True),
    NamedTemporaryFile: (KotlinTranspilerPlugins.visit_named_temp_file, True),
    io.TextIOWrapper.read: (KotlinTranspilerPlugins.visit_textio_read, True),
    # BUG FIX: this key was io.TextIOWrapper.read twice, so the read handler
    # was silently overwritten by the write handler. Keyed on .write now.
    io.TextIOWrapper.write: (KotlinTranspilerPlugins.visit_textio_write, True),
    os.unlink: (lambda self, node, vargs: f"std::fs::remove_file({vargs[0]})", True),
    sys.exit: (
        lambda self, node, vargs: f"kotlin.system.exitProcess({vargs[0]})",
        True,
    ),
}
| 35.519126 | 91 | 0.591231 |
0849aabaf35496294b60e56c3bd52d900e72fe85 | 3,208 | py | Python | backend/scripts/load_neighborhoods.py | violetaria/saveourfaves-server | f8777b137c2fb8a715afa3408a0a081cec3b93b9 | [
"MIT"
] | 1 | 2020-03-26T18:14:51.000Z | 2020-03-26T18:14:51.000Z | backend/scripts/load_neighborhoods.py | violetaria/saveourfaves-server | f8777b137c2fb8a715afa3408a0a081cec3b93b9 | [
"MIT"
] | 2 | 2020-03-26T19:37:49.000Z | 2020-03-27T00:01:26.000Z | backend/scripts/load_neighborhoods.py | violetaria/saveourfaves-server | f8777b137c2fb8a715afa3408a0a081cec3b93b9 | [
"MIT"
] | null | null | null | import json
import django
import sys
import os
# os.environ['DJANGO_SETTINGS_MODULE'] = 'carebackend.settings.base'
sys.path.append(os.path.dirname(__file__) + '/..')
django.setup()
from places.models import Neighborhood, NeighborhoodEntry, Place, Area
from django.contrib.gis.geos import Polygon
import pandas as pd
from places.google_places_helper import fetch_details_for_place_id
# --- CLI arguments --------------------------------------------------------
# argv[1]: CSV file describing neighborhoods
# argv[2]: key of the default Area to attach neighborhoods to
# argv[3]: optional 'yes' to insert neighborhoods missing from the DB
fl = sys.argv[1]
area_to_use = sys.argv[2]
insert_if_not_found = sys.argv[3] == 'yes' if len(sys.argv) > 3 else False

area = Area.objects.get(key=area_to_use)

df = pd.read_csv(fl)
for _, row in df.iterrows():
    print("Processing", row['Neighborhood'])
    # Fall back to a snake_cased neighborhood name when no DB Key is given.
    db_key = row.get('DB Key', "_".join(row['Neighborhood'].split()).lower())
    # Overwrite area if it's there.
    # NOTE(review): this rebinds the loop-wide `area`, so rows *after* one
    # with an explicit Area keep using that Area instead of the CLI default.
    # Looks intentional per the comment above -- confirm.
    if row.get("Area") and not pd.isna(row['Area']):
        area = Area.objects.get(key=row.get("Area"))
    try:
        n = Neighborhood.objects.get(key=db_key)
    except Neighborhood.DoesNotExist:
        if insert_if_not_found:
            n = Neighborhood(name=row['Neighborhood'])
            n.key = db_key
        else:
            print("No DB Key match and not inserting, continuing...")
            continue
    if row.get('GeoJSON') and not pd.isna(row['GeoJSON']):
        # Normalize ring nesting so json.loads yields a single coordinate ring.
        if row['GeoJSON'].startswith('[[['):
            row['GeoJSON'] = row['GeoJSON'][1:-1]
        if not row['GeoJSON'].startswith('[['):
            row['GeoJSON'] = '[%s]' % row['GeoJSON']
        geo_json = json.loads(row['GeoJSON'])
        n.bounds = Polygon(geo_json)
        # BUG FIX: the original referenced ShapelyPolygon, which was never
        # imported and raised NameError on this branch. The GEOS Polygon we
        # just built exposes the same centroid, so use it directly.
        centroid = n.bounds.centroid
        lat = centroid.y
        lng = centroid.x
    elif row.get('Location') and not pd.isna(row['Location']):
        lat, lng = [x.strip() for x in row['Location'].split(',')]
    elif row.get('Geometry') and not pd.isna(row['Geometry']):
        # Geometry column carries a Google-Places-style geometry payload.
        geometry_json = json.loads(row['Geometry'])
        xmin = geometry_json['geometry']['viewport']['southwest']['lng']
        ymin = geometry_json['geometry']['viewport']['southwest']['lat']
        xmax = geometry_json['geometry']['viewport']['northeast']['lng']
        ymax = geometry_json['geometry']['viewport']['northeast']['lat']
        bbox = (xmin, ymin, xmax, ymax)
        n.bounds = Polygon.from_bbox(bbox)
        lat = geometry_json['geometry']['location']['lat']
        lng = geometry_json['geometry']['location']['lng']
    elif row.get('Place Id') and not pd.isna(row['Place Id']):
        # Last resort: look the place up via the Google Places API.
        place_id = row['Place Id']
        r, photo_url, photo_attrib = fetch_details_for_place_id(place_id)
        geometry_json = r['geometry']
        xmin = geometry_json['viewport']['southwest']['lng']
        ymin = geometry_json['viewport']['southwest']['lat']
        xmax = geometry_json['viewport']['northeast']['lng']
        ymax = geometry_json['viewport']['northeast']['lat']
        bbox = (xmin, ymin, xmax, ymax)
        n.bounds = Polygon.from_bbox(bbox)
        lat = geometry_json['location']['lat']
        lng = geometry_json['location']['lng']
    else:
        print("missing necessary data!")
        continue
    n.lat = lat
    n.lng = lng
    n.area = area
    n.rank = row.get('Rank') if not pd.isna(row.get('Rank')) else None
    n.save()
| 40.1 | 77 | 0.619701 |
04d7c4b2c296579f1473a38f3e38845256f986c5 | 40,802 | py | Python | registry/testcases/functional_testcases/test_service.py | anandrgitnirman/snet-marketplace-service | f31bf741094476b9cb26277f1165deb2856257b1 | [
"MIT"
] | 14 | 2019-02-12T09:14:52.000Z | 2021-03-11T18:42:22.000Z | registry/testcases/functional_testcases/test_service.py | prashantramangupta/snet-marketplace-service | 7c293054e4b0207deefecc46defd743c064472a4 | [
"MIT"
] | 1,079 | 2019-01-10T04:31:24.000Z | 2022-03-29T06:16:42.000Z | registry/testcases/functional_testcases/test_service.py | prashantramangupta/snet-marketplace-service | 7c293054e4b0207deefecc46defd743c064472a4 | [
"MIT"
] | 20 | 2018-12-18T13:06:41.000Z | 2021-09-17T11:13:01.000Z | import json
from datetime import datetime as dt
from unittest import TestCase
from unittest.mock import patch
from uuid import uuid4
from common.constant import StatusCode
from registry.application.handlers.service_handlers import create_service
from registry.application.handlers.service_handlers import get_daemon_config_for_current_network
from registry.application.handlers.service_handlers import get_service_for_service_uuid
from registry.application.handlers.service_handlers import get_services_for_organization
from registry.application.handlers.service_handlers import publish_service_metadata_to_ipfs
from registry.application.handlers.service_handlers import save_service
from registry.application.handlers.service_handlers import save_service_attributes, verify_service_id
from registry.application.handlers.service_handlers import save_transaction_hash_for_published_service
from registry.constants import EnvironmentType
from registry.constants import OrganizationMemberStatus
from registry.constants import Role
from registry.constants import ServiceAvailabilityStatus
from registry.constants import ServiceStatus
from registry.domain.factory.service_factory import ServiceFactory
from registry.infrastructure.models import Organization as OrganizationDBModel
from registry.infrastructure.models import OrganizationMember as OrganizationMemberDBModel
from registry.infrastructure.models import OrganizationState as OrganizationStateDBModel
from registry.infrastructure.models import Service as ServiceDBModel
from registry.infrastructure.models import ServiceGroup as ServiceGroupDBModel
from registry.infrastructure.models import ServiceReviewHistory as ServiceReviewHistoryDBModel
from registry.infrastructure.models import ServiceState as ServiceStateDBModel
from registry.infrastructure.models import OffchainServiceConfig as OffchainServiceConfigDBModel
from registry.infrastructure.repositories.organization_repository import OrganizationPublisherRepository
from registry.infrastructure.repositories.service_publisher_repository import ServicePublisherRepository
org_repo = OrganizationPublisherRepository()
service_repo = ServicePublisherRepository()
class TestService(TestCase):
    def setUp(self):
        """Per-test setup: intentionally empty; each test seeds its own rows."""
        pass
    def test_verify_service_id(self):
        """verify_service_id reports UNAVAILABLE for a service_id already in
        use within the org and AVAILABLE for an unused one."""
        # Seed the organization the service belongs to.
        org_repo.add_item(
            OrganizationDBModel(
                name="test_org",
                org_id="test_org_id",
                uuid="test_org_uuid",
                org_type="organization",
                description="that is the dummy org for testcases",
                short_description="that is the short description",
                url="https://dummy.url",
                contacts=[],
                assets={},
                duns_no=12345678,
                origin="PUBLISHER_DAPP",
                groups=[],
                addresses=[],
                metadata_ipfs_uri="#dummyhashdummyhash"
            )
        )
        new_org_members = [
            {
                "username": "karl@dummy.io",
                "address": "0x123"
            },
            {
                "username": "trax@dummy.io",
                "address": "0x234"
            },
            {
                "username": "dummy_user1@dummy.io",
                "address": "0x345"
            }
        ]
        # Register members as ACCEPTED -- presumably required for the
        # handler's org-membership authorization; confirm against handler.
        org_repo.add_all_items(
            [
                OrganizationMemberDBModel(
                    username=member["username"],
                    org_uuid="test_org_uuid",
                    role=Role.MEMBER.value,
                    address=member["address"],
                    status=OrganizationMemberStatus.ACCEPTED.value,
                    transaction_hash="0x123",
                    invite_code=str(uuid4()),
                    invited_on=dt.utcnow(),
                    updated_on=dt.utcnow()
                ) for member in new_org_members
            ]
        )
        # An existing service that already occupies "test_service_id".
        service_repo.add_item(
            ServiceDBModel(
                org_uuid="test_org_uuid",
                uuid="test_service_uuid",
                display_name="test_display_name",
                service_id="test_service_id",
                metadata_uri="Qasdfghjklqwertyuiopzxcvbnm",
                proto={},
                short_description="test_short_description",
                description="test_description",
                project_url="https://dummy.io",
                assets={},
                rating={},
                ranking=1,
                contributors=[],
                created_on=dt.utcnow(),
                updated_on=dt.utcnow()
            )
        )
        # API-Gateway-style event: query the service_id that is taken.
        event = {
            "requestContext": {
                "authorizer": {
                    "claims": {
                        "email": "dummy_user1@dummy.io"
                    }
                }
            },
            "httpMethod": "GET",
            "pathParameters": {"org_uuid": "test_org_uuid"},
            "queryStringParameters": {"service_id": "test_service_id"}
        }
        response = verify_service_id(event=event, context=None)
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"] == ServiceAvailabilityStatus.UNAVAILABLE.value)
        # Same query with a service_id nothing uses yet.
        event = {
            "requestContext": {
                "authorizer": {
                    "claims": {
                        "email": "dummy_user1@dummy.io"
                    }
                }
            },
            "httpMethod": "GET",
            "pathParameters": {"org_uuid": "test_org_uuid"},
            "queryStringParameters": {"service_id": "new_test_service_id"}
        }
        response = verify_service_id(event=event, context=None)
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"] == ServiceAvailabilityStatus.AVAILABLE.value)
    def test_create_service(self):
        """create_service succeeds for an accepted org member and echoes the
        organization uuid in the response payload."""
        # Seed the target organization.
        org_repo.add_item(
            OrganizationDBModel(
                name="test_org",
                org_id="test_org_id",
                uuid="test_org_uuid",
                org_type="organization",
                description="that is the dummy org for testcases",
                short_description="that is the short description",
                url="https://dummy.url",
                contacts=[],
                assets={},
                duns_no=12345678,
                origin="PUBLISHER_DAPP",
                groups=[],
                addresses=[],
                metadata_ipfs_uri="#dummyhashdummyhash"
            )
        )
        # Only the calling user is registered as an accepted member.
        new_org_members = [
            {
                "username": "dummy_user1@dummy.io",
                "address": "0x345"
            }
        ]
        org_repo.add_all_items(
            [
                OrganizationMemberDBModel(
                    username=member["username"],
                    org_uuid="test_org_uuid",
                    role=Role.MEMBER.value,
                    address=member["address"],
                    status=OrganizationMemberStatus.ACCEPTED.value,
                    transaction_hash="0x123",
                    invite_code=str(uuid4()),
                    invited_on=dt.utcnow(),
                    updated_on=dt.utcnow()
                ) for member in new_org_members
            ]
        )
        # POST a minimal creation payload (display name only).
        event = {
            "requestContext": {
                "authorizer": {
                    "claims": {
                        "email": "dummy_user1@dummy.io"
                    }
                }
            },
            "httpMethod": "POST",
            "pathParameters": {"org_uuid": "test_org_uuid"},
            "body": json.dumps({"display_name": "test_display_name"})
        }
        response = create_service(event=event, context=None)
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"]["org_uuid"] == "test_org_uuid")
    def test_get_services_for_organization(self):
        """A filtered/paginated listing for an org returns the single seeded
        service with matching pagination metadata."""
        # Seed the organization.
        org_repo.add_item(
            OrganizationDBModel(
                name="test_org",
                org_id="test_org_id",
                uuid="test_org_uuid",
                org_type="organization",
                description="that is the dummy org for testcases",
                short_description="that is the short description",
                url="https://dummy.url",
                contacts=[],
                assets={},
                duns_no=12345678,
                origin="PUBLISHER_DAPP",
                groups=[],
                addresses=[],
                metadata_ipfs_uri="#dummyhashdummyhash"
            )
        )
        new_org_members = [
            {
                "username": "karl@dummy.io",
                "address": "0x123"
            },
            {
                "username": "trax@dummy.io",
                "address": "0x234"
            },
            {
                "username": "dummy_user1@dummy.io",
                "address": "0x345"
            }
        ]
        org_repo.add_all_items(
            [
                OrganizationMemberDBModel(
                    username=member["username"],
                    org_uuid="test_org_uuid",
                    role=Role.MEMBER.value,
                    address=member["address"],
                    status=OrganizationMemberStatus.ACCEPTED.value,
                    transaction_hash="0x123",
                    invite_code=str(uuid4()),
                    invited_on=dt.utcnow(),
                    updated_on=dt.utcnow()
                ) for member in new_org_members
            ]
        )
        # One service in DRAFT state with a single group.
        service_repo.add_item(
            ServiceDBModel(
                org_uuid="test_org_uuid",
                uuid="test_service_uuid",
                display_name="test_display_name",
                service_id="test_service_id",
                metadata_uri="Qasdfghjklqwertyuiopzxcvbnm",
                short_description="test_short_description",
                description="test_description",
                project_url="https://dummy.io",
                ranking=1,
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            ServiceStateDBModel(
                row_id=1000,
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                state="DRAFT",
                transaction_hash=None,
                created_by="dummy_user",
                updated_by="dummy_user",
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            ServiceGroupDBModel(
                row_id="1000",
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                group_id="test_group_id",
                pricing={},
                endpoints={"https://dummydaemonendpoint.io": {"verfied": True}},
                daemon_address=["0xq2w3e4rr5t6y7u8i9"],
                free_calls=10,
                free_call_signer_address="",
                created_on=dt.utcnow()
            )
        )
        # List request: search "display", page size 10, all states,
        # sorted by display_name descending.
        event = {
            "requestContext": {
                "authorizer": {
                    "claims": {
                        "email": "dummy_user1@dummy.io"
                    }
                }
            },
            "httpMethod": "GET",
            "pathParameters": {"org_uuid": "test_org_uuid"},
            "body": json.dumps({
                "q": "display",
                "limit": 10,
                "offset": 0,
                "s": "all",
                "sort_by": "display_name",
                "order_by": "desc",
                "filters": []
            })
        }
        response = get_services_for_organization(event=event, context=None)
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"]["total_count"] == 1)
        assert (response_body["data"]["offset"] == 0)
        assert (response_body["data"]["limit"] == 10)
        assert (len(response_body["data"]["result"]) == 1)
    def test_save_service(self):
        """Saving a DRAFT service twice moves it to APPROVED both times and
        preserves the submitted demo-file asset configuration."""
        # Seed organization and members.
        org_repo.add_item(
            OrganizationDBModel(
                name="test_org",
                org_id="test_org_id",
                uuid="test_org_uuid",
                org_type="organization",
                description="that is the dummy org for testcases",
                short_description="that is the short description",
                url="https://dummy.url",
                contacts=[],
                assets={},
                duns_no=12345678,
                origin="PUBLISHER_DAPP",
                groups=[],
                addresses=[],
                metadata_ipfs_uri="#dummyhashdummyhash"
            )
        )
        new_org_members = [
            {
                "username": "karl@dummy.io",
                "address": "0x123"
            },
            {
                "username": "trax@dummy.io",
                "address": "0x234"
            },
            {
                "username": "dummy_user1@dummy.io",
                "address": "0x345"
            }
        ]
        org_repo.add_all_items(
            [
                OrganizationMemberDBModel(
                    username=member["username"],
                    org_uuid="test_org_uuid",
                    role=Role.MEMBER.value,
                    address=member["address"],
                    status=OrganizationMemberStatus.ACCEPTED.value,
                    transaction_hash="0x123",
                    invite_code=str(uuid4()),
                    invited_on=dt.utcnow(),
                    updated_on=dt.utcnow()
                ) for member in new_org_members
            ]
        )
        # Existing service in DRAFT state with one group.
        service_repo.add_item(
            ServiceDBModel(
                org_uuid="test_org_uuid",
                uuid="test_service_uuid",
                display_name="test_display_name",
                service_id="test_service_id",
                metadata_uri="Qasdfghjklqwertyuiopzxcvbnm",
                short_description="test_short_description",
                description="test_description",
                project_url="https://dummy.io",
                ranking=1,
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            ServiceStateDBModel(
                row_id=1000,
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                state=ServiceStatus.DRAFT.value,
                created_by="dummy_user",
                updated_by="dummy_user",
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            ServiceGroupDBModel(
                row_id="1000",
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                group_id="test_group_id",
                endpoints={"https://dummydaemonendpoint.io": {"verfied": True}},
                daemon_address=["0xq2w3e4rr5t6y7u8i9"],
                free_calls=10,
                free_call_signer_address="0xq2s3e4r5t6y7u8i9o0",
                created_on=dt.utcnow()
            )
        )
        # First save: updates description and marks demo files as required.
        event = {
            "path": "/org/test_org_uuid/service",
            "requestContext": {
                "authorizer": {
                    "claims": {
                        "email": "dummy_user1@dummy.io"
                    }
                }
            },
            "httpMethod": "PUT",
            "pathParameters": {"org_uuid": "test_org_uuid", "service_uuid": "test_service_uuid"},
            "body": json.dumps({
                "description": "test description updated 1",
                "service_id": "test_service_id",
                "assets": {"demo_files": {"required": 1}},
                "groups": [
                    {
                        "group_name": "defaultGroup",
                        "group_id": "l/hp6f1RXFPANeLWFZYwTB93Xi42S8NpZHfnceS6eUw=",
                        "free_calls": 10,
                        "free_call_signer_address": "0x7DF35C98f41F3Af0df1dc4c7F7D4C19a71Dd059F",
                        "pricing": [
                            {
                                "default": True,
                                "price_model": "fixed_price",
                                "price_in_cogs": 1
                            }
                        ],
                        "endpoints": {}
                    }
                ]
            })
        }
        # Pre-existing offchain config row the save should update/override.
        service_repo.add_item(OffchainServiceConfigDBModel(
            org_uuid="test_org_uuid",
            service_uuid="test_service_uuid",
            parameter_name="demo_component_required",
            parameter_value="0",
            created_on="2021-07-19 12:13:55",
            updated_on="2021-07-19 12:13:55"
        ))
        response = save_service(event=event, context=None)
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"]["service_uuid"] == "test_service_uuid")
        assert (response_body["data"]["service_state"]["state"] == ServiceStatus.APPROVED.value)
        assert (response_body["data"]["media"]["demo_files"]) == {"required": 1}
        # Second save with new pricing/free-call values: state stays APPROVED.
        event = {
            "path": "/org/test_org_uuid/service",
            "requestContext": {
                "authorizer": {
                    "claims": {
                        "email": "dummy_user1@dummy.io"
                    }
                }
            },
            "httpMethod": "PUT",
            "pathParameters": {"org_uuid": "test_org_uuid", "service_uuid": "test_service_uuid"},
            "body": json.dumps({
                "description": "test description updated 2",
                "service_id": "test_service_id",
                "groups": [
                    {
                        "group_name": "defaultGroup",
                        "group_id": "l/hp6f1RXFPANeLWFZYwTB93Xi42S8NpZHfnceS6eUw=",
                        "free_calls": 20,
                        "free_call_signer_address": "0x7DF35C98f41F3Af0df1dc4c7F7D4C19a71Dd059F",
                        "pricing": [
                            {
                                "default": True,
                                "price_model": "fixed_price",
                                "price_in_cogs": 2
                            }
                        ],
                        "endpoints": {}
                    }
                ]
            })
        }
        response = save_service(event=event, context=None)
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"]["service_uuid"] == "test_service_uuid")
        assert (response_body["data"]["service_state"]["state"] == ServiceStatus.APPROVED.value)
    def test_get_service_for_service_uuid(self):
        """Fetching a service by uuid returns its DRAFT state and surfaces
        the offchain demo_component_required flag under media.demo_files."""
        # Seed organization and members.
        org_repo.add_item(
            OrganizationDBModel(
                name="test_org",
                org_id="test_org_id",
                uuid="test_org_uuid",
                org_type="organization",
                description="that is the dummy org for testcases",
                short_description="that is the short description",
                url="https://dummy.url",
                contacts=[],
                assets={},
                duns_no=12345678,
                origin="PUBLISHER_DAPP",
                groups=[],
                addresses=[],
                metadata_ipfs_uri="#dummyhashdummyhash"
            )
        )
        new_org_members = [
            {
                "username": "karl@dummy.io",
                "address": "0x123"
            },
            {
                "username": "trax@dummy.io",
                "address": "0x234"
            },
            {
                "username": "dummy_user1@dummy.io",
                "address": "0x345"
            }
        ]
        org_repo.add_all_items(
            [
                OrganizationMemberDBModel(
                    username=member["username"],
                    org_uuid="test_org_uuid",
                    role=Role.MEMBER.value,
                    address=member["address"],
                    status=OrganizationMemberStatus.ACCEPTED.value,
                    transaction_hash="0x123",
                    invite_code=str(uuid4()),
                    invited_on=dt.utcnow(),
                    updated_on=dt.utcnow()
                ) for member in new_org_members
            ]
        )
        # Service in DRAFT state plus its offchain config row.
        service_repo.add_item(
            ServiceDBModel(
                org_uuid="test_org_uuid",
                uuid="test_service_uuid",
                display_name="test_display_name",
                service_id="test_service_id",
                metadata_uri="Qasdfghjklqwertyuiopzxcvbnm",
                short_description="test_short_description",
                description="test_description",
                project_url="https://dummy.io",
                ranking=1,
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            ServiceStateDBModel(
                row_id=1000,
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                state=ServiceStatus.DRAFT.value,
                created_by="dummy_user",
                updated_by="dummy_user",
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            OffchainServiceConfigDBModel(
                row_id=10,
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                parameter_name="demo_component_required",
                parameter_value=0,
                created_on=dt.utcnow(),
                updated_on=dt.utcnow()
            )
        )
        event = {
            "path": "/org/test_org_uuid/service",
            "requestContext": {
                "authorizer": {
                    "claims": {
                        "email": "dummy_user1@dummy.io"
                    }
                }
            },
            "httpMethod": "GET",
            "pathParameters": {"org_uuid": "test_org_uuid", "service_uuid": "test_service_uuid"}
        }
        response = get_service_for_service_uuid(event=event, context=None)
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"]["org_uuid"] == "test_org_uuid")
        assert (response_body["data"]["service_uuid"] == "test_service_uuid")
        assert (response_body["data"]["service_state"]["state"] == ServiceStatus.DRAFT.value)
        assert (response_body["data"]["media"]) == {
            "demo_files": {
                "required": 0
            }
        }
    def test_save_transaction_hash_for_published_service(self):
        """Recording a publish transaction hash for an APPROVED service
        returns HTTP 200 with an OK payload."""
        # Seed organization and members.
        org_repo.add_item(
            OrganizationDBModel(
                name="test_org",
                org_id="test_org_id",
                uuid="test_org_uuid",
                org_type="organization",
                description="that is the dummy org for testcases",
                short_description="that is the short description",
                url="https://dummy.url",
                contacts=[],
                assets={},
                duns_no=12345678,
                origin="PUBLISHER_DAPP",
                groups=[],
                addresses=[],
                metadata_ipfs_uri="#dummyhashdummyhash"
            )
        )
        new_org_members = [
            {
                "username": "karl@dummy.io",
                "address": "0x123"
            },
            {
                "username": "trax@dummy.io",
                "address": "0x234"
            },
            {
                "username": "dummy_user1@dummy.io",
                "address": "0x345"
            }
        ]
        org_repo.add_all_items(
            [
                OrganizationMemberDBModel(
                    username=member["username"],
                    org_uuid="test_org_uuid",
                    role=Role.MEMBER.value,
                    address=member["address"],
                    status=OrganizationMemberStatus.ACCEPTED.value,
                    transaction_hash="0x123",
                    invite_code=str(uuid4()),
                    invited_on=dt.utcnow(),
                    updated_on=dt.utcnow()
                ) for member in new_org_members
            ]
        )
        # Service must already be APPROVED before the hash can be attached.
        service_repo.add_item(
            ServiceDBModel(
                org_uuid="test_org_uuid",
                uuid="test_service_uuid",
                display_name="test_display_name",
                service_id="test_service_id",
                metadata_uri="Qasdfghjklqwertyuiopzxcvbnm",
                short_description="test_short_description",
                description="test_description",
                project_url="https://dummy.io",
                ranking=1,
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            ServiceStateDBModel(
                row_id=1000,
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                state=ServiceStatus.APPROVED.value,
                created_by="dummy_user",
                updated_by="dummy_user",
                created_on=dt.utcnow()
            )
        )
        event = {
            "path": "/org/test_org_uuid/service/test_service_uuid/transaction",
            "requestContext": {
                "authorizer": {
                    "claims": {
                        "email": "dummy_user1@dummy.io"
                    }
                }
            },
            "httpMethod": "POST",
            "pathParameters": {"org_uuid": "test_org_uuid", "service_uuid": "test_service_uuid"},
            "body": json.dumps({"transaction_hash": "0xtest_trxn_hash"})
        }
        response = save_transaction_hash_for_published_service(event=event, context=None)
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"] == StatusCode.OK)
    def test_daemon_config_for_test_and_main_environment(self):
        """Daemon config for the MAIN network of a PUBLISHED org enables both
        blockchain mode and passthrough."""
        # Seed a PUBLISHED organization with one member.
        org_repo.add_item(
            OrganizationDBModel(
                name="test_org",
                org_id="test_org_id",
                uuid="test_org_uuid",
                org_type="organization",
                description="that is the dummy org for testcases",
                short_description="that is the short description",
                url="https://dummy.url",
                contacts=[],
                assets={},
                duns_no=12345678,
                origin="PUBLISHER_DAPP",
                groups=[],
                addresses=[],
                metadata_ipfs_uri="#dummyhashdummyhash"
            )
        )
        new_org_members = [
            {
                "username": "dummy_user1@dummy.io",
                "address": "0x345"
            }
        ]
        org_repo.add_all_items(
            [
                OrganizationMemberDBModel(
                    username=member["username"],
                    org_uuid="test_org_uuid",
                    role=Role.MEMBER.value,
                    address=member["address"],
                    status=OrganizationMemberStatus.ACCEPTED.value,
                    transaction_hash="0x123",
                    invite_code=str(uuid4()),
                    invited_on=dt.utcnow(),
                    updated_on=dt.utcnow()
                ) for member in new_org_members
            ]
        )
        org_repo.add_item(
            OrganizationStateDBModel(
                org_uuid="test_org_uuid",
                state="PUBLISHED",
                created_by="dummy_user1@dummy.io",
                updated_by="dummy_user1@dummy.io"
            )
        )
        # Service with uploaded proto files and a contributor record.
        service_repo.add_item(
            ServiceDBModel(
                org_uuid="test_org_uuid",
                uuid="test_service_uuid",
                display_name="test_display_name",
                service_id="test_service_id",
                metadata_uri="Qasdfghjklqwertyuiopzxcvbnm",
                short_description="test_short_description",
                description="test_description",
                project_url="https://dummy.io",
                ranking=1,
                proto={"proto_files": {
                    "url": "https://ropsten-marketplace-service-assets.s3.amazonaws.com/test_org_uuid/services/test_service_uuid/assets/20200212111248_proto_files.zip"}},
                contributors={"email_id": "prashant@singularitynet.io"},
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            ServiceStateDBModel(
                row_id=1000,
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                state=ServiceStatus.DRAFT.value,
                created_by="dummy_user",
                updated_by="dummy_user",
                created_on=dt.utcnow()
            )
        )
        # Request the daemon config for the MAIN network.
        event = {"path": "/org/test_org_uuid/service/test_service_uuid/group_id/test_group_id/daemon/config",
                 "requestContext": {
                     "authorizer": {
                         "claims": {
                             "email": "dummy_user1@dummy.io"
                         }
                     }
                 }, "httpMethod": "GET",
                 "pathParameters": {"org_uuid": "test_org_uuid", "service_uuid": "test_service_uuid",
                                    "group_id": "test_group_id"},
                 "queryStringParameters": {"network": EnvironmentType.MAIN.value}}
        response = get_daemon_config_for_current_network(event, "")
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"]["blockchain_enabled"] is True)
        assert (response_body["data"]["passthrough_enabled"] is True)
    def test_service_to_metadata(self):
        """Build a Service entity from a raw publisher payload and verify its metadata.

        Creates the entity via ServiceFactory.create_service_entity_model (empty
        org/service uuids, APPROVED state) and asserts that to_metadata() emits the
        expected registry metadata dict (groups, media, description, tags,
        contributors).
        """
        # Raw payload as it would arrive from the publisher DApp; values are dummies.
        payload = {"service_id": "sdfadsfd1", "display_name": "new_service_123",
                   "short_description": "sadfasd", "description": "dsada", "project_url": "df",
                   "proto": {},
                   "assets": {"proto_files": {
                       "url": "https://ropsten-marketplace-service-assets.s3.amazonaws.com/9887ec2e099e4afd92c4a052737eaa97/services/7420bf47989e4afdb1797d1bba8090aa/proto/20200327130256_proto_files.zip",
                       "ipfs_hash": "QmUfDprFisFeaRnmLEqks1AFN6iam5MmTh49KcomXHEiQY"}, "hero_image": {
                       "url": "https://ropsten-marketplace-service-assets.s3.amazonaws.com/9887ec2e099e4afd92c4a052737eaa97/services/7420bf47989e4afdb1797d1bba8090aa/assets/20200323130126_asset.png",
                       "ipfs_hash": ""}, "demo_files": {
                       "url": "https://ropsten-marketplace-service-assets.s3.amazonaws.com/9887ec2e099e4afd92c4a052737eaa97/services/7420bf47989e4afdb1797d1bba8090aa/component/20200401121414_component.zip",
                       "ipfs_hash": "QmUfDprFisFeaRnmLEqks1AFN6iam5MmTh49KcomXHEiQY"}},
                   "contributors": [{"name": "df", "email_id": ""}], "groups": [
                {"group_name": "default_group", "group_id": "a+8V4tUs+DBnZfxoh2vBHVv1pAt8pkCac8mpuKFltTo=",
                 "free_calls": 23, "free_call_signer_address": "0x7DF35C98f41F3Af0df1dc4c7F7D4C19a71Dd059F",
                 "pricing": [{"default": True, "price_model": "fixed_price", "price_in_cogs": 1}],
                 "endpoints": {"https://example-service-a.singularitynet.io:8085": {"valid": False}},
                 "test_endpoints": ["https://example-service-a.singularitynet.io:8085"],
                 "daemon_addresses": ["https://example-service-a.singularitynet.io:8085"]}], "tags": ["adsf"],
                   "comments": {"SERVICE_PROVIDER": "", "SERVICE_APPROVER": "<div></div>"},
                   "mpe_address": "0x8fb1dc8df86b388c7e00689d1ecb533a160b4d0c"}
        service = ServiceFactory.create_service_entity_model("", "", payload, ServiceStatus.APPROVED.value)
        service_metadata = service.to_metadata()
        # Note: endpoints collapse from a {url: {"valid": ...}} mapping in the payload
        # to a plain url list in the metadata, and hero_image becomes a media entry.
        assert service_metadata == {
            "version": 1,
            "display_name": "new_service_123",
            "encoding": "",
            "service_type": "",
            "model_ipfs_hash": "",
            "mpe_address": "0x8fb1dc8df86b388c7e00689d1ecb533a160b4d0c",
            "groups": [
                {
                    "free_calls": 23,
                    "free_call_signer_address": "0x7DF35C98f41F3Af0df1dc4c7F7D4C19a71Dd059F",
                    "daemon_addresses": ["https://example-service-a.singularitynet.io:8085"],
                    "pricing": [
                        {"default": True, "price_model": "fixed_price", "price_in_cogs": 1}
                    ],
                    "endpoints": ["https://example-service-a.singularitynet.io:8085"],
                    "group_id": "a+8V4tUs+DBnZfxoh2vBHVv1pAt8pkCac8mpuKFltTo=",
                    "group_name": "default_group"
                }
            ],
            "service_description": {
                "url": "df",
                "short_description": "sadfasd",
                "description": "dsada"
            },
            "media": [
                {
                    "order": 1,
                    "url": "https://ropsten-marketplace-service-assets.s3.amazonaws.com/9887ec2e099e4afd92c4a052737eaa97/services/7420bf47989e4afdb1797d1bba8090aa/assets/20200323130126_asset.png",
                    "file_type": "image",
                    "asset_type": "hero_image",
                    "alt_text": ""
                }
            ],
            'tags': ['adsf'],
            "contributors": [{"name": "df", "email_id": ""}]
        }
    def test_save_service_attributes(self):
        """PUT /org/{org_uuid}/service updates a service's group attributes.

        Seeds an organization (with an accepted member matching the caller's
        claimed email), a draft service, its state and an existing group, then
        invokes the save_service_attributes handler and checks the returned
        group configuration.
        """
        # --- fixture: organization owned by test_org_uuid ---
        org_repo.add_item(
            OrganizationDBModel(
                name="test_org",
                org_id="test_org_id",
                uuid="test_org_uuid",
                org_type="organization",
                description="that is the dummy org for testcases",
                short_description="that is the short description",
                url="https://dummy.url",
                contacts=[],
                assets={},
                duns_no=12345678,
                origin="PUBLISHER_DAPP",
                groups=[],
                addresses=[],
                metadata_ipfs_uri="#dummyhashdummyhash"
            )
        )
        # --- fixture: accepted members; dummy_user1 is the authenticated caller ---
        new_org_members = [
            {
                "username": "karl@dummy.io",
                "address": "0x123"
            },
            {
                "username": "trax@dummy.io",
                "address": "0x234"
            },
            {
                "username": "dummy_user1@dummy.io",
                "address": "0x345"
            }
        ]
        org_repo.add_all_items(
            [
                OrganizationMemberDBModel(
                    username=member["username"],
                    org_uuid="test_org_uuid",
                    role=Role.MEMBER.value,
                    address=member["address"],
                    status=OrganizationMemberStatus.ACCEPTED.value,
                    transaction_hash="0x123",
                    invite_code=str(uuid4()),
                    invited_on=dt.utcnow(),
                    updated_on=dt.utcnow()
                ) for member in new_org_members
            ]
        )
        # --- fixture: service, its state and one pre-existing group ---
        service_repo.add_item(
            ServiceDBModel(
                org_uuid="test_org_uuid",
                uuid="test_service_uuid",
                display_name="test_display_name",
                service_id="test_service_id",
                metadata_uri="Qasdfghjklqwertyuiopzxcvbnm",
                short_description="test_short_description",
                description="test_description",
                project_url="https://dummy.io",
                ranking=1,
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            ServiceStateDBModel(
                row_id=1000,
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                state=ServiceStatus.APPROVAL_PENDING.value,
                created_by="dummy_user",
                updated_by="dummy_user",
                created_on=dt.utcnow()
            )
        )
        service_repo.add_item(
            ServiceGroupDBModel(
                row_id="1000",
                org_uuid="test_org_uuid",
                service_uuid="test_service_uuid",
                group_id="test_group_id",
                endpoints={"https://dummydaemonendpoint.io": {"verfied": True}},
                daemon_address=["0xq2w3e4rr5t6y7u8i9"],
                free_calls=10,
                free_call_signer_address="0xq2s3e4r5t6y7u8i9o0",
                created_on=dt.utcnow()
            )
        )
        # Simulated API-Gateway PUT event replacing the service's group config.
        event = {
            "path": "/org/test_org_uuid/service",
            "requestContext": {
                "authorizer": {
                    "claims": {
                        "email": "dummy_user1@dummy.io"
                    }
                }
            },
            "httpMethod": "PUT",
            "pathParameters": {"org_uuid": "test_org_uuid", "service_uuid": "test_service_uuid"},
            "body": json.dumps({
                "groups": [
                    {
                        "group_name": "defaultGroup",
                        "group_id": "l/hp6f1RXFPANeLWFZYwTB93Xi42S8NpZHfnceS6eUw=",
                        "free_calls": 15,
                        "free_call_signer_address": "0x7DF35C98f41F3Af0df1dc4c7F7D4C19a71Dd059F",
                        "pricing": [
                            {
                                "default": True,
                                "price_model": "fixed_price",
                                "price_in_cogs": 1
                            }
                        ],
                        "endpoints": {
                            "https://example-service-a.singularitynet.io:8010": {
                                "valid": False
                            },
                            "https://example-service-a.singularitynet.io:8013": {
                                "valid": False
                            },
                            "https://example-service-a.singularitynet.io:8011": {
                                "valid": True
                            }
                        },
                    }
                ]
            })
        }
        response = save_service_attributes(event=event, context=None)
        assert (response["statusCode"] == 200)
        response_body = json.loads(response["body"])
        assert (response_body["status"] == "success")
        assert (response_body["data"]["service_uuid"] == "test_service_uuid")
        assert (response_body["data"]["service_state"]["state"] == ServiceStatus.APPROVAL_PENDING.value)
        assert (response_body["data"]['groups'] == [
            {'group_id': 'l/hp6f1RXFPANeLWFZYwTB93Xi42S8NpZHfnceS6eUw=', 'group_name': 'defaultGroup',
             'endpoints': {'https://example-service-a.singularitynet.io:8010': {'valid': False},
                           'https://example-service-a.singularitynet.io:8013': {'valid': False},
                           'https://example-service-a.singularitynet.io:8011': {'valid': True}}, 'test_endpoints': [],
             'pricing': [{'default': True, 'price_model': 'fixed_price', 'price_in_cogs': 1}], 'free_calls': 15,
             'free_call_signer_address': '0x7DF35C98f41F3Af0df1dc4c7F7D4C19a71Dd059F', 'daemon_addresses': []}])
def tearDown(self):
org_repo.session.query(OrganizationStateDBModel).delete()
org_repo.session.query(OrganizationMemberDBModel).delete()
org_repo.session.query(OrganizationDBModel).delete()
org_repo.session.query(ServiceDBModel).delete()
org_repo.session.query(ServiceGroupDBModel).delete()
org_repo.session.query(ServiceStateDBModel).delete()
org_repo.session.query(ServiceReviewHistoryDBModel).delete()
org_repo.session.commit()
| 40.438057 | 206 | 0.50451 |
59633741ddc8bb1c0a10e758bccf83f03d85ac65 | 2,735 | py | Python | Assignment_3_chaos_and_pendulums/Pre-GitHub-versions/Phys440_Assignment03_Prob2 (3).py | KayaBaber/Computational-Physics | 1117733d33f9035a8e9a137bfdb88478bf477d78 | [
"MIT"
] | null | null | null | Assignment_3_chaos_and_pendulums/Pre-GitHub-versions/Phys440_Assignment03_Prob2 (3).py | KayaBaber/Computational-Physics | 1117733d33f9035a8e9a137bfdb88478bf477d78 | [
"MIT"
] | null | null | null | Assignment_3_chaos_and_pendulums/Pre-GitHub-versions/Phys440_Assignment03_Prob2 (3).py | KayaBaber/Computational-Physics | 1117733d33f9035a8e9a137bfdb88478bf477d78 | [
"MIT"
] | null | null | null | '''
Kaya Baber
Physics 440 - Computational Physics
Assignment 3
Problem 2
'''
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
import math
def f(thetas, t, b, gamma, omega):
    """Right-hand side of the driven, damped pendulum ODE (for odeint).

    thetas -- current state (theta, theta_dot)
    t      -- time; the drive phase is omega * t
    b      -- damping coefficient
    gamma  -- drive amplitude
    omega  -- drive angular frequency
    Returns the state derivative (theta_dot, theta_double_dot).
    """
    theta, theta_dot = thetas
    theta_double_dot = -b * theta_dot - math.sin(theta) + gamma * math.cos(omega * t)
    return theta_dot, theta_double_dot
#initial conditions: pendulum starts at rest at the bottom
theta0=-0.0
thetaDot0=0.0
thetas=[theta0,thetaDot0]
#constants: b = damping coefficient, omega = drive angular frequency
b=0.05
omega=0.7
#computation parameters: 'steps' samples per nominal period, 'periods' periods total
steps=100
periods=100
# NOTE(review): for drive frequency omega the drive period is 2*pi/omega, but the
# time span below uses 2*pi*omega per period -- confirm this is intentional, as
# the "strobed" sampling further down is otherwise not locked to the drive period.
t = np.linspace(0, periods*(math.pi*2.0*omega), steps*periods+1)
#generating loop: sweep the drive amplitude gamma from 0.4 to 1.0
for i in range(7):
    gamma=0.4+(i*0.1)
    #ODE solution; columns of sol are (theta, theta-dot)
    sol = odeint(f, thetas, t, args=(b, gamma, omega))
    #TAKE THE STROBE
    #plot theta-dot vs time (sol[:, 1] is theta-dot)
    plt.plot(t, sol[:, 1], 'b', label='thetaDot(t)')
    plt.xlabel('time')
    plt.ylabel('theta-Dot')
    plt.grid()
    plt.savefig('/Users/student/kbaber/Desktop/Phys440/Assignment 3/plots//gamma'+str(gamma)+'_thetaDot_t.png',bbox_inches='tight')
    #plt.savefig('\Users\Kaya\Google Drive\School\Phys 440\Assignments\Assignment 3\plots\\gamma'+str(gamma)+'_thetaDot_t.png',bbox_inches='tight')
    #plt.show()
    plt.clf()
    #clips the plot to keep theta between -pi and +pi (wrap into principal branch)
    thetaLog=((np.array(sol[:,0])+math.pi)%(2*math.pi))-math.pi
    #plot phase space plot
    plt.plot(thetaLog, sol[:, 1], 'g.', label='theta-Dot(theta)')
    plt.xlabel('theta')
    plt.ylabel('theta-Dot')
    plt.title('Phase Space Plot')
    plt.grid()
    plt.gca().set_aspect('equal', adjustable='box')
    plt.savefig('/Users/student/kbaber/Desktop/Phys440/Assignment 3/plots//gamma'+str(gamma)+'_thetaDot_theta.png',bbox_inches='tight')
    #plt.savefig('\Users\Kaya\Google Drive\School\Phys 440\Assignments\Assignment 3\plots\\gamma'+str(gamma)+'_thetaDot_theta.png',bbox_inches='tight')
    #plt.show()
    plt.clf()
    #selects only points that coincide with the period omega (every 'steps'-th sample)
    strobedTheta=sol[:,0][0:-1:steps]
    strobedThetaDot=sol[:,1][0:-1:steps]
    strobedTheta=((strobedTheta+math.pi)%(2*math.pi))-math.pi
    #plot strobed phase space plot (Poincare section)
    plt.plot(strobedTheta, strobedThetaDot, 'r.', label='theta-Dot(theta)')
    plt.xlabel('theta')
    plt.ylabel('theta-Dot')
    plt.title('Strobed Phase Space Plot')
    plt.grid()
    plt.gca().set_aspect('equal', adjustable='box')
    plt.savefig('/Users/student/kbaber/Desktop/Phys440/Assignment 3/plots//gamma'+str(gamma)+'_thetaDot_theta_strobed.png',bbox_inches='tight')
    #plt.savefig('\Users\Kaya\Google Drive\School\Phys 440\Assignments\Assignment 3\plots\\gamma'+str(gamma)+'_thetaDot_theta.png',bbox_inches='tight')
    #plt.show()
    plt.clf()
| 31.079545 | 151 | 0.684461 |
eefcd4d1244008525aa53e3f3d2d021f4b29b40d | 4,210 | py | Python | locallibrary/settings.py | skupriienko/django_local_library | 2bc2b380b806b6d83bd02cafe0370c835f55269b | [
"MIT"
] | null | null | null | locallibrary/settings.py | skupriienko/django_local_library | 2bc2b380b806b6d83bd02cafe0370c835f55269b | [
"MIT"
] | null | null | null | locallibrary/settings.py | skupriienko/django_local_library | 2bc2b380b806b6d83bd02cafe0370c835f55269b | [
"MIT"
] | null | null | null | """
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'p#4gv(fbjp#1plyru=n0-ed0hdq)e59h)4ba-a5*46$4(z_@1s'
# NOTE(review): a missing DJANGO_SECRET_KEY env var silently falls back to this
# checked-in key -- confirm the env var is set in every deployed environment.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'p#4gv(fbjp#1plyru=n0-ed0hdq)e59h)4ba-a5*46$4(z_@1s')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
# Debug stays on unless DJANGO_DEBUG is set to the exact string 'False'.
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
ALLOWED_HOSTS = ['secret-shelf-01811.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'catalog.apps.CatalogConfig',
]
# WhiteNoise sits right after SecurityMiddleware so it can serve static files.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Local default is SQLite; on Heroku this is overridden below via DATABASE_URL.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Kiev'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = BASE_DIR / 'staticfiles' #. os.path.join(BASE_DIR, 'staticfiles')
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Emails are printed to the console (development backend).
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Heroku: merge database settings parsed from $DATABASE_URL into the default DB.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env) | 29.236111 | 102 | 0.718527 |
1f1256410ab8d9acb1d856b473029ce665a7ef85 | 93 | py | Python | backend/.venv/lib/python3.7/enum.py | yszar/flask-vue-case | c8dd46f9b58a51c330aca048b22181f09c7b2782 | [
"MIT"
] | 2 | 2019-01-25T18:18:59.000Z | 2019-01-28T17:20:59.000Z | backend/.venv/lib/python3.7/enum.py | yszar/flask-vue-case | c8dd46f9b58a51c330aca048b22181f09c7b2782 | [
"MIT"
] | 19 | 2018-11-23T06:43:42.000Z | 2019-04-28T00:32:47.000Z | backend/.venv/lib/python3.7/enum.py | yszar/flask-vue-case | c8dd46f9b58a51c330aca048b22181f09c7b2782 | [
"MIT"
] | 1 | 2020-03-25T09:27:23.000Z | 2020-03-25T09:27:23.000Z | /usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/enum.py | 93 | 93 | 0.806452 |
bd9f8d77a2d6eb145b9fa5e877337d0caace1ae2 | 10,686 | py | Python | picknmix/picknmix.py | FarazFe/picknmix | 3225b72be177b72036a1404c506f4806e9ca0f37 | [
"MIT"
] | null | null | null | picknmix/picknmix.py | FarazFe/picknmix | 3225b72be177b72036a1404c506f4806e9ca0f37 | [
"MIT"
] | null | null | null | picknmix/picknmix.py | FarazFe/picknmix | 3225b72be177b72036a1404c506f4806e9ca0f37 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Pick n Mix is a simple stacking tool for stacking Sci-Kit learn models
of your picks.
It provides 2 classes:
Layer and Stack. Layer is a parallel combination of models,
while Stack combines Layers to create a stacking model"""
from copy import deepcopy
import numpy as np
import warnings
import importlib
class Layer:
    """A parallel combination of models, optionally with one preprocessor per model.

    ``fit``/``predict`` run every (preprocessor, model) pair side by side and
    concatenate the per-model outputs column-wise; the resulting matrix is the
    input for the next Layer in a Stack.
    """

    def __init__(self, models, preprocessors=None, proba=False):
        """Initialize Layer, create a parallel combination of Sci-Kit learn models
        with or without preprocessors

        Parameters
        ==========
        models:
            A list of picks from sklearn models
        preprocessors:
            A list of picks from sklearn.preprocessing, if not None the number
            of preprocessors and models must match. Use None in a slot to skip
            preprocessing for that model.
        proba:
            Bool or a list of bool indicating whether predict_proba should be
            used instead of predict, useful for classifiers not in the final
            Layer. If a list, its length must match the number of models.
        """
        if preprocessors is not None:
            assert len(preprocessors) == len(models), """Number of
            preprocessors and models does not match, got {} processors but
            {} models.""".format(len(preprocessors), len(models))
        if type(proba) != bool:
            assert len(proba) == len(models), """Length of proba and number of
            models does not match, got {} processors but {} models.""".format(
                len(proba), len(models))
        self.width = len(models)
        if preprocessors is None:
            self.preprocessors = [None] * self.width
        else:
            self.preprocessors = deepcopy(preprocessors)
        self.models = deepcopy(models)
        if type(proba) == bool:
            self.proba = [proba] * self.width
        else:
            self.proba = deepcopy(proba)

    def _model_output(self, idx, X_new):
        """Return model ``idx``'s output for ``X_new`` as a 2-D column block.

        Uses predict_proba when requested and available (its output is already
        2-D); otherwise falls back to predict and adds a column axis.
        """
        if self.proba[idx]:
            if _method_checker(self.models[idx], 'predict_proba'):
                return self.models[idx].predict_proba(X_new)
            warnings.warn("""Warning: predict_proba not exist for
                {}, using predict instead""".format(self.models[idx].__class__))
        temp_result = self.models[idx].predict(X_new)
        return np.expand_dims(temp_result, axis=1)

    def fit(self, X, y):
        """Fit each preprocessor and model in the Layer with (X, y) and
        return the concatenated predictions for the next Layer.

        Parameters
        ==========
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data
        y : array_like, shape (n_samples, n_targets)
            Target values.

        Returns
        =======
        C : array, shape (n_samples, n_columns)
            Returns predicted values for the next layer.
        """
        result = None
        for idx in range(self.width):
            if self.preprocessors[idx] is not None:
                X_new = self.preprocessors[idx].fit_transform(X)
            else:
                X_new = X
            self.models[idx].fit(X_new, y)
            temp_result = self._model_output(idx, X_new)
            if result is None:
                result = temp_result
            else:
                result = np.concatenate((result, temp_result), axis=1)
        return result

    def predict(self, X):
        """Without fitting any preprocessors or models, return the Layer's
        concatenated predictions of X for the next Layer.

        Parameters
        ==========
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples

        Returns
        =======
        C : array, shape (n_samples, n_columns)
            Returns predicted values for the next layer.
        """
        result = None
        for idx in range(self.width):
            if self.preprocessors[idx] is not None:
                X_new = self.preprocessors[idx].transform(X)
            else:
                X_new = X
            temp_result = self._model_output(idx, X_new)
            if result is None:
                result = temp_result
            else:
                result = np.concatenate((result, temp_result), axis=1)
        return result

    def _isSklearnEstimator(self, estimator):
        """Check whether the given object is a sklearn estimator
        (same duck-typing test as sklearn.base.clone()).
        """
        return hasattr(estimator, 'get_params') and not isinstance(estimator, type)

    def _cloneObject(self, estimator, moduleObject=None):
        """Clone a presumed estimator: sklearn.base.clone for sklearn
        estimators (when sklearn is importable), deepcopy otherwise.
        """
        copyEstimator = None
        if estimator is not None:
            if self._isSklearnEstimator(estimator) and moduleObject is not None and "sklearn" in moduleObject:
                cloneMethod = getattr(moduleObject["sklearn"], "clone")
                copyEstimator = cloneMethod(estimator)
            else:
                copyEstimator = deepcopy(estimator)
        return copyEstimator

    def copy(self):
        """Copy the Layer's shape as if it has not been trained before.

        Returns
        =======
        the copy of the Layer
        """
        copyPreprocessors = []
        copyModels = []
        try:
            # sklearn is resolved once here and passed down so _cloneObject can
            # use sklearn.base.clone; when sklearn is absent we fall back to
            # deepcopy.
            moduleObject = {"sklearn": importlib.import_module("sklearn.base")}
        except ImportError:
            moduleObject = None
        for preprocessor in self.preprocessors:
            copyPreprocessors.append(
                self._cloneObject(preprocessor, moduleObject=moduleObject))
        for model in self.models:
            copyModels.append(self._cloneObject(model, moduleObject=moduleObject))
        # Bug fix: propagate the proba configuration; previously the copy always
        # reverted to the default proba=False.
        return Layer(models=copyModels, preprocessors=copyPreprocessors,
                     proba=deepcopy(self.proba))
class Stack:
    """A vertical stack of Layers: each Layer's predictions feed the next one."""

    def __init__(self, layers, folds=None):
        """Initialize Stack, create a vertical stacking of Layers

        Parameters
        ==========
        layers : a list of Layers
        folds: it could be either KFold, GroupKFold, StratifiedKFold
            or TimeSeriesSplit cross-validator from sci-kit learn;
            or a custom list of sets of index for different folds.
            If None (default) all data will be used in training all layers.
        """
        self.depth = len(layers)
        self.layers = deepcopy(layers)
        self.use_folds = False
        self.folds = None
        self.splitter = None
        if folds is not None:
            if _check_custom_folds(folds):
                # Explicit per-layer index lists: exactly one fold per layer.
                self.use_folds = True
                self.folds = folds
                if len(folds) != self.depth:
                    raise AssertionError(
                        "There are {} folds but {} layers".format(
                            len(folds), self.depth))
            elif _method_checker(folds, 'get_n_splits') and _method_checker(
                    folds, 'split'):
                # sklearn-style splitter; force one split per layer.
                self.use_folds = True
                self.splitter = folds
                if self.splitter.get_n_splits() != self.depth:
                    warnings.warn("""Warning: Number of fold is not the same
                        as number of layers, using the number of layers as
                        number of flods""")
                    self.splitter.n_splits = self.depth
            else:
                raise AssertionError("{} is not a valid input".format(folds))

    def fit(self, X, y):
        """Fit Layers with (X, y) and return the fitted Stack

        Parameters
        ==========
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data
        y : array_like, shape (n_samples, n_targets)
            Target values.

        Returns
        =======
        self : object, the fitted Stack itself
        """
        if self.use_folds:
            if self.folds is None:
                # Bug fix: materialize one test-index array per layer from the
                # splitter. The previous ``_, self.folds = splitter.split(...)``
                # tuple-unpacked the split generator, which only worked for
                # exactly two splits and stored a (train, test) pair instead of
                # index arrays.
                self.folds = [
                    test_index for _, test_index in self.splitter.split(X, y)
                ]
            X_new = X[self.folds[0]]
            y_new = y[self.folds[0]]
        else:
            X_new = X
        for idx in range(self.depth):
            if self.use_folds:
                # Push this layer's fold through the already-fitted layers.
                for pre_idx in range(idx):
                    X_new = self.layers[pre_idx].predict(X_new)
                self.layers[idx].fit(X_new, y_new)
                if idx < self.depth - 1:
                    X_new = X[self.folds[idx + 1]]
                    y_new = y[self.folds[idx + 1]]
            else:
                # Without folds every layer trains on all data; each layer's
                # fit() returns its predictions, which become the next input.
                X_new = self.layers[idx].fit(X_new, y)
        return self

    def predict(self, X):
        """With given X, predict the result with the Stack

        Parameters
        ==========
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples.

        Returns
        =======
        C : array, shape (n_samples,)
            Returns predicted values from the Stack.
        """
        X_new = X
        for idx in range(self.depth):
            X_new = self.layers[idx].predict(X_new)
        # flatten result if only a number for each X
        if X_new.shape[1] == 1:
            X_new = X_new.flatten()
        return X_new

    def copy(self):
        """Copies the Stack's shape as it has not been trained before

        Returns
        =======
        the copy of the Stack
        """
        copyLayers = []
        for idx in range(self.depth):
            copyLayers.append(self.layers[idx].copy())
        return Stack(layers=copyLayers)
def _method_checker(obj, method_name):
return method_name in dir(obj)
def _check_custom_folds(obj):
try:
return isinstance(obj[0][0], int)
except TypeError:
return False | 36.223729 | 138 | 0.561669 |
f91ce3bd9ccecbbd09de28d8400de5eedb03ba57 | 646 | py | Python | nomnom/migrations/0011_auto_20200414_1638.py | tluderer/nomnom-server | 0cdfe9a6d873d87edda56fad27b8dae99b317ab7 | [
"MIT"
] | null | null | null | nomnom/migrations/0011_auto_20200414_1638.py | tluderer/nomnom-server | 0cdfe9a6d873d87edda56fad27b8dae99b317ab7 | [
"MIT"
] | 4 | 2021-04-14T15:40:03.000Z | 2021-04-14T15:40:36.000Z | nomnom/migrations/0011_auto_20200414_1638.py | tluderer/nomnom-server | 0cdfe9a6d873d87edda56fad27b8dae99b317ab7 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.1 on 2020-04-14 16:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes IngredientSet.amount a required
    # non-negative small integer and IngredientSet.unit a required 32-char
    # string. preserve_default=False means the inline defaults (0 / '') were
    # only used to back-fill existing rows during this migration.
    dependencies = [
        ('nomnom', '0010_auto_20200414_1631'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ingredientset',
            name='amount',
            field=models.PositiveSmallIntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='ingredientset',
            name='unit',
            field=models.CharField(default='', max_length=32),
            preserve_default=False,
        ),
    ]
| 24.846154 | 62 | 0.589783 |
08b20c4bcadfbb49f301222e13604c78dd4fc6ca | 290 | py | Python | jaxns/prior_transforms/__init__.py | fehiepsi/jaxns | 9cf9366f11ace564e21f938edf4d090fb5de137d | [
"Apache-2.0"
] | null | null | null | jaxns/prior_transforms/__init__.py | fehiepsi/jaxns | 9cf9366f11ace564e21f938edf4d090fb5de137d | [
"Apache-2.0"
] | null | null | null | jaxns/prior_transforms/__init__.py | fehiepsi/jaxns | 9cf9366f11ace564e21f938edf4d090fb5de137d | [
"Apache-2.0"
] | null | null | null | from jaxns.prior_transforms.common import *
from jaxns.prior_transforms.deterministic import *
from jaxns.prior_transforms.identifiable import *
from jaxns.prior_transforms.levy import *
from jaxns.prior_transforms.mixture import *
from jaxns.prior_transforms.prior_chain import PriorChain
| 41.428571 | 57 | 0.858621 |
7466e8138b6f5775e0669ccd61e5f3004e9249a4 | 35,975 | py | Python | evalml/tests/automl_tests/test_iterative_algorithm.py | peterataylor/evalml | 917f07845c4a319bb08c7aaa8df9e09623df11c8 | [
"BSD-3-Clause"
] | null | null | null | evalml/tests/automl_tests/test_iterative_algorithm.py | peterataylor/evalml | 917f07845c4a319bb08c7aaa8df9e09623df11c8 | [
"BSD-3-Clause"
] | null | null | null | evalml/tests/automl_tests/test_iterative_algorithm.py | peterataylor/evalml | 917f07845c4a319bb08c7aaa8df9e09623df11c8 | [
"BSD-3-Clause"
] | null | null | null | from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from skopt.space import Categorical, Integer, Real
from evalml.automl.automl_algorithm import (
AutoMLAlgorithmException,
IterativeAlgorithm,
)
from evalml.model_family import ModelFamily
from evalml.pipelines import (
BinaryClassificationPipeline,
Estimator,
StackedEnsembleClassifier,
StackedEnsembleRegressor,
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.problem_types import ProblemTypes
@pytest.fixture
def dummy_binary_pipeline_classes():
    """Factory fixture for mock binary-classification pipelines.

    Returns a callable taking ``hyperparameters`` (a scalar, list/tuple, or a
    skopt space for MockEstimator's single ``dummy_parameter``) and returning
    ``(pipelines, allowed_component_graphs)`` — three identical
    single-estimator pipelines plus matching component-graph definitions.
    """
    # NOTE(review): the mutable default ``["default", "other"]`` is shared
    # across calls; harmless here since it is never mutated.
    def _method(hyperparameters=["default", "other"]):
        class MockEstimator(Estimator):
            # Minimal estimator stub with one tunable parameter.
            name = "Mock Classifier"
            model_family = ModelFamily.RANDOM_FOREST
            supported_problem_types = [ProblemTypes.BINARY, ProblemTypes.MULTICLASS]
            # A list/tuple/skopt space is used as the search range directly;
            # a scalar is wrapped so the range becomes a one-element category.
            if isinstance(hyperparameters, (list, tuple, Real, Categorical, Integer)):
                hyperparameter_ranges = {"dummy_parameter": hyperparameters}
            else:
                hyperparameter_ranges = {"dummy_parameter": [hyperparameters]}
            def __init__(
                self, dummy_parameter="default", n_jobs=-1, random_seed=0, **kwargs
            ):
                super().__init__(
                    parameters={
                        "dummy_parameter": dummy_parameter,
                        **kwargs,
                        "n_jobs": n_jobs,
                    },
                    component_obj=None,
                    random_seed=random_seed,
                )
        allowed_component_graphs = {
            "graph_1": [MockEstimator],
            "graph_2": [MockEstimator],
            "graph_3": [MockEstimator],
        }
        return [
            BinaryClassificationPipeline([MockEstimator]),
            BinaryClassificationPipeline([MockEstimator]),
            BinaryClassificationPipeline([MockEstimator]),
        ], allowed_component_graphs
    return _method
def test_iterative_algorithm_init(
    X_y_binary,
):
    """A default-constructed algorithm starts at batch/pipeline zero with one
    pipeline per available binary estimator."""
    X, y = X_y_binary

    algorithm = IterativeAlgorithm(X=X, y=y, problem_type="binary")

    assert algorithm.pipeline_number == 0
    assert algorithm.batch_number == 0
    expected_pipelines = [
        make_pipeline(X, y, est, "binary") for est in get_estimators("binary")
    ]
    assert len(algorithm.allowed_pipelines) == len(expected_pipelines)
def test_make_iterative_algorithm_custom_hyperparameters_error(
    dummy_binary_pipeline_classes, X_y_binary
):
    """A non-dict ``custom_hyperparameters`` argument is rejected with ValueError."""
    _, allowed_component_graphs = dummy_binary_pipeline_classes()
    X, y = X_y_binary

    # A list of per-component dicts is the wrong shape: a single dict is required.
    bad_hyperparameters = [
        {"Imputer": {"numeric_imput_strategy": ["median"]}},
        {"One Hot Encoder": {"value1": ["value2"]}},
    ]

    with pytest.raises(
        ValueError, match="If custom_hyperparameters provided, must be of type dict"
    ):
        IterativeAlgorithm(
            X=X,
            y=y,
            problem_type="binary",
            allowed_component_graphs=allowed_component_graphs,
            custom_hyperparameters=bad_hyperparameters,
        )
def test_iterative_algorithm_allowed_pipelines(
    X_y_binary, dummy_binary_pipeline_classes
):
    """allowed_pipelines mirrors the provided component graphs; counters start at 0."""
    X, y = X_y_binary
    expected_pipelines, allowed_component_graphs = dummy_binary_pipeline_classes()

    algorithm = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=allowed_component_graphs,
    )

    assert algorithm.pipeline_number == 0
    assert algorithm.batch_number == 0
    assert algorithm.allowed_pipelines == expected_pipelines
def test_iterative_algorithm_empty(X_y_binary, dummy_binary_pipeline_classes):
    """With no pipelines the algorithm rejects construction / empty batches sensibly."""
    X, y = X_y_binary
    _, allowed_component_graphs = dummy_binary_pipeline_classes()

    # Passing no component graphs at all is rejected outright.
    with pytest.raises(ValueError, match="No allowed pipelines to search"):
        IterativeAlgorithm(
            X=X, y=y, problem_type="binary", allowed_component_graphs={}
        )

    algorithm = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=allowed_component_graphs,
    )
    algorithm.allowed_pipelines = []

    assert algorithm.pipeline_number == 0
    assert algorithm.batch_number == 0
    assert algorithm.allowed_pipelines == []

    first_batch = algorithm.next_batch()
    assert [pipeline.__class__ for pipeline in first_batch] == []
    assert algorithm.pipeline_number == 0
    assert algorithm.batch_number == 1

    # With nothing evaluated, asking for another batch is an error.
    with pytest.raises(
        AutoMLAlgorithmException,
        match="No results were reported from the first batch",
    ):
        algorithm.next_batch()
    assert algorithm.batch_number == 1
    assert algorithm.pipeline_number == 0
@pytest.mark.parametrize("ensembling_value", [True, False])
@patch("evalml.tuners.skopt_tuner.Optimizer.tell")
def test_iterative_algorithm_results(
    mock_opt_tell,
    ensembling_value,
    dummy_binary_pipeline_classes,
    X_y_binary,
):
    """End-to-end batch generation: first batch has defaults, later batches tune
    one pipeline class each, and (when enabled) an ensemble batch follows each
    full round of tuning batches."""
    X, y = X_y_binary
    (
        dummy_binary_pipeline_classes,
        allowed_component_graphs,
    ) = dummy_binary_pipeline_classes()
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=allowed_component_graphs,
        ensembling=ensembling_value,
    )
    assert algo.pipeline_number == 0
    assert algo.batch_number == 0
    assert algo.allowed_pipelines == dummy_binary_pipeline_classes
    # initial batch contains one of each pipeline, with default parameters
    next_batch = algo.next_batch()
    assert len(next_batch) == len(dummy_binary_pipeline_classes)
    assert [p.__class__ for p in next_batch] == [
        p.__class__ for p in dummy_binary_pipeline_classes
    ]
    assert algo.pipeline_number == len(dummy_binary_pipeline_classes)
    assert algo.batch_number == 1
    assert all(
        [p.parameters == p.component_graph.default_parameters for p in next_batch]
    )
    # the "best" score will be the 1st dummy pipeline
    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})
    # subsequent batches contain pipelines_per_batch copies of one pipeline, moving from best to worst from the first batch
    last_batch_number = algo.batch_number
    last_pipeline_number = algo.pipeline_number
    all_parameters = []
    for i in range(1, 5):
        for _ in range(len(dummy_binary_pipeline_classes)):
            next_batch = algo.next_batch()
            assert len(next_batch) == algo.pipelines_per_batch
            # With ensembling an extra (ensemble) batch shifts the class rotation.
            num_pipelines_classes = (
                (len(dummy_binary_pipeline_classes) + 1)
                if ensembling_value
                else len(dummy_binary_pipeline_classes)
            )
            cls = dummy_binary_pipeline_classes[
                (algo.batch_number - 2) % num_pipelines_classes
            ].__class__
            assert [p.__class__ for p in next_batch] == [cls] * len(next_batch)
            assert all(
                [p.parameters["Mock Classifier"]["n_jobs"] == -1 for p in next_batch]
            )
            assert all((p.random_seed == algo.random_seed) for p in next_batch)
            assert algo.pipeline_number == last_pipeline_number + len(next_batch)
            last_pipeline_number = algo.pipeline_number
            assert algo.batch_number == last_batch_number + 1
            last_batch_number = algo.batch_number
            all_parameters.extend([p.parameters for p in next_batch])
            scores = -np.arange(0, len(next_batch))
            for score, pipeline in zip(scores, next_batch):
                algo.add_result(score, pipeline, {"id": algo.pipeline_number})
        # Tuning must have actually varied parameters across batches.
        assert any(
            [p != dummy_binary_pipeline_classes[0].parameters for p in all_parameters]
        )
        if ensembling_value:
            # check next batch is stacking ensemble batch
            assert algo.batch_number == (len(dummy_binary_pipeline_classes) + 1) * i
            next_batch = algo.next_batch()
            assert len(next_batch) == 1
            assert algo.batch_number == last_batch_number + 1
            last_batch_number = algo.batch_number
            assert algo.pipeline_number == last_pipeline_number + 1
            last_pipeline_number = algo.pipeline_number
            scores = np.arange(0, len(next_batch))
            for score, pipeline in zip(scores, next_batch):
                algo.add_result(score, pipeline, {"id": algo.pipeline_number})
            assert pipeline.model_family == ModelFamily.ENSEMBLE
            assert pipeline.random_seed == algo.random_seed
            estimators_used_in_ensemble = pipeline.component_graph.get_estimators()
            random_seeds_the_same = [
                (estimator.random_seed == algo.random_seed)
                for estimator in estimators_used_in_ensemble
            ]
            assert all(random_seeds_the_same)
    # The ensemble itself must never be recorded as a "best pipeline" candidate.
    assert ModelFamily.ENSEMBLE not in algo._best_pipeline_info
@patch("evalml.tuners.skopt_tuner.Optimizer.tell")
def test_iterative_algorithm_passes_pipeline_params(
    mock_opt_tell,
    X_y_binary,
    dummy_binary_pipeline_classes,
):
    """User-supplied ``pipeline_params`` must be carried unchanged into every
    pipeline of every batch IterativeAlgorithm produces."""
    X, y = X_y_binary
    pipeline_classes, component_graphs = dummy_binary_pipeline_classes()
    expected_pipeline_params = {"gap": 2, "max_delay": 10, "forecast_horizon": 3}
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=component_graphs,
        pipeline_params={"pipeline": expected_pipeline_params},
    )

    batch = algo.next_batch()
    assert all(p.parameters["pipeline"] == expected_pipeline_params for p in batch)

    # Feed ascending scores so the first pipeline is treated as the "best".
    for score, pipeline in zip(np.arange(0, len(batch)), batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    # Later tuning batches must keep the same pipeline-level parameters.
    for _ in range(1, 5):
        for _ in range(len(pipeline_classes)):
            batch = algo.next_batch()
            assert all(
                p.parameters["pipeline"] == expected_pipeline_params for p in batch
            )
            for score, pipeline in zip(-np.arange(0, len(batch)), batch):
                algo.add_result(score, pipeline, {"id": algo.pipeline_number})
@patch("evalml.tuners.skopt_tuner.Optimizer.tell")
def test_iterative_algorithm_passes_njobs(
    mock_opt_tell, X_y_binary, dummy_binary_pipeline_classes
):
    """``n_jobs`` passed to IterativeAlgorithm should appear on the Mock
    Classifier component of every pipeline in subsequent batches."""
    X, y = X_y_binary
    pipeline_classes, component_graphs = dummy_binary_pipeline_classes()
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=component_graphs,
        n_jobs=2,
        ensembling=False,
    )

    batch = algo.next_batch()
    # Ascending scores make the first pipeline the "best" one.
    for score, pipeline in zip(np.arange(0, len(batch)), batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    for _ in range(1, 3):
        for _ in range(len(pipeline_classes)):
            batch = algo.next_batch()
            assert all(
                p.parameters["Mock Classifier"]["n_jobs"] == 2 for p in batch
            )
            for score, pipeline in zip(-np.arange(0, len(batch)), batch):
                algo.add_result(score, pipeline, {"id": algo.pipeline_number})
@patch("evalml.tuners.skopt_tuner.Optimizer.tell")
@pytest.mark.parametrize("is_regression", [True, False])
@pytest.mark.parametrize("estimator", ["XGBoost", "CatBoost"])
def test_iterative_algorithm_passes_n_jobs_catboost_xgboost(
    mock_opt_tell, X_y_binary, X_y_regression, is_regression, estimator
):
    """``n_jobs`` should be forwarded to XGBoost/CatBoost estimators for both
    classification and regression problem types."""
    lib = "xgboost" if estimator == "XGBoost" else "catboost"
    pytest.importorskip(
        lib, reason=f"Skipping test because {lib} is not installed."
    )
    if is_regression:
        X, y = X_y_regression
        problem_type, suffix = "regression", "Regressor"
    else:
        X, y = X_y_binary
        problem_type, suffix = "binary", "Classifier"
    component_graphs = {"graph": [f"{estimator} {suffix}"]}

    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type=problem_type,
        allowed_component_graphs=component_graphs,
        n_jobs=2,
        ensembling=False,
    )
    batch = algo.next_batch()
    # Ascending scores make the first pipeline the "best" one.
    for score, pipeline in zip(np.arange(0, len(batch)), batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    for _ in range(1, 3):
        for _ in range(len(component_graphs)):
            batch = algo.next_batch()
            for pipeline in batch:
                # The estimator is the first (and only) component in the graph.
                first_component_params = list(pipeline.parameters.values())[0]
                assert first_component_params["n_jobs"] == 2
            for score, pipeline in zip(-np.arange(0, len(batch)), batch):
                algo.add_result(score, pipeline, {"id": algo.pipeline_number})
@pytest.mark.parametrize("ensembling_value", [True, False])
def test_iterative_algorithm_one_allowed_pipeline(
    X_y_binary, ensembling_value, dummy_binary_pipeline_classes
):
    """With exactly one allowed pipeline, the algorithm never inserts an
    ensembling batch (even when ensembling=True) and every later batch is
    pipelines_per_batch copies of that single pipeline with tuned parameters.
    """
    X, y = X_y_binary
    (
        dummy_binary_pipeline_classes,
        allowed_component_graphs,
    ) = dummy_binary_pipeline_classes()
    # Restrict the fixture output to a single pipeline / component graph.
    dummy_binary_pipeline_classes = [dummy_binary_pipeline_classes[0]]
    allowed_component_graphs = {"graph_1": allowed_component_graphs["graph_1"]}
    # Checks that when len(allowed_pipeline) == 1, ensembling is not run, even if set to True
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=allowed_component_graphs,
        ensembling=ensembling_value,
    )
    # Counters start at zero before any batch is requested.
    assert algo.pipeline_number == 0
    assert algo.batch_number == 0
    assert algo.allowed_pipelines == dummy_binary_pipeline_classes
    # initial batch contains one of each pipeline, with default parameters
    next_batch = algo.next_batch()
    assert len(next_batch) == 1
    assert [p.__class__ for p in next_batch] == [
        p.__class__ for p in dummy_binary_pipeline_classes
    ]
    assert algo.pipeline_number == 1
    assert algo.batch_number == 1
    assert all(
        [p.parameters == p.component_graph.default_parameters for p in next_batch]
    )
    # the "best" score will be the 1st dummy pipeline
    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})
    # subsequent batches contain pipelines_per_batch copies of one pipeline, moving from best to worst from the first batch
    last_batch_number = algo.batch_number
    last_pipeline_number = algo.pipeline_number
    all_parameters = []
    for i in range(1, 5):
        next_batch = algo.next_batch()
        assert len(next_batch) == algo.pipelines_per_batch
        assert all((p.random_seed == algo.random_seed) for p in next_batch)
        # Only one pipeline class is allowed, so the batch is all copies of it.
        assert [p.__class__ for p in next_batch] == [
            dummy_binary_pipeline_classes[0].__class__
        ] * algo.pipelines_per_batch
        # Pipeline/batch counters advance by exactly one batch's worth.
        assert algo.pipeline_number == last_pipeline_number + len(next_batch)
        last_pipeline_number = algo.pipeline_number
        assert algo.batch_number == last_batch_number + 1
        last_batch_number = algo.batch_number
        all_parameters.extend([p.parameters for p in next_batch])
        scores = -np.arange(0, len(next_batch))
        for score, pipeline in zip(scores, next_batch):
            algo.add_result(score, pipeline, {"id": algo.pipeline_number})
    # At least one tuned parameter set must differ from the defaults, i.e. the
    # tuner actually explored the search space.
    assert any(
        [
            p
            != dummy_binary_pipeline_classes[0]
            .__class__({})
            .component_graph.default_parameters
            for p in all_parameters
        ]
    )
@pytest.mark.parametrize("text_in_ensembling", [True, False])
@pytest.mark.parametrize("n_jobs", [-1, 0, 1, 2, 3])
def test_iterative_algorithm_stacked_ensemble_n_jobs_binary(
    n_jobs,
    X_y_binary,
    text_in_ensembling,
    dummy_binary_pipeline_classes,
):
    """The Stacked Ensemble Classifier should use n_jobs=1 when text features
    take part in ensembling, and the user-supplied n_jobs otherwise."""
    X, y = X_y_binary
    _, component_graphs = dummy_binary_pipeline_classes()
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=component_graphs,
        ensembling=True,
        text_in_ensembling=text_in_ensembling,
        n_jobs=n_jobs,
    )
    expected_n_jobs = 1 if text_in_ensembling else n_jobs

    batch = algo.next_batch()
    for score, pipeline in zip(range(len(batch)), batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    # Walk several batches and check every ensemble pipeline encountered.
    seen_ensemble = False
    for _ in range(5):
        batch = algo.next_batch()
        for pipeline in batch:
            if isinstance(pipeline.estimator, StackedEnsembleClassifier):
                seen_ensemble = True
                assert (
                    pipeline.parameters["Stacked Ensemble Classifier"]["n_jobs"]
                    == expected_n_jobs
                )
    assert seen_ensemble
@pytest.mark.parametrize("text_in_ensembling", [True, False])
@pytest.mark.parametrize("n_jobs", [-1, 0, 1, 2, 3])
def test_iterative_algorithm_stacked_ensemble_n_jobs_regression(
    n_jobs, text_in_ensembling, X_y_regression, linear_regression_pipeline_class
):
    """The Stacked Ensemble Regressor should use n_jobs=1 when text features
    take part in ensembling, and the user-supplied n_jobs otherwise."""
    X, y = X_y_regression
    graph = linear_regression_pipeline_class.component_graph
    allowed_component_graphs = {"graph_1": graph, "graph_2": graph}
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="regression",
        allowed_component_graphs=allowed_component_graphs,
        ensembling=True,
        text_in_ensembling=text_in_ensembling,
        n_jobs=n_jobs,
    )
    expected_n_jobs = 1 if text_in_ensembling else n_jobs

    batch = algo.next_batch()
    for score, pipeline in zip(range(len(batch)), batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    # Walk several batches and check every ensemble pipeline encountered.
    seen_ensemble = False
    for _ in range(5):
        batch = algo.next_batch()
        for pipeline in batch:
            if isinstance(pipeline.estimator, StackedEnsembleRegressor):
                seen_ensemble = True
                assert (
                    pipeline.parameters["Stacked Ensemble Regressor"]["n_jobs"]
                    == expected_n_jobs
                )
    assert seen_ensemble
@pytest.mark.parametrize(
    "parameters",
    [1, "hello", 1.3, -1.0006, Categorical([1, 3, 4]), Integer(2, 4), Real(2, 6)],
)
def test_iterative_algorithm_pipeline_params(
    X_y_binary,
    parameters,
    dummy_binary_pipeline_classes,
):
    """Concrete values in ``pipeline_params`` are forwarded to every batch,
    while skopt.Space objects (hyperparameter ranges, not concrete values)
    are rejected by the constructor with a ValueError.
    """
    X, y = X_y_binary
    (
        dummy_binary_pipeline_classes,
        allowed_component_graphs,
    ) = dummy_binary_pipeline_classes(parameters)
    # Build the kwargs once instead of duplicating them in both branches.
    pipeline_params = {
        "pipeline": {"gap": 2, "max_delay": 10, "forecast_horizon": 3},
        "Mock Classifier": {"dummy_parameter": parameters},
    }
    if isinstance(parameters, (Categorical, Integer, Real)):
        # skopt search-space objects describe ranges, not concrete parameter
        # values, so constructing the algorithm with one must fail.
        with pytest.raises(
            ValueError,
            match="Pipeline parameters should not contain skopt.Space variables",
        ):
            IterativeAlgorithm(
                X=X,
                y=y,
                problem_type="binary",
                allowed_component_graphs=allowed_component_graphs,
                random_seed=0,
                pipeline_params=pipeline_params,
            )
        return

    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=allowed_component_graphs,
        random_seed=0,
        pipeline_params=pipeline_params,
    )
    next_batch = algo.next_batch()
    assert all(
        p.parameters["pipeline"] == {"gap": 2, "max_delay": 10, "forecast_horizon": 3}
        for p in next_batch
    )
    assert all(
        p.parameters["Mock Classifier"]
        == {"dummy_parameter": parameters, "n_jobs": -1}
        for p in next_batch
    )
    scores = np.arange(0, len(next_batch))
    for score, pipeline in zip(scores, next_batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})
    # make sure that future batches have the same parameter value
    for _ in range(1, 5):
        next_batch = algo.next_batch()
        assert all(
            p.parameters["Mock Classifier"]["dummy_parameter"] == parameters
            for p in next_batch
        )
@pytest.mark.parametrize(
    "parameters,hyperparameters",
    [
        (1, Categorical([1, 3, 4])),
        (3, Integer(2, 4)),
        (5, Categorical([1, 3, 4])),
        (1, 1),
    ],
)
def test_iterative_algorithm_custom_hyperparameters(
    parameters,
    hyperparameters,
    X_y_binary,
    dummy_binary_pipeline_classes,
):
    """custom_hyperparameters must be skopt.Space objects; tuned values in
    later batches must stay inside the given range, and a default parameter
    outside the range makes add_result raise.
    """
    X, y = X_y_binary
    (
        dummy_binary_pipeline_classes,
        allowed_component_graphs,
    ) = dummy_binary_pipeline_classes(parameters)
    if not isinstance(hyperparameters, (Categorical, Integer, Real)):
        # A plain value is not a search space — the constructor must reject it.
        with pytest.raises(
            ValueError, match="Custom hyperparameters should only contain skopt"
        ):
            IterativeAlgorithm(
                X=X,
                y=y,
                problem_type="binary",
                allowed_component_graphs=allowed_component_graphs,
                random_seed=0,
                pipeline_params={"Mock Classifier": {"dummy_parameter": parameters}},
                custom_hyperparameters={
                    "Mock Classifier": {"dummy_parameter": hyperparameters}
                },
            )
        return
    else:
        algo = IterativeAlgorithm(
            X=X,
            y=y,
            problem_type="binary",
            allowed_component_graphs=allowed_component_graphs,
            random_seed=0,
            pipeline_params={"Mock Classifier": {"dummy_parameter": parameters}},
            custom_hyperparameters={
                "Mock Classifier": {"dummy_parameter": hyperparameters}
            },
        )
    # First batch uses the concrete pipeline_params value as-is.
    next_batch = algo.next_batch()
    assert all([p.parameters["Mock Classifier"]["n_jobs"] == -1 for p in next_batch])
    assert all(
        [
            p.parameters["Mock Classifier"]["dummy_parameter"] == parameters
            for p in next_batch
        ]
    )
    scores = np.arange(0, len(next_batch))
    if parameters not in hyperparameters:
        # Default value outside the declared range: reporting a result fails.
        # NOTE(review): the assert at the bottom still runs on this path but
        # references all_dummies, which is only defined in the else branch —
        # confirm this parametrization ever reaches it without a NameError.
        for score, pipeline in zip(scores, next_batch):
            with pytest.raises(ValueError, match="Default parameters for components"):
                algo.add_result(score, pipeline, {"id": algo.pipeline_number})
    else:
        for score, pipeline in zip(scores, next_batch):
            algo.add_result(score, pipeline, {"id": algo.pipeline_number})
        # make sure that future batches remain in the hyperparam range
        all_dummies = set()
        for i in range(1, 5):
            next_batch = algo.next_batch()
            for p in next_batch:
                dummy = p.parameters["Mock Classifier"]["dummy_parameter"]
                if dummy not in all_dummies:
                    all_dummies.add(dummy)
            assert all(
                [
                    p.parameters["Mock Classifier"]["dummy_parameter"]
                    in hyperparameters
                    for p in next_batch
                ]
            )
    # Expect the tuner to have explored the full range of the search space.
    assert all_dummies == {1, 3, 4} if parameters == 1 else all_dummies == {2, 3, 4}
def test_iterative_algorithm_pipeline_params_kwargs(
    X_y_binary, dummy_binary_pipeline_classes
):
    """Extra keyword params in pipeline_params are forwarded to the component
    alongside its defaults (here, n_jobs)."""
    X, y = X_y_binary
    _, component_graphs = dummy_binary_pipeline_classes()
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=component_graphs,
        pipeline_params={
            "Mock Classifier": {"dummy_parameter": "dummy", "fake_param": "fake"}
        },
        random_seed=0,
    )
    expected = {"dummy_parameter": "dummy", "n_jobs": -1, "fake_param": "fake"}
    assert all(
        p.parameters["Mock Classifier"] == expected for p in algo.next_batch()
    )
def test_iterative_algorithm_results_best_pipeline_info_id(
    X_y_binary,
    dummy_binary_pipeline_classes,
    logistic_regression_binary_pipeline_class,
):
    """_best_pipeline_info should track, per model family, the id of the
    best-scoring pipeline reported via add_result."""
    X, y = X_y_binary
    LogisticRegressionBinaryPipeline = logistic_regression_binary_pipeline_class
    (
        dummy_binary_pipeline_classes,
        allowed_component_graphs,
    ) = dummy_binary_pipeline_classes()
    # Two graphs from two different model families (random forest + linear).
    allowed_component_graphs = {
        "graph_1": allowed_component_graphs["graph_1"],
        "graph_2": LogisticRegressionBinaryPipeline.component_graph,
    }
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="binary",
        allowed_component_graphs=allowed_component_graphs,
    )
    # initial batch contains one of each pipeline, with default parameters
    next_batch = algo.next_batch()
    scores = np.arange(0, len(next_batch))
    for pipeline_num, (score, pipeline) in enumerate(zip(scores, next_batch)):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number + pipeline_num})
    # Ids derived from pipeline_number + offset at reporting time.
    assert algo._best_pipeline_info[ModelFamily.RANDOM_FOREST]["id"] == 3
    assert algo._best_pipeline_info[ModelFamily.LINEAR_MODEL]["id"] == 2
    for i in range(1, 3):
        next_batch = algo.next_batch()
        # NOTE(review): np.arange(1, len(next_batch)) has len(next_batch) - 1
        # entries, so zip() below silently skips the batch's last pipeline —
        # confirm whether that is intentional.
        scores = -np.arange(
            1, len(next_batch)
        )  # Score always gets better with each pipeline
        for pipeline_num, (score, pipeline) in enumerate(zip(scores, next_batch)):
            algo.add_result(
                score, pipeline, {"id": algo.pipeline_number + pipeline_num}
            )
            # Each new (better) result must replace the family's best id.
            assert (
                algo._best_pipeline_info[pipeline.model_family]["id"]
                == algo.pipeline_number + pipeline_num
            )
@pytest.mark.parametrize(
    "problem_type",
    [ProblemTypes.REGRESSION, ProblemTypes.BINARY, ProblemTypes.MULTICLASS],
)
def test_iterative_algorithm_first_batch_order(
    problem_type, X_y_binary, has_minimal_dependencies
):
    """The first batch lists linear models first, then extra-dependency
    estimators (when installed), then the core tree-based estimators."""
    X, y = X_y_binary
    algo = IterativeAlgorithm(X=X, y=y, problem_type=problem_type)
    # initial batch contains one of each pipeline, with default parameters
    first_batch_estimators = [p.estimator.name for p in algo.next_batch()]

    is_regression = problem_type == ProblemTypes.REGRESSION
    suffix = "Regressor" if is_regression else "Classifier"
    linear_models = [f"Elastic Net {suffix}"]
    if not is_regression:
        linear_models.append("Logistic Regression Classifier")
    extra_dep_estimators = (
        []
        if has_minimal_dependencies
        else [f"XGBoost {suffix}", f"LightGBM {suffix}", f"CatBoost {suffix}"]
    )
    core_estimators = [
        f"Random Forest {suffix}",
        f"Decision Tree {suffix}",
        f"Extra Trees {suffix}",
    ]

    expected = linear_models + extra_dep_estimators + core_estimators
    assert first_batch_estimators == expected
def test_iterative_algorithm_first_batch_order_param(
    X_y_binary, has_minimal_dependencies
):
    """_estimator_family_order overrides the default first-batch ordering."""
    X, y = X_y_binary
    # put random forest first
    family_order = [
        ModelFamily.RANDOM_FOREST,
        ModelFamily.LINEAR_MODEL,
        ModelFamily.DECISION_TREE,
        ModelFamily.EXTRA_TREES,
        ModelFamily.XGBOOST,
        ModelFamily.LIGHTGBM,
        ModelFamily.CATBOOST,
    ]
    algo = IterativeAlgorithm(
        X=X, y=y, problem_type="binary", _estimator_family_order=family_order
    )
    first_batch_estimators = [p.estimator.name for p in algo.next_batch()]

    expected = [
        "Random Forest Classifier",
        "Elastic Net Classifier",
        "Logistic Regression Classifier",
        "Decision Tree Classifier",
        "Extra Trees Classifier",
    ]
    if not has_minimal_dependencies:
        # Extra-dependency families come last in the requested order.
        expected += [
            "XGBoost Classifier",
            "LightGBM Classifier",
            "CatBoost Classifier",
        ]
    assert first_batch_estimators == expected
@pytest.mark.parametrize(
    "sampler",
    ["Undersampler", "Oversampler"],
)
@pytest.mark.parametrize("problem_type", [ProblemTypes.BINARY, ProblemTypes.MULTICLASS])
def test_iterative_algorithm_sampling_params(
    problem_type, sampler, mock_imbalanced_data_X_y, has_minimal_dependencies
):
    """Sampler components keep the default sampling_ratio of 0.25 in the first
    batch and in all subsequent (tuned) batches."""
    if has_minimal_dependencies and sampler != "Undersampler":
        pytest.skip(
            "Minimal dependencies, so we don't test the oversamplers for iterative algorithm"
        )

    def assert_sampling_ratio(batch):
        # Every sampler component in every pipeline should keep the 0.25 ratio.
        for pipeline in batch:
            for component in pipeline.component_graph:
                if "sampler" in component.name:
                    assert component.parameters["sampling_ratio"] == 0.25

    X, y = mock_imbalanced_data_X_y(problem_type, "some", "small")
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type=problem_type,
        random_seed=0,
        sampler_name=sampler,
    )

    batch = algo.next_batch()
    assert_sampling_ratio(batch)
    for score, pipeline in zip(np.arange(0, len(batch)), batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    # make sure that future batches remain in the hyperparam range
    for _ in range(1, 5):
        assert_sampling_ratio(algo.next_batch())
@pytest.mark.parametrize("allowed_model_families", [None, [ModelFamily.XGBOOST]])
@pytest.mark.parametrize(
    "allowed_component_graphs",
    [None, {"Pipeline_1": ["Imputer", "XGBoost Classifier"]}],
)
@pytest.mark.parametrize("allow_long_running_models", [True, False])
@pytest.mark.parametrize(
    "length,models_missing",
    [
        (10, []),
        (75, []),
        (100, ["Elastic Net Classifier", "XGBoost Classifier"]),
        (160, ["Elastic Net Classifier", "XGBoost Classifier", "CatBoost Classifier"]),
    ],
)
def test_iterative_algorithm_allow_long_running_models(
    length,
    models_missing,
    allow_long_running_models,
    allowed_component_graphs,
    allowed_model_families,
    has_minimal_dependencies,
):
    """When allow_long_running_models=False and the multiclass target has many
    classes (controlled by ``length``), the slow estimators in
    ``models_missing`` are dropped from allowed_pipelines; explicit
    allowed_model_families / allowed_component_graphs bypass that filtering.
    """
    if has_minimal_dependencies:
        # The slow estimators aren't installed, so there is nothing to filter.
        return
    X = pd.DataFrame()
    # length distinct class labels, repeated 5 times each.
    y = pd.Series([i for i in range(length)] * 5)
    y_short = pd.Series([i for i in range(10)] * 5)
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="multiclass",
        random_seed=0,
        allowed_model_families=allowed_model_families,
        allowed_component_graphs=allowed_component_graphs,
        allow_long_running_models=allow_long_running_models,
    )
    if allowed_model_families is not None or allowed_component_graphs is not None:
        # An explicit restriction leaves exactly the one requested pipeline.
        assert len(algo.allowed_pipelines) == 1
        return
    # Baseline: same setup but with only 10 classes, so nothing is filtered.
    algo_short = IterativeAlgorithm(
        X=X,
        y=y_short,
        problem_type="multiclass",
        random_seed=0,
        allowed_model_families=allowed_model_families,
        allowed_component_graphs=allowed_component_graphs,
    )
    if allow_long_running_models:
        assert len(algo_short.allowed_pipelines) == len(algo.allowed_pipelines)
    else:
        assert len(algo_short.allowed_pipelines) == len(algo.allowed_pipelines) + len(
            models_missing
        )
    # None of the filtered estimator names should appear in remaining pipelines.
    for p in algo.allowed_pipelines:
        assert all([s not in p.name for s in models_missing])
@pytest.mark.parametrize("problem", ["binary", "multiclass", "regression"])
@pytest.mark.parametrize("allow_long_running_models", [True, False])
@pytest.mark.parametrize(
    "length,models_missing", [(10, 0), (75, 0), (100, 2), (160, 3)]
)
def test_iterative_algorithm_allow_long_running_models_problem(
    length, models_missing, allow_long_running_models, problem, has_minimal_dependencies
):
    """Long-running-model filtering only kicks in for multiclass problems with
    many classes and allow_long_running_models=False; ``models_missing`` is the
    expected count of dropped estimators for the given class count."""
    X = pd.DataFrame()
    # length distinct class labels, repeated 5 times each.
    y = pd.Series([i for i in range(length)] * 5)
    y_short = pd.Series([i for i in range(10)] * 5)
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type=problem,
        random_seed=0,
        allow_long_running_models=allow_long_running_models,
    )
    # Reference algorithm with few classes: never filtered.
    algo_reg = IterativeAlgorithm(
        X=X,
        y=y_short,
        problem_type=problem,
        random_seed=0,
    )
    if problem != "multiclass" or allow_long_running_models:
        # No filtering applies, so both algorithms agree and nothing is missing.
        assert len(algo.allowed_pipelines) == len(algo_reg.allowed_pipelines)
        models_missing = 0
    if has_minimal_dependencies and models_missing > 0:
        # no XGBoost or CatBoost installed
        models_missing = 1
    assert len(algo.allowed_pipelines) + models_missing == len(
        algo_reg.allowed_pipelines
    )
def test_iterative_algorithm_allow_long_running_models_next_batch(
    has_minimal_dependencies,
):
    """With allow_long_running_models=False and a 200-class multiclass target,
    the slow estimators never appear in any produced batch."""
    excluded = [
        "Elastic Net Classifier",
        "XGBoost Classifier",
        "CatBoost Classifier",
    ]
    if has_minimal_dependencies:
        excluded = ["Elastic Net Classifier"]

    X = pd.DataFrame()
    y = pd.Series([i for i in range(200)] * 5)
    algo = IterativeAlgorithm(
        X=X,
        y=y,
        problem_type="multiclass",
        random_seed=0,
        allow_long_running_models=False,
    )

    def assert_none_excluded(batch):
        for pipeline in batch:
            assert all(name not in pipeline.name for name in excluded)

    batch = algo.next_batch()
    assert_none_excluded(batch)
    # Ascending scores make the first pipeline the "best" one.
    for score, pipeline in zip(np.arange(0, len(batch)), batch):
        algo.add_result(score, pipeline, {"id": algo.pipeline_number})

    for _ in range(1, 5):
        batch = algo.next_batch()
        assert_none_excluded(batch)
        for score, pipeline in zip(-np.arange(0, len(batch)), batch):
            algo.add_result(score, pipeline, {"id": algo.pipeline_number})
| 33.747655 | 123 | 0.630605 |
53b44948425c6b3d8fe3277b00d622fee9036796 | 328 | py | Python | pymcbdsc/exceptions.py | ktooi/pymcbdsc | 5fccd02adad21ed6e25f955357fc8daad075ae17 | [
"MIT"
] | null | null | null | pymcbdsc/exceptions.py | ktooi/pymcbdsc | 5fccd02adad21ed6e25f955357fc8daad075ae17 | [
"MIT"
] | 1 | 2021-01-22T06:15:55.000Z | 2021-01-22T06:15:55.000Z | pymcbdsc/exceptions.py | ktooi/pymcbdsc | 5fccd02adad21ed6e25f955357fc8daad075ae17 | [
"MIT"
] | null | null | null |
class FailureAgreeMeulaAndPpError(Exception):
    """Exception indicating that the MEULA and Privacy Policy were not agreed to.

    Downloading the Minecraft Bedrock Edition server requires agreement to the
    MEULA and Privacy Policy; this exception is raised when a download is
    attempted without that agreement.

    TODO:
        Add a clearer message to the exception making it obvious that agreement
        to the MEULA and Privacy Policy is required.
    """
    pass
| 27.333333 | 85 | 0.756098 |
34e1ecc8bae9a918d86f5f13887a9ddf8a1e627a | 3,155 | py | Python | test_helper.py | AndyRae/uk-box-office | b40249ede430e66c2b01a0bbc00b4ab275817bee | [
"MIT"
] | null | null | null | test_helper.py | AndyRae/uk-box-office | b40249ede430e66c2b01a0bbc00b4ab275817bee | [
"MIT"
] | 5 | 2020-05-24T18:57:33.000Z | 2022-02-06T13:00:36.000Z | test_helper.py | AndyRae/uk-box-office | b40249ede430e66c2b01a0bbc00b4ab275817bee | [
"MIT"
] | null | null | null | import pytest
from unittest.mock import patch, Mock
import unittest
from datetime import datetime, timedelta
import pandas as pd
import helper
@patch("helper.pd.read_csv")
@pytest.mark.parametrize(
    "test_input,expected",
    [
        ("20TH CENTRUY FOX", "20TH CENTURY FOX",),
        ("WARNER BROS.", "WARNER BROS"),
        ("CURZON", "CURZON"),
    ],
)
def test_spellcheck_distributor(mock_read_csv, test_input, expected):
    """Known distributor misspellings are corrected; unknown names pass through."""
    corrections = pd.DataFrame(
        {
            "key": ["20TH CENTRUY FOX", "WARNER BROS."],
            "correction": ["20TH CENTURY FOX", "WARNER BROS"],
        }
    )
    mock_read_csv.return_value = corrections
    assert helper.spellcheck_distributor(test_input) == expected
    mock_read_csv.assert_called_once_with("./data/distributor_check.csv", header=None)
@patch("helper.pd.read_csv")
@pytest.mark.parametrize(
    "test_input,expected",
    [
        (
            "HARRY POTTER AND THE HALF BLOOD PRINCE",
            "HARRY POTTER AND THE HALF-BLOOD PRINCE",
        ),
        ("WOMAN IN BLACK, THE", "THE WOMAN IN BLACK"),
        ("THE WOMAN IN BLACK (MOMENTUM)", "THE WOMAN IN BLACK"),
        ("LA DOLCE VITA", "LA DOLCE VITA"),
    ],
)
def test_spellcheck_film(mock_read_csv, test_input, expected):
    """Known film-title variants are normalised; unknown titles pass through."""
    corrections = pd.DataFrame(
        {
            "key": [
                "HARRY POTTER AND THE HALF BLOOD PRINCE",
                "WOMAN IN BLACK, THE",
                "THE WOMAN IN BLACK (MOMENTUM)",
            ],
            "correction": [
                "HARRY POTTER AND THE HALF-BLOOD PRINCE",
                "THE WOMAN IN BLACK",
                "THE WOMAN IN BLACK",
            ],
        }
    )
    mock_read_csv.return_value = corrections
    assert helper.spellcheck_film(test_input) == expected
    mock_read_csv.assert_called_once_with("./data/film_check.csv", header=None)
def test_get_last_sunday():
    # TODO: This is not how to test a datetime function.
    # NOTE(review): this re-derives "last Sunday" with the same arithmetic the
    # production code presumably uses, and calls datetime.now() separately from
    # helper.get_last_sunday() — it can flake if the two calls straddle
    # midnight (or a week boundary). Consider freezing time (e.g. patching the
    # datetime used inside helper) instead — TODO confirm helper internals.
    today = datetime.now()
    sunday = today - timedelta(days=today.isoweekday())
    assert helper.get_last_sunday() == sunday.strftime("%Y%m%d")
# TODO: Add further test parametrize objects - this needs to be more extensive
@patch("helper.pd.read_csv")
@pytest.mark.parametrize(
    "test_input,expected", [(1, 43707991.0,), (10, 271538.0),],
)
def test_get_week_box_office(mock_read_csv, test_input, expected):
    """get_week_box_office: week 1 yields the film's total gross; later weeks
    yield this week's total minus the previous week's total from the mocked
    CSV (43707991 - 43436453 = 271538) — presumably; confirm helper internals.
    """
    # Current-week row; weeks_on_release is the parametrized input.
    d = {
        "date": 20200315,
        "rank": 1.0,
        "title": "1917",
        "country": "UK/USA",
        "weekend_gross": 108947.0,
        "distributor": "EONE FILMS",
        "weeks_on_release": test_input,
        "number_of_cinemas": 293.0,
        "total_gross": 43707991.0,
    }
    df = pd.Series(data=d)
    # Previous week's data, returned by the patched pd.read_csv inside helper.
    mock_read_csv.return_value = pd.DataFrame(
        {
            "date": [20200308],
            "rank": [1.0],
            "title": ["1917"],
            "country": ["UK/USA"],
            "weekend_gross": [247291.0],
            "distributor": ["EONE FILMS"],
            "weeks_on_release": [9],
            "number_of_cinemas": [388.0],
            "total_gross": [43436453.0],
        })
    result = helper.get_week_box_office(df)
    assert result == expected
85e46e73e698c388fa96b0566ce69a4957c9da81 | 2,308 | py | Python | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/JoinResourceGroupRequest.py | DataDog/aliyun-openapi-python-sdk | 5cbee29bce6416dd62f61f0c3786b1af6ea0d84f | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/JoinResourceGroupRequest.py | liusc27/aliyun-openapi-python-sdk | 5e3db3535dd21de987dc5981e71151327d5a884f | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/JoinResourceGroupRequest.py | liusc27/aliyun-openapi-python-sdk | 5e3db3535dd21de987dc5981e71151327d5a884f | [
"Apache-2.0"
] | 1 | 2021-02-23T11:27:54.000Z | 2021-02-23T11:27:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class JoinResourceGroupRequest(RpcRequest):
	"""RPC request for the ECS ``JoinResourceGroup`` API (version 2014-05-26).

	Adds a resource to a resource group. Each getter/setter pair below reads or
	writes one query parameter of the request; setters store the value via
	``add_query_param`` on the underlying RpcRequest.
	"""

	def __init__(self):
		# Product "Ecs", API version "2014-05-26", action "JoinResourceGroup".
		RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'JoinResourceGroup','ecs')

	def get_ResourceGroupId(self):
		return self.get_query_params().get('ResourceGroupId')

	def set_ResourceGroupId(self,ResourceGroupId):
		self.add_query_param('ResourceGroupId',ResourceGroupId)

	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	def get_ResourceId(self):
		return self.get_query_params().get('ResourceId')

	def set_ResourceId(self,ResourceId):
		self.add_query_param('ResourceId',ResourceId)

	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

	def get_OwnerAccount(self):
		return self.get_query_params().get('OwnerAccount')

	def set_OwnerAccount(self,OwnerAccount):
		self.add_query_param('OwnerAccount',OwnerAccount)

	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)

	def get_ResourceType(self):
		return self.get_query_params().get('ResourceType')

	def set_ResourceType(self,ResourceType):
		self.add_query_param('ResourceType',ResourceType)
f9e606769c62168b4ef4af5082f00c6aceacb880 | 5,011 | py | Python | docs/conf.py | dhinakg/BitSTAR | f2693c5a0612e58e337511023f8f9e4f25543e33 | [
"Apache-2.0"
] | 6 | 2017-04-29T03:45:56.000Z | 2018-05-27T02:03:13.000Z | docs/conf.py | dhinakg/BitSTAR | f2693c5a0612e58e337511023f8f9e4f25543e33 | [
"Apache-2.0"
] | 18 | 2017-04-12T20:26:05.000Z | 2018-06-23T18:11:55.000Z | docs/conf.py | dhinakg/BitSTAR | f2693c5a0612e58e337511023f8f9e4f25543e33 | [
"Apache-2.0"
] | 16 | 2017-04-30T05:04:15.000Z | 2019-08-15T04:59:09.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Starbot documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 31 00:14:27 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.viewcode',
              'sphinx.ext.githubpages']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Starbot'
copyright = '2017, Sydney Erickson and CorpNewt'
author = 'Sydney Erickson and CorpNewt'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# FIX: html_theme was previously assigned twice -- first to
# "sphinx_rtd_theme" near the top of the file and then clobbered by the
# sphinx-quickstart default 'alabaster' here, which silently disabled the
# Read the Docs theme the import above clearly intends to use. Assign the
# theme exactly once, here, alongside its theme path.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'Starbotdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Starbot.tex', 'Starbot Documentation',
     'Sydney Erickson and CorpNewt', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'starbot', 'Starbot Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Starbot', 'Starbot Documentation',
     author, 'Starbot', 'One line description of project.',
     'Miscellaneous'),
]
| 30.005988 | 79 | 0.683696 |
d94bba06b081285a9b011ac112ab365d2868111e | 1,377 | py | Python | flink-ml-operator/src/test/python/input_output_row.py | rohankumardubey/dl-on-flink | 60646aa9520f49619b64e9ff03ce73959e8a3858 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2022-02-03T23:54:10.000Z | 2022-02-03T23:54:10.000Z | flink-ml-operator/src/test/python/input_output_row.py | rohankumardubey/dl-on-flink | 60646aa9520f49619b64e9ff03ce73959e8a3858 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | flink-ml-operator/src/test/python/input_output_row.py | rohankumardubey/dl-on-flink | 60646aa9520f49619b64e9ff03ce73959e8a3858 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import traceback
from flink_ml_framework.java_file import *
def map_func(context):
    """Echo worker: read records from the Java side and write each one back.

    Builds a BytesRecorder over the context's from-Java / to-Java channels
    and loops forever, reading one record at a time, logging it (tagged with
    this worker's index), and writing the same bytes back. The loop ends
    when read_record (or write_record) raises — typically because the input
    channel closed — at which point the traceback is printed and the
    function returns normally.

    :param context: flink_ml_framework context providing from_java(),
                    to_java() and the worker ``index``.
    """
    bytes_recorder = BytesRecorder(context.from_java(), context.to_java())
    try:
        while True:
            data = bytes_recorder.read_record()
            print(context.index, "data:", data)
            sys.stdout.flush()  # flush so logs interleave correctly with the Java side
            res = bytes_recorder.write_record(data)
            print(context.index, "res:", res)
            sys.stdout.flush()
    except Exception:
        # Deliberate best-effort handling: log the traceback instead of
        # crashing the worker, so an exhausted/closed channel ends the
        # echo loop cleanly. (The previous unused `e` binding is dropped.)
        print(traceback.format_exc())
        sys.stdout.flush()
| 37.216216 | 75 | 0.708061 |
b909bdc1dff1391c8dcfaee628e9b27ccc1ec2b2 | 420 | py | Python | old_code/costum_loss_functions.py | pgruening/dlbio | 0c4e468bcd5d7e298fbecba13003bcae36889486 | [
"MIT"
] | 1 | 2020-10-08T11:14:48.000Z | 2020-10-08T11:14:48.000Z | old_code/costum_loss_functions.py | pgruening/dlbio | 0c4e468bcd5d7e298fbecba13003bcae36889486 | [
"MIT"
] | 5 | 2020-03-24T18:01:02.000Z | 2022-03-12T00:17:24.000Z | old_code/costum_loss_functions.py | pgruening/dlbio | 0c4e468bcd5d7e298fbecba13003bcae36889486 | [
"MIT"
] | 1 | 2021-11-29T10:31:28.000Z | 2021-11-29T10:31:28.000Z | class INetworkLossFunction(object):
def __call__(self, y_true, y_pred):
raise NotImplementedError
class CostumMetric(INetworkLossFunction):
    """Adapter that wraps a plain callable as a named metric object.

    External consumers read these attributes, so their names are part of
    the interface:
      __name__ -- display name of the metric
      mode     -- optimization mode string
      func     -- the wrapped callable
    """

    # NOTE: This base code is meant for using keras functions
    def __init__(self, name, mode, func):
        self.__name__ = name
        self.mode = mode
        self.func = func

    def __call__(self, y_true, y_pred):
        """Delegate straight to the wrapped function."""
        return self.func(y_true, y_pred)
| 26.25 | 61 | 0.67619 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.