repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
snakecon/AI_Lab | spider/book/book/pipelines.py | 1 | 4704 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import hashlib
import book.database as db
from scrapy import Request
from scrapy.utils.misc import arg_to_iter
from twisted.internet.defer import DeferredList
from scrapy.pipelines.images import ImagesPipeline
from book.items import Subject, Meta, Comment
class BookPipeline(object):
    """Persist scraped douban items (subjects, book metadata, comments) to MySQL.

    Subjects and comments are inserted only when not already stored; book
    metadata is inserted when new and updated otherwise.
    """

    def get_subject(self, item):
        """Return the stored subject row for the item's douban_id, or None."""
        # NOTE(review): the id is interpolated straight into the SQL string.
        # It originates from scraped pages, so parameterized queries would be
        # safer if db.conn supports them -- confirm the db API before changing.
        sql = 'SELECT * FROM subjects WHERE douban_id=%s' % item['douban_id']
        return db.conn.get(sql)

    def save_subject(self, item):
        """Insert a new subject row built from all fields on the item."""
        # Materialize keys/values once so the two lists stay in sync and the
        # code also works with Python 3 dict views (which cannot be len()'d
        # twice into matching orders after mutation or be appended to).
        keys = list(item.keys())
        values = list(item.values())
        fields = ','.join(keys)
        placeholders = ','.join(['%s'] * len(keys))
        sql = 'INSERT INTO subjects (%s) VALUES (%s)' % (fields, placeholders)
        db.conn.insert(sql, *values)

    def get_meta(self, item):
        """Return the stored book row for the item's douban_id, or None."""
        sql = 'SELECT * FROM books WHERE douban_id=%s' % item['douban_id']
        return db.conn.get(sql)

    def save_meta(self, item):
        """Insert a new book row; string fields are stripped of whitespace."""
        keys = list(item.keys())
        values = list(item.values())
        fields = ','.join(keys)
        placeholders = ','.join(['%s'] * len(keys))
        sql = 'INSERT INTO books (%s) VALUES (%s)' % (fields, placeholders)
        db.conn.insert(sql, *(i.strip() for i in values))

    def update_meta(self, item):
        """Update an existing book row, keyed by its douban_id."""
        douban_id = item.pop('douban_id')
        keys = list(item.keys())
        # list() is required: Python 3 dict views do not support append().
        values = list(item.values())
        values.append(douban_id)
        assignments = ['%s=' % k + '%s' for k in keys]
        sql = 'UPDATE books SET %s WHERE douban_id=%s' % (','.join(assignments), '%s')
        db.conn.update(sql, *values)

    def get_comment(self, item):
        """Return the stored comment row for this douban_comment_id, or None."""
        sql = 'SELECT * FROM comments WHERE douban_comment_id=%s' % item['douban_comment_id']
        return db.conn.get(sql)

    def save_comment(self, item):
        """Insert a new comment row (utf8mb4 so emoji survive)."""
        keys = list(item.keys())
        values = list(item.values())
        fields = ','.join(keys)
        placeholders = ','.join(['%s'] * len(keys))
        # Comments may contain 4-byte UTF-8 characters (emoji).
        db.conn.execute('SET NAMES utf8mb4')
        sql = 'INSERT INTO comments (%s) VALUES (%s)' % (fields, placeholders)
        db.conn.insert(sql, *(i.strip() for i in values))

    def process_item(self, item, spider):
        """Scrapy pipeline entry point: route each item type to its handler."""
        if isinstance(item, Subject):
            # Subjects are immutable once stored.
            if not self.get_subject(item):
                self.save_subject(item)
        elif isinstance(item, Meta):
            # Book metadata: insert when new, otherwise refresh the row.
            if not self.get_meta(item):
                try:
                    self.save_meta(item)
                except Exception as e:
                    # Best-effort: report the offending item, keep crawling.
                    # ('except X as e' / print() work on Python 2.6+ and 3.)
                    print(item)
                    print(e)
            else:
                self.update_meta(item)
        elif isinstance(item, Comment):
            if not self.get_comment(item):
                try:
                    self.save_comment(item)
                except Exception as e:
                    print(item)
                    print(e)
        return item
class CoverPipeline(ImagesPipeline):
    """Download book cover images for items produced by the ``meta`` spider.

    Extends scrapy's ImagesPipeline; items from any other spider pass
    through untouched.
    """

    def process_item(self, item, spider):
        # Only the 'meta' spider yields items with downloadable covers.
        if spider.name != 'meta':
            return item
        # Mirrors ImagesPipeline.process_item: schedule every media request
        # and resolve the item once all downloads finish (errors included).
        info = self.spiderinfo
        requests = arg_to_iter(self.get_media_requests(item, info))
        dlist = [self._process_request(r, info) for r in requests]
        dfd = DeferredList(dlist, consumeErrors=1)
        return dfd.addCallback(self.item_completed, item, info)

    def file_path(self, request, response=None, info=None):
        """Build the storage path for a downloaded cover image."""
        def _warn():
            from scrapy.exceptions import ScrapyDeprecationWarning
            import warnings
            warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are deprecated, please use file_path(request, response=None, info=None) instead',
                          category=ScrapyDeprecationWarning, stacklevel=1)
        # Legacy callers may pass a bare url instead of a Request.
        if not isinstance(request, Request):
            _warn()
            url = request
        else:
            url = request.url
        # Defer to overridden legacy file_key()/image_key() hooks if present.
        if not hasattr(self.file_key, '_base'):
            _warn()
            return self.file_key(url)
        elif not hasattr(self.image_key, '_base'):
            _warn()
            return self.image_key(url)
        # NOTE(review): hashlib.sha1 requires bytes on Python 3; this assumes
        # Python 2 str urls -- confirm before porting.
        image_guid = hashlib.sha1(url).hexdigest()
        # Shard files into two levels of 2-character directories drawn from
        # fixed positions of the hash, then name the file by the full hash.
        return '%s%s/%s%s/%s.jpg' % (image_guid[9], image_guid[19], image_guid[29], image_guid[39], image_guid)

    def get_media_requests(self, item, info):
        # Schedule a download only when the item actually carries a cover URL;
        # returning None means "nothing to download" for this item.
        if item['cover']:
            return Request(item['cover'])

    def item_completed(self, results, item, info):
        # Replace the cover URL with the stored image path ('' on failure).
        image_paths = [x['path'] for ok, x in results if ok]
        if image_paths:
            item['cover'] = image_paths[0]
        else:
            item['cover'] = ''
        return item
| apache-2.0 |
elkingtonmcb/bcbio-nextgen | bcbio/variation/validateplot.py | 1 | 15359 | """Plot validation results from variant calling comparisons.
Handles data normalization and plotting, emphasizing comparisons on methodology
differences.
"""
import collections
import os
import numpy as np
import pandas as pd
try:
import matplotlib as mpl
mpl.use('Agg', force=True)
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
except ImportError:
mpl, plt = None, None
try:
import seaborn as sns
except ImportError:
sns = None
from bcbio.log import logger
from bcbio import utils
from bcbio.variation import bamprep
def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None):
    """Combine per-run summary CSVs and plot their classification metrics.

    The individual CSVs are concatenated into ``out_csv``, then plotting is
    delegated to classifyplot_from_valfile.
    """
    frames = []
    for plot_file in plot_files:
        frames.append(pd.read_csv(plot_file))
    combined = pd.concat(frames)
    combined.to_csv(out_csv, index=False)
    return classifyplot_from_valfile(out_csv, outtype, title, size)
def classifyplot_from_valfile(val_file, outtype="png", title=None, size=None):
    """Create a plot from a summarized validation file.

    New-style plotting of summarized metrics: false negative rate and
    false discovery rate.
    https://en.wikipedia.org/wiki/Sensitivity_and_specificity
    """
    raw = pd.read_csv(val_file)
    # One FNR/FDR row per (sample, caller, variant type) combination.
    summarized = raw.groupby(["sample", "caller", "vtype"]).apply(_calculate_fnr_fdr)
    summarized = summarized.reset_index()
    out_file = "%s.%s" % (os.path.splitext(val_file)[0], outtype)
    _do_classifyplot(summarized, out_file, title, size)
    return [out_file]
def _calculate_fnr_fdr(group):
    """Calculate the false negative rate (1 - sensitivity) and false discovery rate (1 - precision).

    ``group`` holds one row per metric (tp/fn/fp) with its value; returns a
    one-row DataFrame with fnr/fdr percentages plus display strings.
    """
    data = {k: d["value"] for k, d in group.set_index("metric").T.to_dict().items()}
    # Guard on the actual denominators: the old code tested tp > 0, which
    # wrongly reported 0% FNR/FDR when tp == 0 but fn/fp were non-zero.
    positives = float(data["tp"] + data["fn"])   # all true variants
    called = float(data["tp"] + data["fp"])      # all calls made
    return pd.DataFrame([{"fnr": data["fn"] / positives * 100.0 if positives > 0 else 0.0,
                          "fdr": data["fp"] / called * 100.0 if called > 0 else 0.0,
                          "tpr": "TP: %s FN: %s" % (data["tp"], data["fn"]),
                          "spc": "FP: %s" % (data["fp"])}])
def _do_classifyplot(df, out_file, title=None, size=None):
    """Plot using classification-based plot using seaborn.

    One row of subplots per (variant type, caller); two columns: FNR and FDR.
    Bars are per-sample, annotated with raw TP/FN/FP counts.
    """
    metric_labels = {"fdr": "False discovery rate",
                     "fnr": "False negative rate"}
    # (metric to plot, companion label column to annotate with)
    metrics = [("fnr", "tpr"), ("fdr", "spc")]
    colors = ["light grey", "greyish"]  # xkcd color names, one per vtype
    data_dict = df.set_index(["sample", "caller", "vtype"]).T.to_dict()
    plt.ioff()
    sns.set(style='white')
    vtypes = sorted(df["vtype"].unique(), reverse=True)
    callers = sorted(df["caller"].unique())
    samples = sorted(df["sample"].unique())
    fig, axs = plt.subplots(len(vtypes) * len(callers), len(metrics))
    fig.text(.5, .95, title if title else "", horizontalalignment='center', size=14)
    for vi, vtype in enumerate(vtypes):
        sns.set_palette(sns.xkcd_palette([colors[vi]]))
        for ci, caller in enumerate(callers):
            for j, (metric, label) in enumerate(metrics):
                # NOTE(review): rows are laid out per (vtype, caller); the
                # expected row index would be vi * len(callers) + ci.  Using
                # len(vtypes) only coincides when both lists have equal
                # length -- confirm with callers before relying on this.
                cur_plot = axs[vi * len(vtypes) + ci][j]
                vals, labels = [], []
                for sample in samples:
                    cur_data = data_dict[(sample, caller, vtype)]
                    vals.append(cur_data[metric])
                    labels.append(cur_data[label])
                cur_plot.barh(np.arange(len(samples)), vals)
                # Share the x-limit across all plots for this variant type so
                # bars are visually comparable.
                all_vals = []
                for k, d in data_dict.items():
                    if k[-1] == vtype:
                        for m in metrics:
                            all_vals.append(d[m[0]])
                metric_max = max(all_vals)
                cur_plot.set_xlim(0, metric_max)
                pad = 0.1 * metric_max
                for ai, (val, label) in enumerate(zip(vals, labels)):
                    # Place the annotation near the axis when bars are long,
                    # after the bar when they are short.
                    cur_plot.annotate(label, (pad + (0 if max(vals) > metric_max / 2.0 else max(vals)),
                                              ai + 0.35), va='center', size=7)
                if j == 0:
                    # Left column carries sample names and the row title.
                    cur_plot.tick_params(axis='y', which='major', labelsize=8)
                    cur_plot.locator_params(nbins=len(samples) + 2, axis="y", tight=True)
                    cur_plot.set_yticklabels(samples, size=8, va="bottom")
                    cur_plot.set_title("%s: %s" % (vtype, caller), fontsize=12, loc="left")
                else:
                    cur_plot.get_yaxis().set_ticks([])
                if ci == len(callers) - 1:
                    # Bottom-most caller row: show percentage ticks.
                    cur_plot.tick_params(axis='x', which='major', labelsize=8)
                    cur_plot.get_xaxis().set_major_formatter(
                        FuncFormatter(lambda v, p: "%s%%" % (int(v) if round(v) == v else v)))
                    if vi == len(vtypes) - 1:
                        cur_plot.get_xaxis().set_label_text(metric_labels[metric], size=12)
                else:
                    cur_plot.get_xaxis().set_ticks([])
                # Borderless plots for a cleaner look.
                cur_plot.spines['bottom'].set_visible(False)
                cur_plot.spines['left'].set_visible(False)
                cur_plot.spines['top'].set_visible(False)
                cur_plot.spines['right'].set_visible(False)
    x, y = (6, len(vtypes) * len(callers) + 1 * 0.5 * len(samples)) if size is None else size
    fig.set_size_inches(x, y)
    fig.tight_layout(rect=(0, 0, 1, 0.95))
    plt.subplots_adjust(hspace=0.6)
    fig.savefig(out_file)
def create_from_csv(in_csv, config=None, outtype="png", title=None, size=None):
    """Plot validation results read from a summary CSV file."""
    data = pd.read_csv(in_csv)
    base = os.path.splitext(in_csv)[0]
    create(data, None, 0, config or {}, base, outtype, title, size)
def create(plot_data, header, ploti, sample_config, out_file_base, outtype="png",
           title=None, size=None):
    """Create plots of validation results for a sample, labeling prep strategies.

    plot_data -- result rows (or an existing DataFrame when header is falsy)
    header -- column names for plot_data when it is a list of rows
    ploti -- starting index for numbering the per-prep output plots
    Returns the list of plot files, or None when plotting libs are missing.
    """
    # Report missing optional plotting dependencies by name.  The previous
    # code eval()'d the identifier strings, which is both an idiom smell and
    # fragile; this produces the identical log message without eval.
    missing = [name for name, mod in [("mpl", mpl), ("plt", plt), ("sns", sns)]
               if mod is None]
    if missing:
        logger.info("No validation plot. Missing imports: %s" % ", ".join(missing))
        return None
    if header:
        df = pd.DataFrame(plot_data, columns=header)
    else:
        df = plot_data
    df["aligner"] = [get_aligner(x, sample_config) for x in df["sample"]]
    df["bamprep"] = [get_bamprep(x, sample_config) for x in df["sample"]]
    # Normalize values so concordant counts don't dwarf discordant ones.
    floors = get_group_floors(df, cat_labels)
    df["value.floor"] = [get_floor_value(x, cat, vartype, floors)
                         for (x, cat, vartype) in zip(df["value"], df["category"], df["variant.type"])]
    out = []
    for i, prep in enumerate(df["bamprep"].unique()):
        out.append(plot_prep_methods(df, prep, i + ploti, out_file_base, outtype, title, size))
    return out
# Display labels for comparison categories; also used to select which
# categories are plotted and floored.
cat_labels = {"concordant": "Concordant",
              "discordant-missing-total": "Discordant (missing)",
              "discordant-extra-total": "Discordant (extra)",
              "discordant-shared-total": "Discordant (shared)"}
# Display labels for variant types.
vtype_labels = {"snp": "SNPs", "indel": "Indels"}
# Display labels for BAM preparation methods; empty by default.
prep_labels = {}
# Display labels for variant callers; keys match caller identifiers in the
# input data ('__' in a label is later rendered as a newline in tick labels).
caller_labels = {"ensemble": "Ensemble", "freebayes": "FreeBayes",
                 "gatk": "GATK Unified\nGenotyper", "gatk-haplotype": "GATK Haplotype\nCaller"}
def plot_prep_methods(df, prep, prepi, out_file_base, outtype, title=None,
                      size=None):
    """Plot comparison between BAM preparation methods.

    The output file is named after the first sample using this prep method.
    """
    prep_samples = df[(df["bamprep"] == prep)]["sample"].unique()
    assert len(prep_samples) >= 1, prep_samples
    out_file = "%s-%s.%s" % (out_file_base, prep_samples[0], outtype)
    # Restrict plotting to the known comparison categories.
    plot_df = df[df["category"].isin(cat_labels)]
    _seaborn(plot_df, prep, prepi, out_file, title, size)
    return out_file
def _seaborn(df, prep, prepi, out_file, title=None, size=None):
    """Plot using seaborn wrapper around matplotlib.

    Grid layout: one row per variant type, one column per populated
    comparison category; bars are per-caller, annotated with raw values.
    """
    plt.ioff()
    sns.set(style='dark')
    vtypes = df["variant.type"].unique()
    callers = sorted(df["caller"].unique())
    # Drop categories with essentially no data (see _check_cats).
    cats = _check_cats(["concordant", "discordant-missing-total",
                        "discordant-extra-total", "discordant-shared-total"],
                       vtypes, df, prep, callers)
    fig, axs = plt.subplots(len(vtypes), len(cats))
    width = 0.8
    for i, vtype in enumerate(vtypes):
        # plt.subplots returns a bare Axes (not an array) for a single row.
        ax_row = axs[i] if len(vtypes) > 1 else axs
        for j, cat in enumerate(cats):
            vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
            if len(cats) == 1:
                assert j == 0
                ax = ax_row
            else:
                ax = ax_row[j]
            if i == 0:
                # Category names only on the top row.
                ax.set_title(cat_labels[cat], size=14)
            ax.get_yaxis().set_ticks([])
            if j == 0:
                # Variant type label only on the leftmost column.
                ax.set_ylabel(vtype_labels[vtype], size=14)
            ax.bar(np.arange(len(callers)), vals, width=width)
            ax.set_ylim(0, maxval)
            if i == len(vtypes) - 1:
                # Caller names along the bottom row only.
                ax.set_xticks(np.arange(len(callers)) + width / 2.0)
                ax.set_xticklabels([caller_labels.get(x, x).replace("__", "\n") if x else ""
                                    for x in callers], size=8, rotation=45)
            else:
                ax.get_xaxis().set_ticks([])
            # Raw (un-floored) values as text above the bars.
            _annotate(ax, labels, vals, np.arange(len(callers)), width)
    fig.text(.5, .95, prep_labels.get(prep, "") if title is None else title, horizontalalignment='center', size=16)
    fig.subplots_adjust(left=0.05, right=0.95, top=0.87, bottom=0.15, wspace=0.1, hspace=0.1)
    x, y = (10, 5) if size is None else size
    fig.set_size_inches(x, y)
    fig.savefig(out_file)
def _check_cats(cats, vtypes, df, prep, callers):
    """Only include categories in the final output if they have values.

    A category is kept when its mean plotted value exceeds 2; if nothing
    qualifies, all categories are returned unchanged.
    """
    populated = []
    for cat in cats:
        cat_vals = []
        for vtype in vtypes:
            vals, _labels, _maxval = _get_chart_info(df, vtype, cat, prep, callers)
            cat_vals.extend(vals)
        if sum(cat_vals) / float(len(cat_vals)) > 2:
            populated.append(cat)
    return populated if populated else cats
def _get_chart_info(df, vtype, cat, prep, callers):
    """Retrieve values for a specific variant type, category and prep method.

    Returns (floored values per caller, raw labels per caller, global max of
    the floored values).  Callers without data get a bar of 1 and no label.
    """
    # Shared ceiling across the whole frame keeps sub-plots comparable.
    maxval_raw = max(list(df["value.floor"]))
    curdf = df[(df["variant.type"] == vtype) & (df["category"] == cat)
               & (df["bamprep"] == prep)]
    vals = []
    labels = []
    for c in callers:
        # BUG FIX: the mask must be built from curdf, not df -- indexing a
        # subset with a full-length boolean Series is unalignable and raises
        # (or warns) in pandas.
        row = curdf[curdf["caller"] == c]
        if len(row) > 0:
            vals.append(list(row["value.floor"])[0])
            labels.append(list(row["value"])[0])
        else:
            vals.append(1)
            labels.append("")
    return vals, labels, maxval_raw
def _annotate(ax, annotate, height, left, width):
    """Annotate axis with labels.

    ``annotate`` is either an iterable of per-bar labels or a flag; when not
    iterable, labels are derived from the bar heights themselves.
    """
    annotate_yrange_factor = 0.010
    xticks = np.array(left) + width / 2.0
    ymin, ymax = ax.get_ylim()
    yrange = ymax - ymin
    # Reset ymax and ymin so there's enough room to see the annotation of
    # the top-most bar.
    if ymax > 0:
        ymax += yrange * 0.15
    if ymin < 0:
        ymin -= yrange * 0.15
    ax.set_ylim(ymin, ymax)
    yrange = ymax - ymin
    offset_ = yrange * annotate_yrange_factor
    # Iterable moved to collections.abc in Python 3 and the old collections
    # alias was removed in 3.10; resolve it in a 2/3-compatible way.
    _iterable = getattr(collections, 'abc', collections).Iterable
    if isinstance(annotate, _iterable):
        annotations = [str(a) for a in annotate]
    else:
        # np.float_ (alias of float64) was removed in NumPy 2.0.
        np_float = getattr(np, 'float_', np.float64)
        annotations = ['%.3f' % h if type(h) is np_float else str(h)
                       for h in height]
    for x, h, annotation in zip(xticks, height, annotations):
        # Adjust the offset to account for negative bars.
        offset = offset_ if h >= 0 else -1 * offset_
        verticalalignment = 'bottom' if h >= 0 else 'top'
        # Shrink the font for long labels so they fit on the plot.
        if len(str(annotation)) > 6:
            size = 7
        elif len(str(annotation)) > 5:
            size = 8
        else:
            size = 10
        # Finally, add the text to the axes.
        ax.annotate(annotation, (x, h + offset),
                    verticalalignment=verticalalignment,
                    horizontalalignment='center',
                    size=size)
def _ggplot(df, out_file):
    """Plot faceted items with ggplot wrapper on top of matplotlib.

    XXX Not yet functional
    """
    # Requires the third-party 'ggplot' package; imported lazily because this
    # experimental path is never exercised by the rest of the module.
    import ggplot as gg
    df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]]
    df["category"] = [cat_labels[x] for x in df["category"]]
    # NOTE(review): unknown callers map to None here, unlike the .get(x, x)
    # fallback used by _seaborn -- confirm which is intended.
    df["caller"] = [caller_labels.get(x, None) for x in df["caller"]]
    p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor")) + gg.geom_bar()
         + gg.facet_wrap("variant.type", "category")
         + gg.theme_seaborn())
    gg.ggsave(p, out_file)
def get_floor_value(x, cat, vartype, floors):
    """Modify values so all have the same relative scale for differences.

    Using the chosen base heights, adjusts an individual sub-plot value to be
    consistent relative to that height (never dropping below 1).
    """
    base = floors[vartype]
    ceiling = floors[(cat, vartype)]
    if ceiling > base:
        x = max(1, x - (ceiling - base))
    return x
def get_group_floors(df, cat_labels):
    """Retrieve the floor for a given row of comparisons, creating a normalized set of differences.

    We need to set non-zero floors so large numbers (like concordance) don't drown out small
    numbers (like discordance). This defines the height for a row of comparisons as either
    the minimum height of any sub-plot, or the maximum difference between higher and lower
    (plus 10%).
    """
    group_maxes = collections.defaultdict(list)
    group_diffs = collections.defaultdict(list)
    diff_pad = 0.1  # 10% padding onto difference to avoid large numbers looking like zero
    for name, group in df.groupby(["category", "variant.type"]):
        label, stype = name
        if label in cat_labels:
            diff = max(group["value"]) - min(group["value"])
            group_diffs[stype].append(diff + int(diff_pad * diff))
            group_maxes[stype].append(max(group["value"]))
        group_maxes[name].append(max(group["value"]))
    out = {}
    # items() replaces Python-2-only iteritems().
    for k, vs in group_maxes.items():
        if k in group_diffs:
            # BUG FIX: this previously indexed group_diffs[stype], silently
            # reusing the leaked loop variable from above instead of the
            # current key; correct whenever more than one variant type exists.
            out[k] = max(max(group_diffs[k]), min(vs))
        else:
            out[k] = min(vs)
    return out
def get_aligner(x, config):
    """Return the configured aligner name, or '' when unset.

    ``x`` (the sample) is unused; the aligner is read from the shared
    configuration, keeping signature symmetry with get_bamprep.
    """
    return utils.get_in(config, ("algorithm", "aligner"), "")
def get_bamprep(x, config):
    """Summarize the BAM prep (realign/recal) configuration as a short label.

    Returns 'gatk' when both realignment and recalibration use GATK, 'none'
    when neither is enabled, 'mixed' when exactly one is, '' otherwise.
    ``x`` (the sample) is unused but kept for signature symmetry with
    get_aligner.
    """
    params = bamprep._get_prep_params({"config": {"algorithm": config.get("algorithm", {})}})
    if params["realign"] == "gatk" and params["recal"] == "gatk":
        return "gatk"
    elif not params["realign"] and not params["recal"]:
        return "none"
    elif not params.get("recal") or not params.get("realign"):
        return "mixed"
    else:
        return ""
# ## Frequency plots
def facet_freq_plot(freq_csv, caller):
    """Prepare a facet plot of frequencies stratified by variant type and status (TP, FP, FN).

    Makes a nice plot with the output from validate.freq_summary
    """
    out_file = "%s.png" % os.path.splitext(freq_csv)[0]
    plt.ioff()
    sns.set(style='dark')
    df = pd.read_csv(freq_csv)
    # One histogram panel per (variant type, validation class) pair.
    g = sns.FacetGrid(df, row="vtype", col="valclass", margin_titles=True,
                      col_order=["TP", "FN", "FP"], row_order=["snp", "indel"],
                      sharey=False)
    g.map(plt.hist, "freq", bins=20, align="left")
    # Allele frequencies are proportions in [0, 1].
    g.set(xlim=(0.0, 1.0))
    g.fig.set_size_inches(8, 6)
    # Caller name as the figure-level title.
    g.fig.text(.05, .97, caller, horizontalalignment='center', size=14)
    g.fig.savefig(out_file)
| mit |
jeremiahmarks/sl4a | python/src/Doc/includes/tzinfo-examples.py | 32 | 5063 | from datetime import tzinfo, timedelta, datetime
# Commonly used fixed offsets for the tzinfo implementations below.
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
# A UTC class.
class UTC(tzinfo):
    """Concrete tzinfo for Coordinated Universal Time (fixed zero offset)."""

    def utcoffset(self, dt):
        # UTC is, by definition, never offset from itself.
        return ZERO

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return ZERO

    def tzname(self, dt):
        return "UTC"

utc = UTC()
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC."""

    def __init__(self, offset, name):
        # offset: minutes east of UTC; name: value reported by tzname().
        self.__offset = timedelta(minutes=offset)
        self.__name = name

    def utcoffset(self, dt):
        return self.__offset

    def dst(self, dt):
        # A fixed-offset zone never observes daylight saving time.
        return ZERO

    def tzname(self, dt):
        return self.__name
# A class capturing the platform's idea of local time.
import time as _time

# Offsets derived from the C library's view of the local zone at import time;
# they do not track later changes to the system timezone setting.
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
    DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
    DSTOFFSET = STDOFFSET

DSTDIFF = DSTOFFSET - STDOFFSET

class LocalTimezone(tzinfo):
    """tzinfo that mirrors the platform's local-time rules via the time module."""

    def utcoffset(self, dt):
        if self._isdst(dt):
            return DSTOFFSET
        else:
            return STDOFFSET

    def dst(self, dt):
        if self._isdst(dt):
            return DSTDIFF
        else:
            return ZERO

    def tzname(self, dt):
        # time.tzname is a (standard, dst) pair; index by the DST flag.
        return _time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        """Return True when naive ``dt`` falls in DST per the platform."""
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, -1)
        # Round-trip through mktime/localtime to let the C library decide
        # the DST flag (-1 above asks it to determine DST itself).
        stamp = _time.mktime(tt)
        tt = _time.localtime(stamp)
        return tt.tm_isdst > 0

Local = LocalTimezone()
# A complete implementation of current DST rules for major US time zones.
def first_sunday_on_or_after(dt):
    """Return ``dt`` advanced to the first Sunday on or after its date."""
    # weekday(): Monday == 0 ... Sunday == 6.
    days_ahead = 6 - dt.weekday()
    if days_ahead:
        return dt + timedelta(days_ahead)
    return dt
# US DST Rules
#
# This is a simplified (i.e., wrong for a few cases) set of rules for US
# DST start and end times. For a complete and up-to-date set of DST rules
# and timezone definitions, visit the Olson Database (or try pytz):
# http://www.twinsun.com/tz/tz-link.htm
# http://sourceforge.net/projects/pytz/ (might not be up-to-date)
#
# In the US, since 2007, DST starts at 2am (standard time) on the second
# Sunday in March, which is the first Sunday on or after Mar 8.
#
# The year in these constants is a placeholder (1); USTimeZone.dst()
# substitutes the actual year via .replace(year=...) before use.
DSTSTART_2007 = datetime(1, 3, 8, 2)
# and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov.
DSTEND_2007 = datetime(1, 11, 1, 1)
# From 1987 to 2006, DST used to start at 2am (standard time) on the first
# Sunday in April and to end at 2am (DST time; 1am standard time) on the last
# Sunday of October, which is the first Sunday on or after Oct 25.
DSTSTART_1987_2006 = datetime(1, 4, 1, 2)
DSTEND_1987_2006 = datetime(1, 10, 25, 1)
# From 1967 to 1986, DST used to start at 2am (standard time) on the last
# Sunday in April (the one on or after April 24) and to end at 2am (DST time;
# 1am standard time) on the last Sunday of October, which is the first Sunday
# on or after Oct 25.
DSTSTART_1967_1986 = datetime(1, 4, 24, 2)
DSTEND_1967_1986 = DSTEND_1987_2006
class USTimeZone(tzinfo):
    """US time zone implementing current and historical DST rules (1967+)."""

    def __init__(self, hours, reprname, stdname, dstname):
        # hours: standard-time offset from UTC (negative == west of UTC).
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname

    def __repr__(self):
        return self.reprname

    def tzname(self, dt):
        if self.dst(dt):
            return self.dstname
        else:
            return self.stdname

    def utcoffset(self, dt):
        # Standard offset plus whatever DST correction applies at dt.
        return self.stdoffset + self.dst(dt)

    def dst(self, dt):
        """Return the DST offset (HOUR or ZERO) in effect at ``dt``."""
        if dt is None or dt.tzinfo is None:
            # An exception may be sensible here, in one or both cases.
            # It depends on how you want to treat them.  The default
            # fromutc() implementation (called by the default astimezone()
            # implementation) passes a datetime with dt.tzinfo is self.
            return ZERO
        assert dt.tzinfo is self
        # Find start and end times for US DST. For years before 1967, return
        # ZERO for no DST.
        if 2006 < dt.year:
            dststart, dstend = DSTSTART_2007, DSTEND_2007
        elif 1986 < dt.year < 2007:
            dststart, dstend = DSTSTART_1987_2006, DSTEND_1987_2006
        elif 1966 < dt.year < 1987:
            dststart, dstend = DSTSTART_1967_1986, DSTEND_1967_1986
        else:
            return ZERO
        # The rule constants use year 1 as a placeholder; substitute the
        # actual year before locating the bounding Sundays.
        start = first_sunday_on_or_after(dststart.replace(year=dt.year))
        end = first_sunday_on_or_after(dstend.replace(year=dt.year))
        # Can't compare naive to aware objects, so strip the timezone from
        # dt first.
        if start <= dt.replace(tzinfo=None) < end:
            return HOUR
        else:
            return ZERO
# Ready-made instances for the major US time zones (standard-time offsets
# in hours relative to UTC).
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
| apache-2.0 |
stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/modules/dnsutil.py | 1 | 11113 | # -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
import socket
# Import python libs
import logging
import time
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Generic, should work on any platform (including Windows). Functionality
    which requires dependencies outside of Python do not belong in this module.
    '''
    # Pure-Python helpers: always safe to load.
    return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
    '''
    Parse /etc/hosts file into a dict mapping each IP to its hostnames.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.parse_hosts
    '''
    # Read the file only when raw hosts text was not handed in directly.
    if not hosts:
        try:
            with salt.utils.fopen(hostsfile, 'r') as fp_:
                hosts = fp_.read()
        except Exception:
            return 'Error: hosts data was not found'
    hostsdict = {}
    for line in hosts.splitlines():
        # Skip blanks and comment lines.
        if not line or line.startswith('#'):
            continue
        fields = line.split()
        hostsdict.setdefault(fields[0], []).extend(fields[1:])
    return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
    '''
    Append a single line to the /etc/hosts file.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
    '''
    # entries is a comma-separated list of hostnames for ip_addr.
    host_list = entries.split(',')
    hosts = parse_hosts(hostsfile=hostsfile)
    if ip_addr in hosts:
        # BUG FIX: iterate over a copy.  Removing from the list being
        # iterated skipped the element following each removal, which could
        # re-append aliases that already exist for this IP.
        for host in list(host_list):
            if host in hosts[ip_addr]:
                host_list.remove(host)
    if not host_list:
        return 'No additional hosts were added to {0}'.format(hostsfile)
    append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
    with salt.utils.fopen(hostsfile, 'a') as fp_:
        fp_.write(append_line)
    return 'The following line was added to {0}:{1}'.format(hostsfile,
                                                            append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
    '''
    Remove a host from the /etc/hosts file. If doing so will leave a line
    containing only an IP address, then the line will be deleted. This function
    will leave comments and blank lines intact.

    CLI Examples:

    .. code-block:: bash

        salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
        salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
    '''
    # Read everything first; the file is rewritten from scratch below.
    with salt.utils.fopen(hostsfile, 'r') as fp_:
        hosts = fp_.read()
    # NOTE(review): entries=None would raise AttributeError on split();
    # callers are expected to always supply a comma-separated string.
    host_list = entries.split(',')
    with salt.utils.fopen(hostsfile, 'w') as out_file:
        for line in hosts.splitlines():
            # Preserve blank lines and comments verbatim.
            if not line or line.strip().startswith('#'):
                out_file.write('{0}\n'.format(line))
                continue
            comps = line.split()
            for host in host_list:
                if host in comps[1:]:
                    comps.remove(host)
            # Write the line back only if at least one hostname remains
            # after the IP address (comps[0]).
            if len(comps) > 1:
                out_file.write(' '.join(comps))
                out_file.write('\n')
def parse_zone(zonefile=None, zone=None):
    '''
    Parses a zone file. Can be passed raw zone data on the API level.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
    '''
    if zonefile:
        try:
            with salt.utils.fopen(zonefile, 'r') as fp_:
                zone = fp_.read()
        except Exception:
            pass
    if not zone:
        return 'Error: Zone data was not found'
    zonedict = {}
    mode = 'single'
    for line in zone.splitlines():
        # Strip ';' comments.
        comps = line.split(';')
        line = comps[0].strip()
        if not line:
            continue
        comps = line.split()
        # Directives such as $ORIGIN / $TTL.
        if line.startswith('$'):
            zonedict[comps[0].replace('$', '')] = comps[1]
            continue
        # A '(' without ')' opens a multi-line record (typically the SOA);
        # accumulate lines until the closing parenthesis.
        if '(' in line and ')' not in line:
            mode = 'multi'
            multi = ''
        if mode == 'multi':
            multi += ' {0}'.format(line)
            if ')' in line:
                mode = 'single'
                line = multi.replace('(', '').replace(')', '')
            else:
                continue
        if 'ORIGIN' in zonedict:
            comps = line.replace('@', zonedict['ORIGIN']).split()
        else:
            comps = line.split()
        if 'SOA' in line:
            # Normalize records written without an explicit 'IN' class.
            if comps[1] != 'IN':
                comps.pop(1)
            zonedict['ORIGIN'] = comps[0]
            zonedict['NETWORK'] = comps[1]
            zonedict['SOURCE'] = comps[3]
            zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
            zonedict['SERIAL'] = comps[5]
            zonedict['REFRESH'] = _to_seconds(comps[6])
            zonedict['RETRY'] = _to_seconds(comps[7])
            zonedict['EXPIRE'] = _to_seconds(comps[8])
            zonedict['MINTTL'] = _to_seconds(comps[9])
            continue
        # Records may omit the owner name; inherit the zone origin.
        if comps[0] == 'IN':
            comps.insert(0, zonedict['ORIGIN'])
        if not comps[0].endswith('.'):
            comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
        if comps[2] == 'NS':
            zonedict.setdefault('NS', []).append(comps[3])
        elif comps[2] == 'MX':
            # BUG FIX: previously guarded by "if 'MX' not in zonedict", which
            # silently dropped every MX record after the first one.
            zonedict.setdefault('MX', []).append({'priority': comps[3],
                                                  'host': comps[4]})
        else:
            zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
    return zonedict


def _to_seconds(timestr):
    '''
    Converts a time value to seconds.

    As per RFC1035 (page 45), max time is 1 week, so anything longer (or
    unreadable) will be set to one week (604800 seconds).
    '''
    timestr = timestr.upper()
    # NOTE(review): only H/D/W suffixes are handled; 'M' (minutes) and 'S'
    # fall through to the plain-integer parse and become one week.
    if 'H' in timestr:
        seconds = int(timestr.replace('H', '')) * 3600
    elif 'D' in timestr:
        seconds = int(timestr.replace('D', '')) * 86400
    elif 'W' in timestr:
        seconds = 604800
    else:
        try:
            seconds = int(timestr)
        except ValueError:
            seconds = 604800
    # Clamp to the RFC 1035 one-week maximum.
    if seconds > 604800:
        seconds = 604800
    return seconds
def _has_dig():
    '''
    The dig-specific functions have been moved into their own module, but
    because they are also DNS utilities, a compatibility layer exists. This
    function helps add that layer.
    '''
    # which() returns the binary's path, or None when dig is absent.
    dig_path = salt.utils.which('dig')
    return dig_path is not None
def check_ip(ip_addr):
    '''
    Check that string ip_addr is a valid IP

    CLI Example:

    .. code-block:: bash

        salt ns1 dig.check_ip 127.0.0.1
    '''
    # Validation is only implemented via the dig module.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.check_ip'](ip_addr)
def A(host, nameserver=None):
    '''
    Return the A record(s) for `host`.

    Always returns a list.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.A www.google.com
    '''
    if _has_dig():
        return __salt__['dig.A'](host, nameserver)
    if nameserver is None:
        # fall back to the socket interface, if we don't care who resolves
        try:
            addrinfo = socket.getaddrinfo(host, None, socket.AF_INET, 0, socket.SOCK_RAW)
            return [entry[4][0] for entry in addrinfo]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
    '''
    Return the AAAA record(s) for `host`.

    Always returns a list.

    .. versionadded:: 2014.7.5

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.AAAA www.google.com
    '''
    if _has_dig():
        return __salt__['dig.AAAA'](host, nameserver)
    if nameserver is None:
        # fall back to the socket interface, if we don't care who resolves
        try:
            addrinfo = socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)
            return [entry[4][0] for entry in addrinfo]
        except socket.gaierror:
            return 'Unable to resolve {0}'.format(host)
    return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
    '''
    Return a list of IPs of the nameservers for ``domain``

    If 'resolve' is False, don't resolve names.

    CLI Example:

    .. code-block:: bash

        salt ns1 dig.NS google.com
    '''
    # Only implemented via the dig module; no socket fallback exists.
    if not _has_dig():
        return 'This function requires dig, which is not currently available'
    return __salt__['dig.NS'](domain, resolve, nameserver)
def SPF(domain, record='SPF', nameserver=None):
    '''
    Return the allowed IPv4 ranges in the SPF record for ``domain``.

    If record is ``SPF`` and the SPF record is empty, the TXT record will be
    searched automatically. If you know the domain uses TXT and not SPF,
    specifying that will save a lookup.

    CLI Example:

    .. code-block:: bash

        salt ns1 dig.SPF google.com
    '''
    # Only implemented via the dig module; no socket fallback exists.
    if _has_dig():
        return __salt__['dig.SPF'](domain, record, nameserver)
    return 'This function requires dig, which is not currently available'
def MX(domain, resolve=False, nameserver=None):
    '''
    Return a list of lists for the MX of ``domain``.

    If the 'resolve' argument is True, resolve IPs for the servers.

    It's limited to one IP, because although in practice it's very rarely a
    round robin, it is an acceptable configuration and pulling just one IP lets
    the data be similar to the non-resolved version. If you think an MX has
    multiple IPs, don't use the resolver here, resolve them in a separate step.

    CLI Example:

    .. code-block:: bash

        salt ns1 dig.MX google.com
    '''
    # Only implemented via the dig module; no socket fallback exists.
    if _has_dig():
        return __salt__['dig.MX'](domain, resolve, nameserver)
    return 'This function requires dig, which is not currently available'
def serial(zone='', update=False):
    '''
    Return, store and update a dns serial for your zone files.

    zone: a keyword for a specific zone

    update: store an updated version of the serial in a grain

    If ``update`` is False, the function will retrieve an existing serial or
    return the current date if no serial is stored. Nothing will be stored

    If ``update`` is True, the function will set the serial to the current date
    if none exist or if the existing serial is for a previous date. If a serial
    for greater than the current date is already stored, the function will
    increment it.

    This module stores the serial in a grain, you can explicitly set the
    stored value as a grain named ``dnsserial_<zone_name>``.

    CLI Example:

    .. code-block:: bash

        salt ns1 dnsutil.serial example.com
    '''
    # NOTE(review): 'grains' is assigned but never used.
    grains = {}
    # Per-zone grain key: 'dnsserial' or 'dnsserial_<zone>'.
    key = 'dnsserial'
    if zone:
        key += '_{0}'.format(zone)
    stored = __salt__['grains.get'](key=key)
    # Date-based serial in YYYYMMDDnn form, starting at nn=01 for today.
    present = time.strftime('%Y%m%d01')
    if not update:
        return stored or present
    if stored and stored >= present:
        # Stored serial already at/above today's base: bump it by one.
        current = str(int(stored) + 1)
    else:
        current = present
    __salt__['grains.setval'](key=key, val=current)
    return current
| apache-2.0 |
ArcherSys/ArcherSys | Lib/site-packages/cms/utils/placeholder.py | 10 | 5642 | # -*- coding: utf-8 -*-
import operator
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.query_utils import Q
from django.utils import six
from sekizai.helpers import get_varname
from cms.utils import get_cms_setting
from cms.utils.compat.dj import force_unicode
def get_toolbar_plugin_struct(plugins_list, slot, page, parent=None):
    """
    Return the list of plugins to render in the toolbar.

    The dictionary contains the label, the classname and the module for the
    plugin.

    Names and modules can be defined on a per-placeholder basis using
    'plugin_modules' and 'plugin_labels' attributes in CMS_PLACEHOLDER_CONF

    :param plugins_list: list of plugins valid for the placeholder
    :param slot: placeholder slot name
    :param page: the page
    :param parent: parent plugin class, if any
    :return: list of dictionaries
    """
    template = page.template if page else None
    # The placeholder-level overrides depend only on slot/template, not on the
    # individual plugin, so resolve them once instead of once per plugin.
    modules = get_placeholder_conf("plugin_modules", slot, template, default={})
    names = get_placeholder_conf("plugin_labels", slot, template, default={})
    main_list = []
    for plugin in plugins_list:
        allowed_parents = plugin().get_parent_classes(slot, page)
        if parent:
            ## skip to the next if this plugin is not allowed to be a child
            ## of the parent
            if allowed_parents and parent.__name__ not in allowed_parents:
                continue
        else:
            # Without a parent, only top-level plugins are eligible.
            if allowed_parents:
                continue
        main_list.append({'value': plugin.value,
                          'name': force_unicode(names.get(plugin.value, plugin.name)),
                          'module': force_unicode(modules.get(plugin.value, plugin.module))})
    return sorted(main_list, key=operator.itemgetter("module"))
def get_placeholder_conf(setting, placeholder, template=None, default=None):
    """
    Resolve a placeholder configuration value (e.g. 'plugins' or 'name').

    Lookup order: CMS_PLACEHOLDER_CONF['<template> <placeholder>'] first
    (only when a template is given), then CMS_PLACEHOLDER_CONF['<placeholder>'].
    A matching entry may delegate through its 'inherit' key, which is either
    'placeholder' or 'template placeholder'.
    """
    if not placeholder:
        return default
    candidates = ["%s %s" % (template, placeholder)] if template else []
    candidates.append(placeholder)
    for candidate in candidates:
        conf = get_cms_setting('PLACEHOLDER_CONF').get(candidate)
        if not conf:
            continue
        value = conf.get(setting)
        if value is not None:
            return value
        inherit = conf.get('inherit')
        if inherit:
            # 'inherit' is "slot" or "template slot"; index access keeps the
            # original behaviour for malformed multi-space values
            parts = inherit.split(' ') if ' ' in inherit else (None, inherit)
            value = get_placeholder_conf(setting, parts[1], parts[0], default)
            if value is not None:
                return value
    return default
def get_page_from_placeholder_if_exists(placeholder):
    """Deprecated helper: return ``placeholder.page`` (or None without a placeholder)."""
    import warnings
    warnings.warn(
        "The get_page_from_placeholder_if_exists function is deprecated. Use placeholder.page instead",
        DeprecationWarning
    )
    if placeholder:
        return placeholder.page
    return None
def validate_placeholder_name(name):
    """
    Ensure *name* is a plain-ASCII string, raising ImproperlyConfigured otherwise.

    Non-ascii verbose labels belong in CMS_PLACEHOLDER_CONF under the 'name' key.
    """
    if not isinstance(name, six.string_types):
        raise ImproperlyConfigured("Placeholder identifier names need to be of type string. ")
    if any(ord(char) >= 128 for char in name):
        raise ImproperlyConfigured("Placeholder identifiers names may not "
            "contain non-ascii characters. If you wish your placeholder "
            "identifiers to contain non-ascii characters when displayed to "
            "users, please use the CMS_PLACEHOLDER_CONF setting with the 'name' "
            "key to specify a verbose name.")
class PlaceholderNoAction(object):
    """Default placeholder action set: copying between languages is disabled."""

    can_copy = False

    def copy(self, **kwargs):
        """Copying is unsupported; always report failure."""
        return False

    def get_copy_languages(self, **kwargs):
        """No languages are available to copy from."""
        return []
class MLNGPlaceholderActions(PlaceholderNoAction):
    """Placeholder actions for models whose translations share a ``master`` row."""
    can_copy = True
    def copy(self, target_placeholder, source_language, fieldname, model, target_language, **kwargs):
        """Copy all plugins from the source-language placeholder into the target.

        Returns the list of newly created plugins, or False when the source
        translation has no placeholder to copy from.
        """
        # translation row that owns the target placeholder
        trgt = model.objects.get(**{fieldname: target_placeholder})
        # sibling translation of the same master in the requested source language
        src = model.objects.get(master=trgt.master, language_code=source_language)
        source_placeholder = getattr(src, fieldname, None)
        if not source_placeholder:
            return False
        plugins = source_placeholder.get_plugins_list()
        # shared mutable dict threaded through copy_plugin -- presumably maps
        # old plugin ids to their copies so tree relations survive; TODO confirm
        cache = {}
        new_plugins = []
        for p in plugins:
            new_plugins.append(p.copy_plugin(target_placeholder, target_language, cache))
        return new_plugins
    def get_copy_languages(self, placeholder, model, fieldname, **kwargs):
        """Return (code, verbose name) pairs of other translations that hold plugins."""
        manager = model.objects
        src = manager.get(**{fieldname: placeholder})
        query = Q(master=src.master)
        # restrict to translations whose placeholder actually contains plugins
        query &= Q(**{'%s__cmsplugin__isnull' % fieldname: False})
        query &= ~Q(pk=src.pk)
        language_codes = manager.filter(query).values_list('language_code', flat=True).distinct()
        return [(lc, dict(settings.LANGUAGES)[lc]) for lc in language_codes]
def restore_sekizai_context(context, changes):
    """Re-append previously captured sekizai data onto *context*'s container."""
    sekizai_container = context.get(get_varname())
    for namespace_key, recorded_values in changes.items():
        namespace = sekizai_container[namespace_key]
        for recorded_value in recorded_values:
            namespace.append(recorded_value)
| mit |
iffy/AutobahnPython | examples/twisted/websocket/wrapping/client_endpoint.py | 2 | 2032 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from twisted.internet.protocol import Protocol
class HelloClientProtocol(Protocol):
    """Toy client protocol: greet the server on connect, print whatever comes back."""
    def connectionMade(self):
        print("connectionMade")
        # NOTE(review): Twisted transports expect bytes on Python 3; 'hello'
        # is a str literal here -- confirm which interpreter this example targets.
        self.transport.write('hello')
    def dataReceived(self, data):
        print("dataReceived: {}".format(data))
if __name__ == '__main__':
    import sys
    from twisted.python import log
    from twisted.internet import reactor
    from twisted.internet.protocol import Factory
    from twisted.internet.endpoints import clientFromString
    # Log to stdout so the connection lifecycle is visible when running the example.
    log.startLogging(sys.stdout)
    wrappedFactory = Factory.forProtocol(HelloClientProtocol)
    # 'autobahn:' endpoint plugin string: wraps the plain TCP factory so the
    # protocol is tunnelled over a WebSocket to ws://localhost:9000
    # (colons inside the endpoint description must be backslash-escaped).
    endpoint = clientFromString(reactor, "autobahn:tcp\:localhost\:9000:url=ws\://localhost\:9000")
    endpoint.connect(wrappedFactory)
    reactor.run()
| mit |
rouault/Quantum-GIS | python/plugins/processing/algs/grass7/ext/r_li_padrange_ascii.py | 12 | 1544 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_padrange_ascii.py
----------------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .r_li import checkMovingWindow, configFile, moveOutputTxtFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Delegate to the shared r.li moving-window pre-check (text-output variant)."""
    return checkMovingWindow(alg, parameters, context, True)
def processCommand(alg, parameters, context, feedback):
    """Write the r.li configuration file before the GRASS command runs (text-output variant)."""
    configFile(alg, parameters, context, feedback, True)
def processOutputs(alg, parameters, context, feedback):
    """Move the generated r.li text result into the algorithm's declared output."""
    moveOutputTxtFile(alg, parameters, context)
| gpl-2.0 |
shiburizu/py2discord | py2discord.py | 1 | 8389 | import discord
import sqlite3 as sql
import logging
import cleverbot
import random
logging.basicConfig(level=logging.INFO)
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
from apiclient.discovery import build
import apiclient.errors
# Please refer to the README to find where you should paste your bot's credentials for services.
# Built-in command names that user-defined commands may not shadow.
blacklistwords = ['image','gif','help','add','talk','permissions','blacklist','whitelist']
# Highest assignable whitelist / blacklist levels.
maxwhiterank = 3
maxblackrank = 3
# Google Custom Search client; the developer key placeholder must be replaced.
service = build("customsearch", "v1",
    developerKey="CREATE FROM CONSOLE.DEVELOPERS.GOOGLE.COM")
class client(discord.Client):
    """Discord bot front-end (Python 2): per-user black/whitelist checks plus '$' command handling."""
    def isBlacklisted(self,msg,p):
        """Return True (and announce it) when msg.author's blacklist level is >= p."""
        c.execute("SELECT level FROM blacklist where id = ?", (msg.author.id,))
        blacklist = c.fetchone()
        if blacklist:
            # NOTE(review): blacklist[0][0] reads the FIRST CHARACTER of the
            # stored level string -- only valid for single-digit levels; confirm.
            val = int(blacklist[0][0])
            if val >= int(p):
                self.send_message(msg.channel,'%s is blacklist level %s, therefore this command is locked.' % (msg.author.name, blacklist[0][0]))
                return True
            else:
                return False
        else:
            return False
    def isWhitelisted(self,msg,p):
        """Return True when msg.author's whitelist level is >= p; otherwise announce denial."""
        c.execute("SELECT level FROM whitelist where id = ?", (msg.author.id,))
        whitelist = c.fetchone()
        if whitelist:
            # NOTE(review): same single-character read as isBlacklisted -- confirm.
            val = int(whitelist[0][0])
            if val >= int(p):
                return True
            else:
                self.send_message(msg.channel,'%s does not have sufficient permissions to use that command.' % msg.author.name)
                return False
        else:
            self.send_message(msg.channel,'%s does not have sufficient permissions to use that command.' % msg.author.name)
            return False
    def on_message(self, message):
        """Dispatch '$'-prefixed chat commands unless the author is blacklisted at level 3."""
        p = self.isBlacklisted(message,'3')
        if p == False:
            if message.content.startswith('$help'):
                commands = c.execute('SELECT name FROM cmds')
                # NOTE(review): as shown, the format target string contains no
                # %s placeholder, so the '%' apply would raise TypeError --
                # possibly text was lost when this file was pasted; verify.
                self.send_message(message.channel,
                    """py2discord is a Discord chat bot written in Python
by https://github.com/shiburizu/""" % ', '.join([str(i[0])for i in commands]))
            elif message.content.startswith('$blacklist '):
                try:
                    p = self.isWhitelisted(message,'1') #check whitelist 1
                    if p == True:
                        # "$blacklist <@mention> <level>" -> [id, level]
                        insert = (message.content[13:].replace('>','')).split(' ', 1)
                        try:
                            if insert[1].isdigit():
                                print insert
                                if int(insert[1]) > maxblackrank:
                                    self.send_message(message.channel, 'Please provide a valid blacklist level. Can be from 0 (None) to %s.' % maxblackrank)
                                else:
                                    c.execute('INSERT OR REPLACE INTO blacklist(id, level) VALUES(?,?)',
                                        (insert[0],insert[1]))
                                    db.commit()
                                    self.send_message(message.channel,
                                        'Successfully blacklisted ID %s at level %s.' % (insert[0],insert[1]))
                            else:
                                self.send_message(message.channel, 'Please provide a valid blacklist level. Can be from 0 (None) to %s.' % maxblackrank)
                        except IndexError:
                            self.send_message(message.channel, 'Please provide a valid blacklist level. Can be from 0 (None) to %s.' % maxblackrank)
                except sql.Error as e:
                    if db:
                        db.rollback()
                    print "Error %s:" % e.args[0]
                    self.send_message(message.channel,
                        "Something went wrong. It has been logged.")
            elif message.content.startswith('$whitelist '):
                try:
                    p = self.isWhitelisted(message,'2') #check whitelist 2
                    if p == True:
                        insert = (message.content[13:].replace('>','')).split(' ', 1)
                        try:
                            if insert[1].isdigit():
                                print insert
                                if int(insert[1]) > maxwhiterank:
                                    self.send_message(message.channel, 'Please provide a valid whitelist level. Can be from 0 (None) to %s.' % maxwhiterank)
                                else:
                                    c.execute('INSERT OR REPLACE INTO whitelist(id, level) VALUES(?,?)',
                                        (insert[0],insert[1]))
                                    db.commit()
                                    self.send_message(message.channel,
                                        'Successfully whitelisted ID %s at level %s.' % (insert[0],insert[1]))
                            else:
                                self.send_message(message.channel, 'Please provide a valid whitelist level. Can be from 0 (None) to %s.' % maxwhiterank)
                        except IndexError:
                            self.send_message(message.channel, 'Please provide a valid whitelist level. Can be from 0 (None) to %s.' % maxwhiterank)
                except sql.Error as e:
                    if db:
                        db.rollback()
                    print "Error %s:" % e.args[0]
                    self.send_message(message.channel,
                        "Something went wrong. It has been logged.")
            elif message.content.startswith('$image '):
                try:
                    p = self.isBlacklisted(message,'1') #check blacklist 1
                    if p == False:
                        query = message.content[7:]
                        if query != '':
                            res = service.cse().list(
                                q=query,
                                cx='INSERT CX KEY FROM CSE.GOOGLE.COM',
                                searchType='image',
                                num=10,
                                safe='off'
                            ).execute()
                            if not 'items' in res:
                                self.send_message(message.channel, "No image found.")
                            else:
                                results = []
                                for item in res['items']:
                                    results.append(item['link'])
                                self.send_message(message.channel, random.choice(results))
                        else:
                            self.send_message(message.channel,'Please input search terms.')
                except apiclient.errors.HttpError as e:
                    self.send_message(message.channel,
                        "There was a problem with your request. Here is some information:```%s```" % e)
            elif message.content.startswith('$gif '):
                try:
                    p = self.isBlacklisted(message,'1') #check blacklist 1
                    if p == False:
                        # NOTE(review): '$gif ' is 5 characters but the slice
                        # starts at 7, dropping the first two letters of the
                        # query; also `query != '' or None` only works by
                        # accident of operator precedence -- verify both.
                        query = message.content[7:]
                        if query != '' or None:
                            res = service.cse().list(
                                q=query,
                                cx='INSERT CX KEY FROM CSE.GOOGLE.COM',
                                searchType='image',
                                fileType='gif',
                                num=10,
                                safe='off'
                            ).execute()
                            if not 'items' in res:
                                self.send_message(message.channel, "No image found.")
                            else:
                                results = []
                                for item in res['items']:
                                    results.append(item['link'])
                                self.send_message(message.channel, random.choice(results))
                        else:
                            self.send_message(message.channel,'Please input search terms.')
                except apiclient.errors.HttpError as e:
                    self.send_message(message.channel,
                        "There was a problem with your request. Here is some information:```%s```" % e)
            elif message.content.startswith('$add '):
                try:
                    p = self.isBlacklisted(message,'2')
                    if p == False:
                        insert = (message.content[5:].encode('utf-8')).split(' ', 1)
                        # NOTE(review): `insert` is a LIST, so this membership
                        # test against blacklistwords (a list of strings) is
                        # always True -- the reserved-word guard never fires.
                        if not insert in blacklistwords:
                            print insert
                            c.execute('INSERT OR ABORT INTO cmds(name,cmd) VALUES(?,?)',
                                (insert[0],insert[1]))
                            db.commit()
                            self.send_message(message.channel,
                                "Command added.")
                        else:
                            self.send_message(message.channel,
                                "This is a blacklisted word, and cannot be added.")
                except sql.IntegrityError:
                    self.send_message(message.channel, "Already exists. Aborted.")
                except sql.Error as e:
                    if db:
                        db.rollback()
                    print "Error %s:" % e.args[0]
                    self.send_message(message.channel,
                        "Something went wrong. It has been logged.")
            elif message.content.startswith('$talk '):
                reply = talk.ask(message.content[6:])
                print "Was asked:", message.content[6:], "Replied with:", reply
                self.send_message(message.channel, reply)
            elif message.content.startswith('$permissions'):
                c.execute('SELECT level FROM whitelist WHERE id = ?', (message.author.id,))
                white = c.fetchone()
                if not white:
                    white = 0
                else:
                    # NOTE(review): first character of the level string again.
                    white = white[0][0]
                c.execute('SELECT level FROM blacklist WHERE id = ?', (message.author.id,))
                black = c.fetchone()
                if not black:
                    black = 0
                else:
                    black = black[0][0]
                self.send_message(message.channel,
                    '%s, your Discord ID is %s. Your whitelist level is %s and blacklist level is %s.' % (
                        message.author.name,message.author.id,white,black))
            elif message.content.startswith('$'):
                # fall-through: look up a user-defined command by name
                try:
                    c.execute("SELECT cmd FROM cmds WHERE name = ?",
                        (message.content[1:],))
                    fetch = c.fetchone()
                    self.send_message(message.channel, fetch[0])
                except TypeError:
                    # fetch is None for unknown commands; silently ignore
                    pass
talk = cleverbot.Cleverbot()
bot = client()
db = sql.connect('commands.db')
db.text_factory = str
c = db.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS cmds(name VARCHAR(10) UNIQUE,
cmd VARCHAR(64));''')
c.execute('''CREATE TABLE IF NOT EXISTS blacklist(id VARCHAR(10) UNIQUE,
level VARCHAR(64));''')
c.execute('''CREATE TABLE IF NOT EXISTS whitelist(id VARCHAR(10) UNIQUE,
level VARCHAR(64));''')
db.commit()
bot.login('EMAIL','PASSWORD')
bot.run()
| isc |
icexelloss/spark | python/pyspark/rddsampler.py | 157 | 4250 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
import math
class RDDSamplerBase(object):
    """Common machinery for RDD samplers: a seeded per-partition RNG plus draws."""

    def __init__(self, withReplacement, seed=None):
        """Remember the sampling mode and fix a seed (random when not supplied)."""
        if seed is None:
            seed = random.randint(0, sys.maxsize)
        self._seed = seed
        self._withReplacement = withReplacement
        self._random = None

    def initRandomGenerator(self, split):
        """Create the per-partition generator seeded with (seed XOR split id)."""
        rng = random.Random(self._seed ^ split)
        # burn a few draws so neighbouring partition seeds decorrelate
        for _ in range(10):
            rng.randint(0, 1)
        self._random = rng

    def getUniformSample(self):
        """Draw one uniform variate from [0, 1)."""
        return self._random.random()

    def getPoissonSample(self, mean):
        """Draw a Poisson(mean) variate via Knuth's method (log domain for large mean).

        See http://en.wikipedia.org/wiki/Poisson_distribution
        """
        count = 0
        if mean < 20.0:
            # multiply uniforms until the running product drops below e^-mean
            threshold = math.exp(-mean)
            product = self._random.random()
            while product > threshold:
                count += 1
                product *= self._random.random()
        else:
            # sum exponential inter-arrival times until one unit of time elapses
            elapsed = self._random.expovariate(mean)
            while elapsed < 1.0:
                count += 1
                elapsed += self._random.expovariate(mean)
        return count

    def func(self, split, iterator):
        """Subclasses implement the actual per-partition sampling."""
        raise NotImplementedError
class RDDSampler(RDDSamplerBase):
    """Whole-RDD sampler: Bernoulli without replacement, Poisson with replacement."""

    def __init__(self, withReplacement, fraction, seed=None):
        """Store the sampling fraction on top of the base initialisation."""
        RDDSamplerBase.__init__(self, withReplacement, seed)
        self._fraction = fraction

    def func(self, split, iterator):
        """Yield the sampled elements of one partition."""
        self.initRandomGenerator(split)
        if self._withReplacement:
            for item in iterator:
                # For large datasets, the expected number of occurrences of
                # each element in a sample with replacement is
                # Poisson(fraction); draw a count per element.
                occurrences = self.getPoissonSample(self._fraction)
                while occurrences > 0:
                    occurrences -= 1
                    yield item
        else:
            for item in iterator:
                # keep each element independently with probability `fraction`
                if self.getUniformSample() < self._fraction:
                    yield item
class RDDRangeSampler(RDDSamplerBase):
    """Keeps elements whose uniform draw lands inside [lowerBound, upperBound)."""

    def __init__(self, lowerBound, upperBound, seed=None):
        """Range sampling never uses replacement."""
        RDDSamplerBase.__init__(self, False, seed)
        self._lowerBound = lowerBound
        self._upperBound = upperBound

    def func(self, split, iterator):
        """Yield the elements of one partition whose draw falls in the range."""
        self.initRandomGenerator(split)
        for item in iterator:
            draw = self.getUniformSample()
            if self._lowerBound <= draw < self._upperBound:
                yield item
class RDDStratifiedSampler(RDDSamplerBase):
    """Stratified sampler over (key, value) pairs: each key has its own fraction."""

    def __init__(self, withReplacement, fractions, seed=None):
        """``fractions`` maps each key to its sampling fraction."""
        RDDSamplerBase.__init__(self, withReplacement, seed)
        self._fractions = fractions

    def func(self, split, iterator):
        """Yield sampled (key, value) pairs of one partition."""
        self.initRandomGenerator(split)
        if self._withReplacement:
            for key, val in iterator:
                # With replacement the expected occurrence count per pair is
                # Poisson(fraction[key]); draw a count for each pair.
                occurrences = self.getPoissonSample(self._fractions[key])
                while occurrences > 0:
                    occurrences -= 1
                    yield key, val
        else:
            for key, val in iterator:
                if self.getUniformSample() < self._fractions[key]:
                    yield key, val
| apache-2.0 |
adrienbrault/home-assistant | homeassistant/components/powerwall/sensor.py | 3 | 4166 | """Support for August sensors."""
import logging
from tesla_powerwall import MeterType
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import DEVICE_CLASS_BATTERY, DEVICE_CLASS_POWER, PERCENTAGE
from .const import (
ATTR_ENERGY_EXPORTED,
ATTR_ENERGY_IMPORTED,
ATTR_FREQUENCY,
ATTR_INSTANT_AVERAGE_VOLTAGE,
ATTR_IS_ACTIVE,
DOMAIN,
ENERGY_KILO_WATT,
POWERWALL_API_CHARGE,
POWERWALL_API_DEVICE_TYPE,
POWERWALL_API_METERS,
POWERWALL_API_SERIAL_NUMBERS,
POWERWALL_API_SITE_INFO,
POWERWALL_API_STATUS,
POWERWALL_COORDINATOR,
)
from .entity import PowerWallEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Powerwall sensors from a config entry.

    Creates one energy sensor per meter type plus a single battery
    charge sensor.  (Docstring previously said "August sensors" --
    a copy-paste from the August integration.)
    """
    powerwall_data = hass.data[DOMAIN][config_entry.entry_id]
    _LOGGER.debug("Powerwall_data: %s", powerwall_data)
    coordinator = powerwall_data[POWERWALL_COORDINATOR]
    site_info = powerwall_data[POWERWALL_API_SITE_INFO]
    device_type = powerwall_data[POWERWALL_API_DEVICE_TYPE]
    status = powerwall_data[POWERWALL_API_STATUS]
    powerwalls_serial_numbers = powerwall_data[POWERWALL_API_SERIAL_NUMBERS]
    # One energy sensor per meter ...
    entities = [
        PowerWallEnergySensor(
            meter,
            coordinator,
            site_info,
            status,
            device_type,
            powerwalls_serial_numbers,
        )
        for meter in MeterType
    ]
    # ... plus the single battery charge sensor.
    entities.append(
        PowerWallChargeSensor(
            coordinator, site_info, status, device_type, powerwalls_serial_numbers
        )
    )
    async_add_entities(entities, True)
class PowerWallChargeSensor(PowerWallEntity, SensorEntity):
    """Battery charge percentage sensor for a Tesla Powerwall."""
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement (percent)."""
        return PERCENTAGE
    @property
    def name(self):
        """Return the display name of the sensor."""
        return "Powerwall Charge"
    @property
    def device_class(self):
        """Return the device class (battery)."""
        return DEVICE_CLASS_BATTERY
    @property
    def unique_id(self):
        """Return a unique id derived from the gateway's base unique id."""
        return f"{self.base_unique_id}_charge"
    @property
    def state(self):
        """Return the current charge as a rounded percentage."""
        return round(self.coordinator.data[POWERWALL_API_CHARGE])
class PowerWallEnergySensor(PowerWallEntity, SensorEntity):
    """Instant power sensor for a single Powerwall meter (site, solar, ...)."""
    def __init__(
        self,
        meter: MeterType,
        coordinator,
        site_info,
        status,
        device_type,
        powerwalls_serial_numbers,
    ):
        """Initialize the sensor for the given meter type."""
        super().__init__(
            coordinator, site_info, status, device_type, powerwalls_serial_numbers
        )
        # which meter of the gateway this entity reports on
        self._meter = meter
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement (kW)."""
        return ENERGY_KILO_WATT
    @property
    def name(self):
        """Return the display name, e.g. 'Powerwall Solar Now'."""
        return f"Powerwall {self._meter.value.title()} Now"
    @property
    def device_class(self):
        """Return the device class (power)."""
        return DEVICE_CLASS_POWER
    @property
    def unique_id(self):
        """Return a unique id combining the gateway id and the meter name."""
        return f"{self.base_unique_id}_{self._meter.value}_instant_power"
    @property
    def state(self):
        """Return the meter's instant power in kW (3 decimal places)."""
        return (
            self.coordinator.data[POWERWALL_API_METERS]
            .get_meter(self._meter)
            .get_power(precision=3)
        )
    @property
    def extra_state_attributes(self):
        """Return the device specific state attributes."""
        meter = self.coordinator.data[POWERWALL_API_METERS].get_meter(self._meter)
        return {
            ATTR_FREQUENCY: round(meter.frequency, 1),
            ATTR_ENERGY_EXPORTED: meter.get_energy_exported(),
            ATTR_ENERGY_IMPORTED: meter.get_energy_imported(),
            # NOTE(review): 'avarage_voltage' mirrors the tesla_powerwall
            # library attribute spelling -- do not "fix" without checking
            # the installed library version.
            ATTR_INSTANT_AVERAGE_VOLTAGE: round(meter.avarage_voltage, 1),
            ATTR_IS_ACTIVE: meter.is_active(),
        }
| mit |
Serag8/Bachelor | google_appengine/lib/django-1.5/django/middleware/cache.py | 37 | 9386 | """
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests
(i.e., those not made by a logged-in user) will be cached. This is a simple
and effective way of avoiding the caching of the Django admin (and any other
user-specific content).
* This middleware expects that a HEAD request is answered with the same response
headers exactly like the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import get_cache, DEFAULT_CACHE_ALIAS
from django.utils.cache import (get_cache_key, get_max_age, has_vary_header,
learn_cache_key, patch_response_headers)
class UpdateCacheMiddleware(object):
    """
    Response-phase cache middleware that updates the cache if the response is
    cacheable.
    Must be used as part of the two-part update/fetch cache middleware.
    UpdateCacheMiddleware must be the first piece of middleware in
    MIDDLEWARE_CLASSES so that it'll get called last during the response phase.
    """
    def __init__(self):
        self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = get_cache(self.cache_alias)
    def _session_accessed(self, request):
        # The session attribute is absent when session middleware isn't
        # installed; treat that as "session never touched".
        try:
            return request.session.accessed
        except AttributeError:
            return False
    def _should_update_cache(self, request, response):
        """Return True when this response should be written to the cache."""
        if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
            return False
        # If the session has not been accessed otherwise, we don't want to
        # cause it to be accessed here. If it hasn't been accessed, then the
        # user's logged-in status has not affected the response anyway.
        if self.cache_anonymous_only and self._session_accessed(request):
            assert hasattr(request, 'user'), "The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware."
            if request.user.is_authenticated():
                # Don't cache user-variable requests from authenticated users.
                return False
        return True
    def process_response(self, request, response):
        """Sets the cache, if needed."""
        if not self._should_update_cache(request, response):
            # We don't need to update the cache, just return.
            return response
        if response.streaming or response.status_code != 200:
            return response
        # Don't cache responses that set a user-specific (and maybe security
        # sensitive) cookie in response to a cookie-less request.
        if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
            return response
        # Try to get the timeout from the "max-age" section of the "Cache-
        # Control" header before reverting to using the default cache_timeout
        # length.
        timeout = get_max_age(response)
        if timeout is None:  # fixed: was `timeout == None` (PEP 8 identity test)
            timeout = self.cache_timeout
        elif timeout == 0:
            # max-age was set to 0, don't bother caching.
            return response
        patch_response_headers(response, timeout)
        if timeout:
            cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)
            if hasattr(response, 'render') and callable(response.render):
                # delay the cache write until the template response is rendered
                response.add_post_render_callback(
                    lambda r: self.cache.set(cache_key, r, timeout)
                )
            else:
                self.cache.set(cache_key, response, timeout)
        return response
class FetchFromCacheMiddleware(object):
    """
    Request-phase cache middleware that fetches a page from the cache.
    Must be used as part of the two-part update/fetch cache middleware.
    FetchFromCacheMiddleware must be the last piece of middleware in
    MIDDLEWARE_CLASSES so that it'll get called last during the request phase.
    """
    def __init__(self):
        self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = get_cache(self.cache_alias)
    def process_request(self, request):
        """
        Checks whether the page is already cached and returns the cached
        version if available.
        """
        # fixed: was `not request.method in (...)` (prefer `not in`)
        if request.method not in ('GET', 'HEAD'):
            request._cache_update_cache = False
            return None  # Don't bother checking the cache.
        # try and get the cached GET response
        cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)
        if cache_key is None:
            request._cache_update_cache = True
            return None  # No cache information available, need to rebuild.
        response = self.cache.get(cache_key, None)
        # if it wasn't found and we are looking for a HEAD, try looking just for that
        if response is None and request.method == 'HEAD':
            cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)
            response = self.cache.get(cache_key, None)
        if response is None:
            request._cache_update_cache = True
            return None  # No cache information available, need to rebuild.
        # hit, return cached response
        request._cache_update_cache = False
        return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
    """
    Cache middleware that provides basic behavior for many simple sites.
    Also used as the hook point for the cache decorator, which is generated
    using the decorator-from-middleware utility.
    """
    def __init__(self, cache_timeout=None, cache_anonymous_only=None, **kwargs):
        # We need to differentiate between "provided, but using default value",
        # and "not provided". If the value is provided using a default, then
        # we fall back to system defaults. If it is not provided at all,
        # we need to use middleware defaults.
        cache_kwargs = {}
        # 'key_prefix' present in kwargs (even as None) means the caller chose
        # it explicitly; absence (KeyError) means fall back to settings.
        try:
            self.key_prefix = kwargs['key_prefix']
            if self.key_prefix is not None:
                cache_kwargs['KEY_PREFIX'] = self.key_prefix
            else:
                self.key_prefix = ''
        except KeyError:
            self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
            cache_kwargs['KEY_PREFIX'] = self.key_prefix
        # Same provided-vs-omitted dance for the cache alias and timeout.
        try:
            self.cache_alias = kwargs['cache_alias']
            if self.cache_alias is None:
                self.cache_alias = DEFAULT_CACHE_ALIAS
            if cache_timeout is not None:
                cache_kwargs['TIMEOUT'] = cache_timeout
        except KeyError:
            self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
            if cache_timeout is None:
                cache_kwargs['TIMEOUT'] = settings.CACHE_MIDDLEWARE_SECONDS
            else:
                cache_kwargs['TIMEOUT'] = cache_timeout
        if cache_anonymous_only is None:
            self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
        else:
            self.cache_anonymous_only = cache_anonymous_only
        self.cache = get_cache(self.cache_alias, **cache_kwargs)
        self.cache_timeout = self.cache.default_timeout
| mit |
LMSlay/wiper | modules/clamav.py | 1 | 2875 | # This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import getopt
try:
import pyclamd
HAVE_CLAMD = True
except ImportError:
HAVE_CLAMD = False
from viper.common.out import *
from viper.common.abstracts import Module
from viper.core.session import __sessions__
class ClamAV(Module):
    """Viper module: scan the currently opened file with a local clamd daemon."""
    cmd = 'clamav'
    description = 'Scan file from local ClamAV daemon'
    authors = ['neriberto']
    def run(self):
        """Parse options, connect to clamd (default or -s socket) and scan the session file."""
        def usage():
            self.log('', "usage: clamav [-h] [-s]")
        def help():
            usage()
            self.log('', "")
            self.log('', "Options:")
            self.log('', "\t--help (-h)\tShow this help message")
            self.log('', "\t--socket(-s)\tSpecify an unix socket (default: Clamd Unix Socket)")
            self.log('', "")
        if not HAVE_CLAMD:
            self.log('error', "Missing dependency, install requests (`pip install pyclamd`)")
            return
        try:
            opts, argv = getopt.getopt(self.args, 'hs:', ['help', 'socket='])
        except getopt.GetoptError as e:
            self.log('', e)
            usage()
            return
        daemon = None
        result = None
        socket = None
        for opt, value in opts:
            if opt in ('-h', '--help'):
                help()
                return
            elif opt in ('-s', '--socket'):
                self.log('info', "Using socket {0} to connect to ClamAV daemon".format(value))
                socket = value
                try:
                    daemon = pyclamd.ClamdUnixSocket(socket)
                except Exception as e:
                    self.log('error', "Daemon connection failure, {0}".format(e))
                    return
        if not __sessions__.is_set():
            self.log('error', "No session opened")
            return
        try:
            # no -s given: fall back to the default clamd unix socket
            if not daemon:
                daemon = pyclamd.ClamdUnixSocket()
                socket = 'Clamav'
        except Exception as e:
            self.log('error', "Daemon connection failure, {0}".format(e))
            return
        try:
            if daemon.ping():
                results = daemon.scan_file(__sessions__.current.file.path)
            else:
                # NOTE(review): this branch logs but does not return, so
                # `results` below would be unbound (NameError) -- confirm.
                self.log('error', "Unable to connect to the daemon")
        except Exception as e:
            self.log('error', "Unable to scan with antivirus daemon, {0}".format(e))
            return
        found = None
        name = 'not found'
        if results:
            # scan_file maps path -> (status, signature); keep the last entry
            for item in results:
                found = results[item][0]
                name = results[item][1]
        if found == 'ERROR':
            self.log('error', "Check permissions of the binary folder, {0}".format(name))
        else:
            self.log('info', "Daemon {0} returns: {1}".format(socket, name))
| bsd-3-clause |
guymakam/Kodi-Israel | plugin.video.MakoTV/resources/lib/crypto/app/filecrypt.py | 7 | 3051 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
""" cipher.app.filecrypt
File encryption script.
Current uses an 'extended' AES algorithm.
2002 by Paul A. Lambert
Read LICENSE.txt for license information.
"""
import sys, getpass, getopt, os
from crypto.cipher.trolldoll import Trolldoll
from crypto.errors import DecryptNotBlockAlignedError
from binascii_plus import *
def main():
    """Command-line entry point for filecrypt.

    Parses -e/-d (encrypt/decrypt, mutually exclusive, one required),
    -k <passPhrase> (otherwise prompted), -i <infile> / -o <outfile>
    (otherwise stdin/stdout), then runs the Trolldoll cipher over the
    input.  Exits via sys.exit() with a usage message on any error.
    """
    # path is unused; split only to obtain the program name for usage text
    path, progName = os.path.split(sys.argv[0])
    usage = """Usage: %s [-d | -e][a][?] [-k <passPhrase>] [-i <infile>] [-o <outfile>]\n""" % progName
    try:
        # use get opt to parse and validate command line
        optlist, args = getopt.getopt( sys.argv[1:], 'edk:i:o:' )
    except getopt.GetoptError, err :
        sys.exit( "Error: %s\n%s" % (err,usage) )
    # NOTE(review): debug print of the parsed options; presumably left over
    # from development -- confirm whether it should stay
    print optlist,'\n------\n',args
    # make a dictionary and check for one occurance of each option
    optdict = {}
    for option in optlist:
        if not optdict.has_key(option[0]):
            optdict[option[0]] = option[1]
        else:
            sys.exit( "Error: duplicate option '%s'\n%s" % (option[0],usage) )
    # -e and -d are mutually exclusive and exactly one of them is required
    if optdict.has_key('-e') and optdict.has_key('-d'):
        sys.exit( "Error: Can not do both encrypt and decrypt, pick either '-e' or '-d'\n%s" % usage )
    if not(optdict.has_key('-e') or optdict.has_key('-d')):
        sys.exit( "Error: Must select encrypt or decrypt, pick either '-e' or '-d'\n%s" % usage )
    # determine the passphrase from the command line or by keyboard input
    if optdict.has_key('-k'):
        passPhrase = optdict['-k']
    else:
        passPhrase = getpass.getpass('Key: ')
    # should really test for a good passphrase ...................
    # get input from file or stdin
    if optdict.has_key('-i'):
        infile = open(optdict['-i'],'rb')
        input = infile.read()
    else:
        input = sys.stdin.read()
    print "input (%d bytes): %s" % (len(input),b2a_pt(input))
    # 160-bit IV 'extended AES' cipher, keyed from the passphrase
    alg=Trolldoll(ivSize=160)
    alg.setPassphrase( passPhrase )
    # Encrypt or decrypt depending on the option selected
    if optdict.has_key('-e'):
        output = alg.encrypt(input)
    elif optdict.has_key('-d'):
        try:
            output = alg.decrypt(input)
        except DecryptNotBlockAlignedError, errMessage :
            sys.exit("""Error: %s\n Note this can be caused by inappropriate modification \n of binary files (Win issue with CR/LFs). Try -a mode. """ % errMessage )
        # should check for integrity failure
    else:
        sys.exit( "Error: Must select encrypt or decrypt, pick either '-e' or '-d'\n%s" % usage )
    print "output (%d bytes): %s" % (len(output),b2a_pt(output))
    # put output to file or stdout
    if optdict.has_key('-o'):
        outfile = open(optdict['-o'],'wb')
        outfile.write( output )
    else:
        sys.stdout.write( output )
    sys.exit() # normal termination
if __name__ == "__main__":
    # Run the CLI only when executed as a script (the bare string below is a
    # harmless no-op statement kept from the original source).
    """ Called when run from the command line """
    main()
| gpl-2.0 |
eicher31/compassion-modules | thankyou_letters/models/partner_communication.py | 3 | 6485 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models, fields
class CommunicationDefaults(models.AbstractModel):
    """Extend communication defaults with layout flags and the
    success-story option used by the thank-you letter reports."""
    _inherit = 'partner.communication.defaults'

    # Whether the generated letter prints its subject line
    print_subject = fields.Boolean(default=True)
    # Whether the letter prints the company header
    print_header = fields.Boolean()
    # Whether the employee signature is rendered
    show_signature = fields.Boolean()
    # Whether a success story should be attached to communications
    add_success_story = fields.Boolean()
class PartnerCommunication(models.Model):
    """Communication jobs enriched with 'success stories'/'success
    sentences' picked from active records, and with the donation amount
    computed from linked invoice lines."""
    _inherit = 'partner.communication.job'

    ##########################################################################
    #                                 FIELDS                                 #
    ##########################################################################
    # Long success story attached to the letter
    success_story_id = fields.Many2one(
        'success.story', 'Success Story', domain=[('type', '=', 'story')])
    # Short success sentence that templates can substitute via
    # 'object.success_sentence'
    success_sentence_id = fields.Many2one(
        'success.story', 'Success Sentence',
        domain=[('type', '=', 'sentence')])
    success_sentence = fields.Text(related='success_sentence_id.body_text')
    # Mirrors the flag of the communication configuration
    add_success_story = fields.Boolean(related='config_id.add_success_story')
    # Donation total of the linked invoice lines (stored, recomputed on
    # object_ids changes)
    amount = fields.Float(compute='_compute_donation_amount', store=True)

    @api.multi
    @api.depends('object_ids')
    def _compute_donation_amount(self):
        """Sum price subtotals of the communication's invoice lines.

        Only communications whose objects are account.invoice.line records
        get an amount; resolution failures are skipped silently.
        """
        for communication in self:
            if communication.model == 'account.invoice.line':
                try:
                    invoice_lines = communication.get_objects().exists()
                    if not invoice_lines:
                        continue
                except ValueError:
                    # object_ids could not be resolved into records
                    continue
                communication.amount = sum(invoice_lines
                                           .mapped('price_subtotal'))

    ##########################################################################
    #                             ORM METHODS                                #
    ##########################################################################
    @api.model
    def _get_default_vals(self, vals, default_vals=None):
        """Add the layout flags to the values defaulted from the config."""
        if default_vals is None:
            default_vals = []
        default_vals.extend([
            'print_subject', 'print_header', 'show_signature'])
        return super(PartnerCommunication, self)._get_default_vals(
            vals, default_vals)

    ##########################################################################
    #                             PUBLIC METHODS                             #
    ##########################################################################
    @api.multi
    def set_success_story(self):
        """
        Takes the less used active success story and attach it
        to communications.
        :return: True
        """
        # Candidate stories/sentences: active ones not restricted to
        # manual selection
        all_stories = self.env['success.story'].search([
            ('is_active', '=', True),
            ('only_when_chosen', '=', False)
        ])
        stories = all_stories.filtered(lambda s: s.type == 'story')
        sentences = all_stories.filtered(lambda s: s.type == 'sentence')
        default_story = self.env.context.get('default_success_story_id')
        for job in self:
            # Only set success story if config is set.
            if job.add_success_story and stories and not default_story:
                if len(stories) == 1:
                    job.success_story_id = stories
                else:
                    story, use_count = job._get_min_used_story(stories)
                    job.success_story_id = story
            # A sentence is only useful when the template actually
            # references object.success_sentence
            body = job.with_context(
                lang=job.partner_id.lang).email_template_id.body_html
            if sentences and body and 'object.success_sentence' in body:
                if len(sentences) == 1:
                    job.success_sentence_id = sentences
                else:
                    sentence, use_count = job._get_min_used_story(sentences)
                    if use_count < 5:
                        job.success_sentence_id = sentence
        return True

    @api.multi
    def refresh_text(self, refresh_uid=False):
        """
        Refresh the success story as well
        :param refresh_uid: User that refresh
        :return: True
        """
        for job in self:
            # Manually chosen stories are kept; others get re-balanced
            if not job.success_story_id.only_when_chosen:
                job.set_success_story()
        super(PartnerCommunication, self).refresh_text(refresh_uid)
        return True

    @api.multi
    def send(self):
        """
        Update the count of success story prints when sending a receipt.
        :return: True
        """
        res = super(PartnerCommunication, self).send()
        for job in self.filtered('sent_date'):
            if job.success_story_id:
                job.success_story_id.print_count += 1
            # Count the sentence only if it really ended up in the body
            if job.success_sentence and job.success_sentence in job.body_html:
                job.success_sentence_id.print_count += 1
        return res

    @api.multi
    def _get_min_used_story(self, stories):
        """
        Given success stories, returns the one that the partner has received
        the least.
        :param stories: <success.story> recordset
        :return: <success.story> single record, <int> usage count
        """
        self.ensure_one()
        usage_count = dict()
        type = stories.mapped('type')[0]
        field = 'success_story_id' if type == 'story' else \
            'success_sentence_id'
        # Put the least used stories at end of list to choose them in case
        # of equality use for a partner.
        stories = reversed(stories.sorted(lambda s: s.current_usage_count))
        for s in stories:
            # How many times this partner already received the story
            usage = self.search_count([
                ('partner_id', '=', self.partner_id.id),
                (field, '=', s.id)
            ])
            # NOTE(review): equal usage counts overwrite each other here;
            # the reversed ordering above makes the globally least-used
            # record win the tie -- presumably intentional, confirm.
            usage_count[usage] = s
        min_used = min(usage_count.keys())
        return usage_count[min_used], min_used
class HrDepartment(models.Model):
    """Make department names translatable for letter signatures."""
    _inherit = 'hr.department'

    # Translate name of department for signatures
    name = fields.Char(translate=True)
class ResCompany(models.Model):
    """Make the company address name translatable for letter signatures."""
    _inherit = 'res.company'

    # Translate name of Company for signatures
    address_name = fields.Char(translate=True)
| agpl-3.0 |
susansls/zulip | zerver/management/commands/knight.py | 15 | 2864 | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from zerver.lib.actions import do_change_is_admin
from zerver.models import UserProfile
class Command(BaseCommand):
    help = """Give an existing user administrative permissions over their (own) Realm.
ONLY perform this on customer request from an authorized person.
"""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        """Register the flags of the knight command."""
        parser.add_argument('-f', '--for-real',
                            dest='ack',
                            action="store_true",
                            default=False,
                            help='Acknowledgement that this is done according to policy.')
        parser.add_argument('--revoke',
                            dest='grant',
                            action="store_false",
                            default=True,
                            help='Remove an administrator\'s rights.')
        parser.add_argument('--permission',
                            dest='permission',
                            action="store",
                            default='administer',
                            help='Permission to grant/remove.')
        parser.add_argument('email', metavar='<email>', type=str,
                            help="email of user to knight")

    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        """Grant or revoke a realm permission for the user given by email.

        Without --for-real the command only prints what it would do.
        Raises CommandError when the user does not exist or the
        grant/revoke request is redundant.
        """
        email = options['email']
        try:
            profile = UserProfile.objects.get(email=email)
        except UserProfile.DoesNotExist:
            # BUG FIX: QuerySet.get() raises <Model>.DoesNotExist when no row
            # matches -- it never raises ValidationError -- so the previous
            # `except ValidationError:` clause was dead and a missing user
            # produced a raw traceback instead of "No such user.".
            raise CommandError("No such user.")

        if options['grant']:
            if profile.has_perm(options['permission'], profile.realm):
                raise CommandError("User already has permission for this realm.")
            else:
                if options['ack']:
                    do_change_is_admin(profile, True, permission=options['permission'])
                    print("Done!")
                else:
                    # Dry run: show what would happen without --for-real
                    print("Would have granted %s %s rights for %s" % (
                        email, options['permission'], profile.realm.string_id))
        else:
            if profile.has_perm(options['permission'], profile.realm):
                if options['ack']:
                    do_change_is_admin(profile, False, permission=options['permission'])
                    print("Done!")
                else:
                    print("Would have removed %s's %s rights on %s" % (email, options['permission'],
                                                                       profile.realm.string_id))
            else:
                raise CommandError("User did not have permission for this realm!")
| apache-2.0 |
sunlianqiang/kbengine | kbe/src/lib/python/Tools/demo/vector.py | 110 | 1452 | #!/usr/bin/env python3
"""
A demonstration of classes and their special methods in Python.
"""
class Vec:
    """A simple vector class.

    Instances of the Vec class can be constructed from numbers

    >>> a = Vec(1, 2, 3)
    >>> b = Vec(3, 2, 1)

    added
    >>> a + b
    Vec(4, 4, 4)

    subtracted
    >>> a - b
    Vec(-2, 0, 2)

    and multiplied by a scalar on the left
    >>> 3.0 * a
    Vec(3.0, 6.0, 9.0)

    or on the right
    >>> a * 3.0
    Vec(3.0, 6.0, 9.0)
    """

    def __init__(self, *components):
        # Coordinates live in a plain list so fromlist() can adopt one.
        self.v = list(components)

    @classmethod
    def fromlist(cls, v):
        """Alternate constructor adopting an existing list (no copy)."""
        if not isinstance(v, list):
            raise TypeError
        vec = cls()
        vec.v = v
        return vec

    def __repr__(self):
        return 'Vec({})'.format(', '.join(map(repr, self.v)))

    def __len__(self):
        return len(self.v)

    def __getitem__(self, index):
        return self.v[index]

    def __add__(self, other):
        # Element-wise addition
        return Vec.fromlist([a + b for a, b in zip(self.v, other.v)])

    def __sub__(self, other):
        # Element-wise subtraction
        return Vec.fromlist([a - b for a, b in zip(self.v, other.v)])

    def __mul__(self, scalar):
        # Multiply every component by the scalar
        return Vec.fromlist([a * scalar for a in self.v])

    # Scalar-on-the-left multiplication behaves identically
    __rmul__ = __mul__
def test():
    """Run the doctest examples embedded in this module's docstrings."""
    import doctest
    doctest.testmod()
# Self-check on import/run: exercises the doctests in the Vec class above.
test()
| lgpl-3.0 |
syjeon/new_edx | lms/djangoapps/courseware/migrations/0001_initial.py | 194 | 8306 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration creating the courseware_studentmodule table.

    Auto-generated schema migration; the ``models`` dict below is a frozen
    snapshot of the ORM state at generation time and must not be edited by
    hand.
    """

    def forwards(self, orm):
        """Create the StudentModule table and its uniqueness constraint."""
        # Adding model 'StudentModule'
        db.create_table('courseware_studentmodule', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('module_type', self.gf('django.db.models.fields.CharField')(default='problem', max_length=32)),
            ('module_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('student', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('state', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('grade', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('courseware', ['StudentModule'])

        # Adding unique constraint on 'StudentModule', fields ['student', 'module_id', 'module_type']
        db.create_unique('courseware_studentmodule', ['student_id', 'module_id', 'module_type'])

    def backwards(self, orm):
        """Reverse the migration: drop constraint first, then the table."""
        # Removing unique constraint on 'StudentModule', fields ['student', 'module_id', 'module_type']
        db.delete_unique('courseware_studentmodule', ['student_id', 'module_id', 'module_type'])

        # Deleting model 'StudentModule'
        db.delete_table('courseware_studentmodule')

    # Frozen ORM definitions (generated; do not modify)
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'courseware.studentmodule': {
            'Meta': {'unique_together': "(('student', 'module_id', 'module_type'),)", 'object_name': 'StudentModule'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'module_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32'}),
            'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['courseware']
| agpl-3.0 |
vmendez/DIRAC | Resources/Storage/StorageElement.py | 1 | 35478 | """ This is the StorageElement class.
"""
from types import ListType
__RCSID__ = "$Id$"
# # custom duty
import re
import time
import datetime
import copy
import errno
# # from DIRAC
from DIRAC import gLogger, gConfig, siteName
from DIRAC.Core.Utilities import DErrno, DError
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR, returnSingleResult
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.Core.Utilities.Pfn import pfnparse
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
class StorageElementCache( object ):
  """ Factory cache handing out StorageElementItem instances, memoized
      per (name, protocols, vo) for half an hour. """

  def __init__( self ):
    # Time-based cache keyed on the constructor arguments
    self.seCache = DictCache()

  def __call__( self, name, protocols = None, vo = None, hideExceptions = False ):
    """ Return a cached StorageElementItem for the given key, building
        (and caching) a fresh one when needed. """
    # Drop entries that expired more than a minute ago before looking up
    self.seCache.purgeExpired( expiredInSeconds = 60 )
    cacheKey = ( name, protocols, vo )
    storageElement = self.seCache.get( cacheKey )
    if storageElement:
      return storageElement
    storageElement = StorageElementItem( name, protocols, vo, hideExceptions = hideExceptions )
    # Keep the freshly built StorageElement around for 1/2 hour
    self.seCache.add( cacheKey, 1800, storageElement )
    return storageElement
class StorageElementItem( object ):
"""
.. class:: StorageElement
common interface to the grid storage element
self.name is the resolved name of the StorageElement i.e CERN-tape
self.options is dictionary containing the general options defined in the CS e.g. self.options['Backend] = 'Castor2'
self.storages is a list of the stub objects created by StorageFactory for the protocols found in the CS.
self.localPlugins is a list of the local protocols that were created by StorageFactory
self.remotePlugins is a list of the remote protocols that were created by StorageFactory
self.protocolOptions is a list of dictionaries containing the options found in the CS. (should be removed)
dynamic method :
retransferOnlineFile( lfn )
exists( lfn )
isFile( lfn )
getFile( lfn, localPath = False )
putFile( lfnLocal, sourceSize = 0 ) : {lfn:local}
replicateFile( lfn, sourceSize = 0 )
getFileMetadata( lfn )
getFileSize( lfn )
removeFile( lfn )
prestageFile( lfn, lifetime = 86400 )
prestageFileStatus( lfn )
pinFile( lfn, lifetime = 60 * 60 * 24 )
releaseFile( lfn )
isDirectory( lfn )
getDirectoryMetadata( lfn )
getDirectorySize( lfn )
listDirectory( lfn )
removeDirectory( lfn, recursive = False )
createDirectory( lfn )
putDirectory( lfn )
getDirectory( lfn, localPath = False )
"""
__deprecatedArguments = ["singleFile", "singleDirectory"] # Arguments that are now useless
# Some methods have a different name in the StorageElement and the plugins...
# We could avoid this static list in the __getattr__ by checking the storage plugin and so on
# but fine... let's not be too smart, otherwise it becomes unreadable :-)
__equivalentMethodNames = {"exists" : "exists",
"isFile" : "isFile",
"getFile" : "getFile",
"putFile" : "putFile",
"replicateFile" : "putFile",
"getFileMetadata" : "getFileMetadata",
"getFileSize" : "getFileSize",
"removeFile" : "removeFile",
"prestageFile" : "prestageFile",
"prestageFileStatus" : "prestageFileStatus",
"pinFile" : "pinFile",
"releaseFile" : "releaseFile",
"isDirectory" : "isDirectory",
"getDirectoryMetadata" : "getDirectoryMetadata",
"getDirectorySize" : "getDirectorySize",
"listDirectory" : "listDirectory",
"removeDirectory" : "removeDirectory",
"createDirectory" : "createDirectory",
"putDirectory" : "putDirectory",
"getDirectory" : "getDirectory",
}
# We can set default argument in the __executeFunction which impacts all plugins
__defaultsArguments = {"putFile" : {"sourceSize" : 0 },
"getFile": { "localPath": False },
"prestageFile" : { "lifetime" : 86400 },
"pinFile" : { "lifetime" : 60 * 60 * 24 },
"removeDirectory" : { "recursive" : False },
"getDirectory" : { "localPath" : False },
}
  def __init__( self, name, plugins = None, vo = None, hideExceptions = False ):
    """ c'tor

    Resolves the SE configuration through StorageFactory and builds the
    storage plugin objects, recording which protocols are local/remote.

    :param str name: SE name
    :param list plugins: requested storage plugins (None means all)
    :param vo: VO name; when falsy it is derived from the proxy group
    :param bool hideExceptions: forwarded to StorageFactory
    """
    self.methodName = None

    if vo:
      self.vo = vo
    else:
      result = getVOfromProxyGroup()
      if not result['OK']:
        # NOTE(review): returning here leaves the object half-initialised
        # (no self.valid / self.name / self.log); later attribute access
        # would raise AttributeError -- confirm this is intended.
        return
      self.vo = result['Value']
    self.opHelper = Operations( vo = self.vo )

    # Proxy storage is used when the first access protocol is in the list
    # of proxied protocols, or when explicitly enabled for this SE either
    # locally or in the Operations section.
    proxiedProtocols = gConfig.getValue( '/LocalSite/StorageElements/ProxyProtocols', "" ).split( ',' )
    useProxy = ( gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "UnknownProtocol" )
                in proxiedProtocols )

    if not useProxy:
      useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
    if not useProxy:
      useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )

    self.valid = True
    if plugins == None:
      res = StorageFactory( useProxy = useProxy, vo = self.vo ).getStorages( name, pluginList = [], hideExceptions = hideExceptions )
    else:
      res = StorageFactory( useProxy = useProxy, vo = self.vo ).getStorages( name, pluginList = plugins, hideExceptions = hideExceptions )

    if not res['OK']:
      # Factory failure: keep the object but mark it invalid and remember why
      self.valid = False
      self.name = name
      self.errorReason = res['Message']
    else:
      factoryDict = res['Value']
      self.name = factoryDict['StorageName']
      self.options = factoryDict['StorageOptions']
      self.localPlugins = factoryDict['LocalPlugins']
      self.remotePlugins = factoryDict['RemotePlugins']
      self.storages = factoryDict['StorageObjects']
      self.protocolOptions = factoryDict['ProtocolOptions']
      self.turlProtocols = factoryDict['TurlProtocols']
      for storage in self.storages:
        storage.setStorageElement( self )

    self.log = gLogger.getSubLogger( "SE[%s]" % self.name )
    self.useCatalogURL = gConfig.getValue( '/Resources/StorageElements/%s/UseCatalogURL' % self.name, False )

    # Method-name groupings used by isValid() to map a requested method
    # onto the corresponding Read/Write/Remove/Check access permission.
    #         'getTransportURL',
    self.readMethods = [ 'getFile',
                         'prestageFile',
                         'prestageFileStatus',
                         'getDirectory']

    self.writeMethods = [ 'retransferOnlineFile',
                          'putFile',
                          'replicateFile',
                          'pinFile',
                          'releaseFile',
                          'createDirectory',
                          'putDirectory' ]

    self.removeMethods = [ 'removeFile', 'removeDirectory' ]

    self.checkMethods = [ 'exists',
                          'getDirectoryMetadata',
                          'getDirectorySize',
                          'getFileSize',
                          'getFileMetadata',
                          'listDirectory',
                          'isDirectory',
                          'isFile',
                        ]

    # Bookkeeping methods that are always allowed, regardless of SE status
    self.okMethods = [ 'getLocalProtocols',
                       'getProtocols',
                       'getRemoteProtocols',
                       'getStorageElementName',
                       'getStorageParameters',
                       'getTransportURL',
                       'isLocalSE' ]

    self.__fileCatalog = None
def dump( self ):
""" Dump to the logger a summary of the StorageElement items. """
log = self.log.getSubLogger( 'dump', True )
log.verbose( "Preparing dump for StorageElement %s." % self.name )
if not self.valid:
log.debug( "Failed to create StorageElement plugins.", self.errorReason )
return
i = 1
outStr = "\n\n============ Options ============\n"
for key in sorted( self.options ):
outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), self.options[key] )
for storage in self.storages:
outStr = "%s============Protocol %s ============\n" % ( outStr, i )
storageParameters = storage.getParameters()
for key in sorted( storageParameters ):
outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), storageParameters[key] )
i = i + 1
log.verbose( outStr )
#################################################################################################
#
# These are the basic get functions for storage configuration
#
def getStorageElementName( self ):
""" SE name getter """
self.log.getSubLogger( 'getStorageElementName' ).verbose( "The Storage Element name is %s." % self.name )
return S_OK( self.name )
def getChecksumType( self ):
""" get local /Resources/StorageElements/SEName/ChecksumType option if defined, otherwise
global /Resources/StorageElements/ChecksumType
"""
self.log.getSubLogger( 'getChecksumType' ).verbose( "get checksum type for %s." % self.name )
return S_OK( str( gConfig.getValue( "/Resources/StorageElements/ChecksumType", "ADLER32" ) ).upper()
if "ChecksumType" not in self.options else str( self.options["ChecksumType"] ).upper() )
def getStatus( self ):
"""
Return Status of the SE, a dictionary with:
- Read: True (is allowed), False (it is not allowed)
- Write: True (is allowed), False (it is not allowed)
- Remove: True (is allowed), False (it is not allowed)
- Check: True (is allowed), False (it is not allowed).
NB: Check always allowed IF Read is allowed (regardless of what set in the Check option of the configuration)
- DiskSE: True if TXDY with Y > 0 (defaults to True)
- TapeSE: True if TXDY with X > 0 (defaults to False)
- TotalCapacityTB: float (-1 if not defined)
- DiskCacheTB: float (-1 if not defined)
"""
self.log.getSubLogger( 'getStatus' ).verbose( "determining status of %s." % self.name )
retDict = {}
if not self.valid:
retDict['Read'] = False
retDict['Write'] = False
retDict['Remove'] = False
retDict['Check'] = False
retDict['DiskSE'] = False
retDict['TapeSE'] = False
retDict['TotalCapacityTB'] = -1
retDict['DiskCacheTB'] = -1
return S_OK( retDict )
# If nothing is defined in the CS Access is allowed
# If something is defined, then it must be set to Active
retDict['Read'] = not ( 'ReadAccess' in self.options and self.options['ReadAccess'] not in ( 'Active', 'Degraded' ) )
retDict['Write'] = not ( 'WriteAccess' in self.options and self.options['WriteAccess'] not in ( 'Active', 'Degraded' ) )
retDict['Remove'] = not ( 'RemoveAccess' in self.options and self.options['RemoveAccess'] not in ( 'Active', 'Degraded' ) )
if retDict['Read']:
retDict['Check'] = True
else:
retDict['Check'] = not ( 'CheckAccess' in self.options and self.options['CheckAccess'] not in ( 'Active', 'Degraded' ) )
diskSE = True
tapeSE = False
if 'SEType' in self.options:
# Type should follow the convention TXDY
seType = self.options['SEType']
diskSE = re.search( 'D[1-9]', seType ) != None
tapeSE = re.search( 'T[1-9]', seType ) != None
retDict['DiskSE'] = diskSE
retDict['TapeSE'] = tapeSE
try:
retDict['TotalCapacityTB'] = float( self.options['TotalCapacityTB'] )
except Exception:
retDict['TotalCapacityTB'] = -1
try:
retDict['DiskCacheTB'] = float( self.options['DiskCacheTB'] )
except Exception:
retDict['DiskCacheTB'] = -1
return S_OK( retDict )
  def isValid( self, operation = '' ):
    """ check CS/RSS statuses for :operation:

    :param str operation: operation name -- either one of the SE method
                          names, or a 'read'/'write'/'remove'/'check'
                          alias (case-insensitive)
    :return: S_OK() when the operation is currently permitted,
             S_ERROR / DError otherwise
    """
    log = self.log.getSubLogger( 'isValid', True )
    log.verbose( "Determining if the StorageElement %s is valid for VO %s" % ( self.name, self.vo ) )

    if not self.valid:
      log.debug( "Failed to create StorageElement plugins.", self.errorReason )
      return S_ERROR( "SE.isValid: Failed to create StorageElement plugins: %s" % self.errorReason )

    # Check if the Storage Element is eligible for the user's VO
    if 'VO' in self.options and not self.vo in self.options['VO']:
      log.debug( "StorageElement is not allowed for VO", self.vo )
      return DError( errno.EACCES, "StorageElement.isValid: StorageElement is not allowed for VO" )
    log.verbose( "Determining if the StorageElement %s is valid for %s" % ( self.name, operation ) )

    # Bookkeeping methods (okMethods) are always allowed
    if ( not operation ) or ( operation in self.okMethods ):
      return S_OK()

    # Determine whether the StorageElement is valid for checking, reading, writing
    res = self.getStatus()
    if not res[ 'OK' ]:
      log.debug( "Could not call getStatus", res['Message'] )
      return S_ERROR( "SE.isValid could not call the getStatus method" )
    checking = res[ 'Value' ][ 'Check' ]
    reading = res[ 'Value' ][ 'Read' ]
    writing = res[ 'Value' ][ 'Write' ]
    removing = res[ 'Value' ][ 'Remove' ]

    # Determine whether the requested operation can be fulfilled
    # NOTE(review): 'not operation' already returned S_OK() above, so this
    # branch looks unreachable -- confirm before relying on it.
    if ( not operation ) and ( not reading ) and ( not writing ) and ( not checking ):
      log.debug( "Read, write and check access not permitted." )
      return DError( errno.EACCES, "SE.isValid: Read, write and check access not permitted." )

    # The supplied operation can be 'Read','Write' or any of the possible StorageElement methods.
    # Normalise it to one of the four access categories.
    if ( operation in self.readMethods ) or ( operation.lower() in ( 'read', 'readaccess' ) ):
      operation = 'ReadAccess'
    elif operation in self.writeMethods or ( operation.lower() in ( 'write', 'writeaccess' ) ):
      operation = 'WriteAccess'
    elif operation in self.removeMethods or ( operation.lower() in ( 'remove', 'removeaccess' ) ):
      operation = 'RemoveAccess'
    elif operation in self.checkMethods or ( operation.lower() in ( 'check', 'checkaccess' ) ):
      operation = 'CheckAccess'
    else:
      log.debug( "The supplied operation is not known.", operation )
      return DError( DErrno.ENOMETH , "SE.isValid: The supplied operation is not known." )
    log.debug( "check the operation: %s " % operation )

    # Check if the operation is valid
    if operation == 'CheckAccess':
      # Check is allowed whenever read is allowed; otherwise fall back to
      # the explicit Check status
      if not reading:
        if not checking:
          log.debug( "Check access not currently permitted." )
          return DError( errno.EACCES, "SE.isValid: Check access not currently permitted." )
    if operation == 'ReadAccess':
      if not reading:
        log.debug( "Read access not currently permitted." )
        return DError( errno.EACCES, "SE.isValid: Read access not currently permitted." )
    if operation == 'WriteAccess':
      if not writing:
        log.debug( "Write access not currently permitted." )
        return DError( errno.EACCES, "SE.isValid: Write access not currently permitted." )
    if operation == 'RemoveAccess':
      if not removing:
        log.debug( "Remove access not currently permitted." )
        return DError( errno.EACCES, "SE.isValid: Remove access not currently permitted." )
    return S_OK()
def getPlugins( self ):
""" Get the list of all the plugins defined for this Storage Element
"""
self.log.getSubLogger( 'getPlugins' ).verbose( "Obtaining all plugins of %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
allPlugins = self.localPlugins + self.remotePlugins
return S_OK( allPlugins )
def getRemotePlugins( self ):
  """Return the remote access protocol plugins defined for this Storage Element.

  :returns: S_OK( list of plugin names ), or S_ERROR when the SE is not valid
  """
  log = self.log.getSubLogger( 'getRemotePlugins' )
  log.verbose( "Obtaining remote protocols for %s." % self.name )
  return S_OK( self.remotePlugins ) if self.valid else S_ERROR( self.errorReason )
def getLocalPlugins( self ):
  """Return the local access protocol plugins defined for this Storage Element.

  :returns: S_OK( list of plugin names ), or S_ERROR when the SE is not valid
  """
  log = self.log.getSubLogger( 'getLocalPlugins' )
  log.verbose( "Obtaining local protocols for %s." % self.name )
  return S_OK( self.localPlugins ) if self.valid else S_ERROR( self.errorReason )
def getStorageParameters( self, plugin ):
  """Return the parameter dictionary of the storage object implementing *plugin*.

  :param plugin: name of the plugin we are interested in
  :returns: S_OK( parameter dict ), or S_ERROR when the plugin is not available
  """
  log = self.log.getSubLogger( 'getStorageParameters' )
  log.verbose( "Obtaining storage parameters for %s plugin %s." % ( self.name,
                                                                    plugin ) )
  res = self.getPlugins()
  if not res['OK']:
    return res
  if plugin not in res['Value']:
    errStr = "Requested plugin not available for SE."
    log.debug( errStr, '%s for %s' % ( plugin, self.name ) )
    return S_ERROR( errStr )
  # Find the storage object whose parameters advertise the requested plugin.
  for storageObj in self.storages:
    params = storageObj.getParameters()
    if params['PluginName'] == plugin:
      return S_OK( params )
  errStr = "Requested plugin supported but no object found."
  log.debug( errStr, "%s for %s" % ( plugin, self.name ) )
  return S_ERROR( errStr )
def negociateProtocolWithOtherSE( self, sourceSE, protocols = None ):
  """ Negotiate what protocol could be used for a third party transfer
      between the sourceSE and ourselves. If protocols is given,
      the chosen protocol has to be among those.

      :param sourceSE: storageElement instance of the sourceSE
      :param protocols: protocol restriction list
      :returns: S_OK( list of protocols that fit the needs )
  """
  # We should actually separate source and destination protocols
  # For example, an SRM can get as a source an xroot or gsiftp url...
  # but with the current implementation, we get only srm
  destProtocols = set( destStorage.protocolParameters['Protocol'] for destStorage in self.storages )
  sourceProtocols = set( sourceStorage.protocolParameters['Protocol'] for sourceStorage in sourceSE.storages )
  commonProtocols = destProtocols & sourceProtocols
  if protocols:
    # The previous 'set(list(protocols)) if protocols else set()' was dead
    # code: this branch is only entered when protocols is truthy.
    commonProtocols &= set( protocols )
  return S_OK( list( commonProtocols ) )
#################################################################################################
#
# These are the basic get functions for lfn manipulation
#
def __getURLPath( self, url ):
  """ Get the part of the URL path below the basic storage path.
      This path must coincide with the LFN of the file in order to be
      compliant with the DIRAC conventions.

      :param url: url to reduce to its LFN-like part
      :returns: S_OK( path below the storage base path ) or S_ERROR
  """
  log = self.log.getSubLogger( '__getURLPath' )
  log.verbose( "Getting path from url in %s." % self.name )
  if not self.valid:
    return S_ERROR( self.errorReason )
  res = pfnparse( url )
  if not res['OK']:
    return res
  fullURLPath = '%s/%s' % ( res['Value']['Path'], res['Value']['FileName'] )
  # Check all available storages and check whether the url is for that protocol
  urlPath = ''
  for storage in self.storages:
    res = storage.isNativeURL( url )
    if res['OK']:
      if res['Value']:
        parameters = storage.getParameters()
        saPath = parameters['Path']
        if not saPath:
          # If the sa path doesn't exist then the url path is the entire string
          urlPath = fullURLPath
        else:
          # NOTE(review): saPath is interpreted as a regular expression here;
          # special characters in the storage path could make the match
          # misbehave. Confirm whether a plain substring test was intended.
          if re.search( saPath, fullURLPath ):
            # Remove the sa path from the fullURLPath
            urlPath = fullURLPath.replace( saPath, '' )
    if urlPath:
      return S_OK( urlPath )
  # This should never happen. DANGER!!
  errStr = "Failed to get the url path for any of the protocols!!"
  log.debug( errStr )
  return S_ERROR( errStr )
def getLFNFromURL( self, urls ):
  """Resolve the LFN corresponding to each supplied URL.

  :param urls: input url or urls (string, list of strings, or dict)
  :returns: S_OK( { 'Successful' : {url : lfn}, 'Failed' : {url : error} } )
  """
  result = checkArgumentFormat( urls )
  if not result['OK']:
    errStr = "Supplied urls must be string, list of strings or a dictionary."
    self.log.getSubLogger( 'getLFNFromURL' ).debug( errStr )
    return DError( errno.EINVAL, errStr )
  retDict = { "Successful" : {}, "Failed" : {} }
  for url in result['Value']:
    res = self.__getURLPath( url )
    if res["OK"]:
      retDict["Successful"][url] = res["Value"]
    else:
      retDict["Failed"][url] = res["Message"]
  return S_OK( retDict )
###########################################################################################
#
# This is the generic wrapper for file operations
#
def getURL( self, lfn, protocol = False, replicaDict = None ):
  """ execute 'getTransportURL' operation.

  :param str lfn: string, list or dictionary of lfns
  :param protocol: if no protocol is specified, we will request self.turlProtocols
  :param replicaDict: optional results from the File Catalog replica query
  :returns: S_OK( { 'Successful' : {lfn : url}, 'Failed' : {lfn : error} } )
  """
  self.log.getSubLogger( 'getURL' ).verbose( "Getting accessUrl %s for lfn in %s." % ( "(%s)" % protocol if protocol else "", self.name ) )
  if not protocol:
    protocols = self.turlProtocols
  elif isinstance( protocol, list ):
    # isinstance replaces the old 'type( protocol ) is ListType' check and
    # also accepts list subclasses.
    protocols = protocol
  elif isinstance( protocol, str ):
    protocols = [protocol]
  else:
    # Previously an unsupported type fell through and crashed with a
    # NameError on 'protocols'; fail explicitly instead.
    return S_ERROR( "getURL: invalid 'protocol' argument %r" % ( protocol, ) )
  self.methodName = "getTransportURL"
  result = self.__executeMethod( lfn, protocols = protocols )
  return result
def __isLocalSE( self ):
  """Check whether this Storage Element is local to the current DIRAC site.

  :returns: S_OK( bool )
  """
  self.log.getSubLogger( 'LocalSE' ).verbose( "Determining whether %s is a local SE." % self.name )
  import DIRAC
  siteSEs = getSEsForSite( DIRAC.siteName() )['Value']
  return S_OK( self.name in siteSEs )
def __getFileCatalog( self ):
  # Lazily create and cache the FileCatalog client for this VO; subsequent
  # calls reuse the same instance.
  if not self.__fileCatalog:
    self.__fileCatalog = FileCatalog( vo = self.vo )
  return self.__fileCatalog
def __generateURLDict( self, lfns, storage, replicaDict = None ):
  """ Generates a dictionary (url : lfn), where the urls are constructed
      from the lfn using the constructURLFromLFN method of the storage plugins.

      :param lfns: dictionary {lfn:whatever}
      :param storage: storage plugin object used to construct or update the urls
      :param replicaDict: optional results from the File Catalog replica query
      :returns: S_OK( {'Successful' : {url : lfn}, 'Failed' : {lfn : error message}} )
  """
  log = self.log.getSubLogger( "__generateURLDict" )
  log.verbose( "generating url dict for %s lfn in %s." % ( len( lfns ), self.name ) )

  # A mutable default argument ({}) used to be shared between calls;
  # default to None and create a fresh dict here instead.
  if replicaDict is None:
    replicaDict = {}

  urlDict = {}  # url : lfn
  failed = {}  # lfn : string with errors
  for lfn in lfns:
    if self.useCatalogURL:
      # Is this self.name alias proof?
      url = replicaDict.get( lfn, {} ).get( self.name, '' )
      if url:
        urlDict[url] = lfn
        continue
      else:
        fc = self.__getFileCatalog()
        # The lfn was previously not passed to getReplicas, so the query
        # could never return a replica for it.
        result = fc.getReplicas( lfn )
        if not result['OK']:
          failed[lfn] = result['Message']
          # An error result carries no 'Value': stop processing this lfn
          # (previously fell through and raised a KeyError).
          continue
        url = result['Value']['Successful'].get( lfn, {} ).get( self.name, '' )
        if not url:
          failed[lfn] = 'Failed to get catalog replica'
        else:
          # Update the URL according to the current SE description
          result = returnSingleResult( storage.updateURL( url ) )
          if not result['OK']:
            failed[lfn] = result['Message']
          else:
            urlDict[result['Value']] = lfn
    else:
      result = storage.constructURLFromLFN( lfn, withWSUrl = True )
      if not result['OK']:
        errStr = result['Message']
        log.debug( errStr, 'for %s' % ( lfn ) )
        failed[lfn] = "%s %s" % ( failed[lfn], errStr ) if lfn in failed else errStr
      else:
        urlDict[result['Value']] = lfn

  res = S_OK( {'Successful': urlDict, 'Failed' : failed} )
  return res
def __executeMethod( self, lfn, *args, **kwargs ):
  """ Forward the call to each storage in turn until one works.
      The method to be executed is stored in self.methodName.

      :param lfn: string, list or dictionnary
      :param *args: variable amount of non-keyword arguments. SHOULD BE EMPTY
      :param **kwargs: keyword arguments
      :returns: S_OK( { 'Failed': {lfn : reason} , 'Successful': {lfn : value} } )
              The Failed dict contains the lfn only if the operation failed on all the storages
              The Successful dict contains the value returned by the successful storages.
  """
  removedArgs = {}
  log = self.log.getSubLogger( '__executeMethod' )
  log.verbose( "preparing the execution of %s" % ( self.methodName ) )

  # args should normaly be empty to avoid problem...
  if len( args ):
    log.verbose( "args should be empty!%s" % args )
    # because there is normally only one kw argument, I can move it from args to kwargs
    methDefaultArgs = StorageElementItem.__defaultsArguments.get( self.methodName, {} ).keys()
    if len( methDefaultArgs ):
      kwargs[methDefaultArgs[0] ] = args[0]
      args = args[1:]
    log.verbose( "put it in kwargs, but dirty and might be dangerous!args %s kwargs %s" % ( args, kwargs ) )

  # We check the deprecated arguments: they are removed from kwargs and kept
  # aside in removedArgs (currently only used for logging).
  for depArg in StorageElementItem.__deprecatedArguments:
    if depArg in kwargs:
      log.verbose( "%s is not an allowed argument anymore. Please change your code!" % depArg )
      removedArgs[depArg] = kwargs[depArg]
      del kwargs[depArg]

  # Set default argument if any
  methDefaultArgs = StorageElementItem.__defaultsArguments.get( self.methodName, {} )
  for argName in methDefaultArgs:
    if argName not in kwargs:
      log.debug( "default argument %s for %s not present.\
       Setting value %s." % ( argName, self.methodName, methDefaultArgs[argName] ) )
      kwargs[argName] = methDefaultArgs[argName]

  res = checkArgumentFormat( lfn )
  if not res['OK']:
    errStr = "Supplied lfns must be string, list of strings or a dictionary."
    log.debug( errStr )
    return res
  lfnDict = res['Value']

  log.verbose( "Attempting to perform '%s' operation with %s lfns." % ( self.methodName, len( lfnDict ) ) )
  res = self.isValid( operation = self.methodName )
  if not res['OK']:
    return res
  else:
    if not self.valid:
      return S_ERROR( self.errorReason )

  successful = {}
  failed = {}
  localSE = self.__isLocalSE()['Value']
  # Try all of the storages one by one
  for storage in self.storages:
    # Determine whether to use this storage object
    storageParameters = storage.getParameters()
    if not storageParameters:
      # NOTE(review): 'res' here still holds the result of the isValid call
      # above, not of getParameters -- the logged Message may be stale.
      log.debug( "Failed to get storage parameters.", "%s %s" % ( self.name, res['Message'] ) )
      continue
    pluginName = storageParameters['PluginName']
    if not lfnDict:
      # All lfns already succeeded with a previous storage.
      log.debug( "No lfns to be attempted for %s protocol." % pluginName )
      continue
    if not ( pluginName in self.remotePlugins ) and not localSE and not storage.pluginName == "Proxy":
      # If the SE is not local then we can't use local protocols
      log.debug( "Local protocol not appropriate for remote use: %s." % pluginName )
      continue

    log.verbose( "Generating %s protocol URLs for %s." % ( len( lfnDict ), pluginName ) )
    # NOTE(review): pop() removes replicaDict from kwargs on the first
    # iteration, so subsequent storages see the default {} -- confirm intent.
    replicaDict = kwargs.pop( 'replicaDict', {} )
    if storage.pluginName != "Proxy":
      res = self.__generateURLDict( lfnDict, storage, replicaDict = replicaDict )
      urlDict = res['Value']['Successful']  # url : lfn
      failed.update( res['Value']['Failed'] )
    else:
      # The Proxy plugin works directly on lfns: identity mapping.
      urlDict = dict( [ ( lfn, lfn ) for lfn in lfnDict ] )

    if not len( urlDict ):
      log.verbose( "__executeMethod No urls generated for protocol %s." % pluginName )
    else:
      log.verbose( "Attempting to perform '%s' for %s physical files" % ( self.methodName, len( urlDict ) ) )
      fcn = None
      if hasattr( storage, self.methodName ) and callable( getattr( storage, self.methodName ) ):
        fcn = getattr( storage, self.methodName )
      if not fcn:
        return DError( DErrno.ENOMETH, "SE.__executeMethod: unable to invoke %s, it isn't a member function of storage" )

      # Map each constructed url back to the caller-supplied value for its lfn.
      urlsToUse = {}  # url : the value of the lfn dictionary for the lfn of this url
      for url in urlDict:
        urlsToUse[url] = lfnDict[urlDict[url]]

      startDate = datetime.datetime.utcnow()
      startTime = time.time()
      res = fcn( urlsToUse, *args, **kwargs )
      elapsedTime = time.time() - startTime
      # Record a DataOperation accounting entry for this attempt.
      self.addAccountingOperation( urlsToUse, startDate, elapsedTime, storageParameters, res )

      if not res['OK']:
        # The whole call failed: append the error to every lfn attempted.
        errStr = "Completely failed to perform %s." % self.methodName
        log.debug( errStr, 'with plugin %s: %s' % ( pluginName, res['Message'] ) )
        for lfn in urlDict.values():
          if lfn not in failed:
            failed[lfn] = ''
          failed[lfn] = "%s %s" % ( failed[lfn], res['Message'] ) if failed[lfn] else res['Message']
      else:
        # Per-url outcome: accumulate failures, record successes, and remove
        # successful lfns from lfnDict so later storages skip them.
        for url, lfn in urlDict.items():
          if url not in res['Value']['Successful']:
            if lfn not in failed:
              failed[lfn] = ''
            if url in res['Value']['Failed']:
              self.log.debug( res['Value']['Failed'][url] )
              failed[lfn] = "%s %s" % ( failed[lfn], res['Value']['Failed'][url] ) if failed[lfn] else res['Value']['Failed'][url]
            else:
              errStr = 'No error returned from plug-in'
              failed[lfn] = "%s %s" % ( failed[lfn], errStr ) if failed[lfn] else errStr
          else:
            successful[lfn] = res['Value']['Successful'][url]
            if lfn in failed:
              failed.pop( lfn )
            lfnDict.pop( lfn )

  # Flush the accumulated accounting records.
  gDataStoreClient.commit()
  return S_OK( { 'Failed': failed, 'Successful': successful } )
def __getattr__( self, name ):
  """ Forwards the equivalent Storage calls to __executeMethod"""
  # We take either the equivalent name, or the name itself
  # NOTE(review): despite the comment above, only names present in
  # __equivalentMethodNames are accepted; any other name raises
  # AttributeError, i.e. the name itself is NOT used as a fallback.
  self.methodName = StorageElementItem.__equivalentMethodNames.get( name, None )
  if self.methodName:
    # The returned bound method will dispatch using self.methodName.
    return self.__executeMethod
  raise AttributeError( "StorageElement does not have a method '%s'" % name )
def addAccountingOperation( self, lfns, startDate, elapsedTime, storageParameters, callRes ):
  """
  Generates a DataOperation accounting if needs to be, and adds it to the
  DataStore client cache.

  :param lfns: list of lfns on which we attempted the operation
  :param startDate: datetime, start of the operation
  :param elapsedTime: time (seconds) the operation took
  :param storageParameters: the parameters of the plugins used to perform the operation
  :param callRes: the return of the method call, S_OK or S_ERROR

  The operation is generated with the OperationType "se.methodName".
  The TransferSize and TransferTotal for directory methods actually take into
  account the files inside the directory, and not the amount of directory given
  as parameter.
  """
  # Only data-moving/removing operations are accounted.
  if self.methodName not in ( self.readMethods + self.writeMethods + self.removeMethods ):
    return

  baseAccountingDict = {}
  baseAccountingDict['OperationType'] = 'se.%s' % self.methodName
  baseAccountingDict['User'] = getProxyInfo().get( 'Value', {} ).get( 'username', 'unknown' )
  baseAccountingDict['RegistrationTime'] = 0.0
  baseAccountingDict['RegistrationOK'] = 0
  baseAccountingDict['RegistrationTotal'] = 0

  # if it is a get method, then source and destination of the transfer should be inverted
  if self.methodName in ( 'putFile', 'getFile' ):
    baseAccountingDict['Destination'] = siteName()
    baseAccountingDict['Source'] = self.name
  else:
    baseAccountingDict['Destination'] = self.name
    baseAccountingDict['Source'] = siteName()

  baseAccountingDict['TransferTotal'] = 0
  baseAccountingDict['TransferOK'] = 0
  baseAccountingDict['TransferSize'] = 0
  baseAccountingDict['TransferTime'] = 0.0
  baseAccountingDict['FinalStatus'] = 'Successful'

  oDataOperation = DataOperation()
  oDataOperation.setValuesFromDict( baseAccountingDict )
  oDataOperation.setStartTime( startDate )
  oDataOperation.setEndTime( startDate + datetime.timedelta( seconds = elapsedTime ) )
  oDataOperation.setValueByKey( 'TransferTime', elapsedTime )
  oDataOperation.setValueByKey( 'Protocol', storageParameters.get( 'Protocol', 'unknown' ) )

  if not callRes['OK']:
    # Everything failed
    oDataOperation.setValueByKey( 'TransferTotal', len( lfns ) )
    oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
  else:
    succ = callRes.get( 'Value', {} ).get( 'Successful', {} )
    failed = callRes.get( 'Value', {} ).get( 'Failed', {} )

    totalSize = 0
    # We don't take len(lfns) in order to make two
    # separate entries in case of few failures
    totalSucc = len( succ )

    if self.methodName in ( 'putFile', 'getFile' ):
      # putFile and getFile return for each entry
      # in the successful dir the size of the corresponding file
      totalSize = sum( succ.values() )
    elif self.methodName in ( 'putDirectory', 'getDirectory' ):
      # putDirectory and getDirectory return for each dir name
      # a dictionnary with the keys 'Files' and 'Size'
      totalSize = sum( val.get( 'Size', 0 ) for val in succ.values() if isinstance( val, dict ) )
      totalSucc = sum( val.get( 'Files', 0 ) for val in succ.values() if isinstance( val, dict ) )
      oDataOperation.setValueByKey( 'TransferOK', len( succ ) )

    oDataOperation.setValueByKey( 'TransferSize', totalSize )
    oDataOperation.setValueByKey( 'TransferTotal', totalSucc )
    # NOTE(review): this overwrites the TransferOK value set in the directory
    # branch above -- confirm which value is intended.
    oDataOperation.setValueByKey( 'TransferOK', totalSucc )

    if callRes['Value']['Failed']:
      # Emit a second, 'Failed' record covering the failed entries.
      oDataOperationFailed = copy.deepcopy( oDataOperation )
      oDataOperationFailed.setValueByKey( 'TransferTotal', len( failed ) )
      oDataOperationFailed.setValueByKey( 'TransferOK', 0 )
      oDataOperationFailed.setValueByKey( 'TransferSize', 0 )
      oDataOperationFailed.setValueByKey( 'FinalStatus', 'Failed' )
      accRes = gDataStoreClient.addRegister( oDataOperationFailed )
      if not accRes['OK']:
        self.log.error( "Could not send failed accounting report", accRes['Message'] )

  accRes = gDataStoreClient.addRegister( oDataOperation )
  if not accRes['OK']:
    self.log.error( "Could not send accounting report", accRes['Message'] )
StorageElement = StorageElementCache()
| gpl-3.0 |
lukasmartinelli/py14 | py14/scope.py | 1 | 2384 | import ast
from contextlib import contextmanager
def add_scope_context(node):
    """Annotate *node* and all of its descendants with scope information."""
    transformer = ScopeTransformer()
    return transformer.visit(node)
class ScopeMixin(object):
    """
    Adds a scope property with the current scope (function, module)
    a node is part of.
    """

    #: Node types that introduce a new scope.
    _SCOPE_TYPES = (ast.Module, ast.FunctionDef, ast.For, ast.If, ast.With)

    # Fallback for instances created without __init__ being run. Previously
    # this class attribute was the ONLY storage, so every instance shared
    # one scope stack and state leaked between independent transformers.
    scopes = []

    def __init__(self):
        # Per-instance scope stack fixes the shared-state bug above.
        self.scopes = []

    @contextmanager
    def enter_scope(self, node):
        """Push *node* on the scope stack for the duration of the block."""
        if self._is_scopable_node(node):
            self.scopes.append(node)
            try:
                yield
            finally:
                # Pop even if the body raises, so the stack never corrupts.
                self.scopes.pop()
        else:
            yield

    @property
    def scope(self):
        """The innermost scope, or None when outside any scope."""
        try:
            return self.scopes[-1]
        except IndexError:
            return None

    def _is_scopable_node(self, node):
        """Return True if *node* opens a new scope."""
        return isinstance(node, self._SCOPE_TYPES)
class ScopeList(list):
    """
    Wraps around a list of scopes and provides find methods for locating
    the definition of a variable.
    """

    def find(self, lookup):
        """Return the node defining *lookup*, searching outermost scope first."""
        def is_match(var):
            if isinstance(var, ast.alias):
                return var.name == lookup
            return isinstance(var, ast.Name) and var.id == lookup

        def find_definition(scope, var_attr="vars"):
            for var in getattr(scope, var_attr):
                if is_match(var):
                    return var
            return None

        for scope in self:
            definition = find_definition(scope)
            # Fall back to the if/else branch variable lists when present.
            for attr in ("body_vars", "orelse_vars"):
                if not definition and hasattr(scope, attr):
                    definition = find_definition(scope, attr)
            if definition:
                return definition
        return None

    def find_import(self, lookup):
        """Return the import binding *lookup*, searching innermost scope first."""
        for scope in reversed(self):
            for imp in getattr(scope, "imports", []):
                if imp.name == lookup:
                    return imp
        return None
class ScopeTransformer(ast.NodeTransformer, ScopeMixin):
    """
    Adds a scope attribute to each node.
    The scope contains the current scope (function, module, for loop)
    a node is part of.
    """
    def visit(self, node):
        # Push the node onto the scope stack (if it opens a scope) before
        # visiting children, so every descendant sees it in its scope chain.
        with self.enter_scope(node):
            # Snapshot of the enclosing scopes, outermost first.
            node.scopes = ScopeList(self.scopes)
            return super(ScopeTransformer, self).visit(node)
| mit |
sajeeshcs/nested_quota_latest | nova/tests/unit/cert/test_rpcapi.py | 6 | 2951 | # Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.cert.rpcapi
"""
import contextlib
import mock
from oslo.config import cfg
from nova.cert import rpcapi as cert_rpcapi
from nova import context
from nova import test
CONF = cfg.CONF
class CertRpcAPITestCase(test.NoDBTestCase):
    """Checks that each CertAPI method issues the expected RPC call."""

    def _test_cert_api(self, method, **kwargs):
        # Generic driver: invoke *method* on the rpcapi and verify exactly one
        # RPC 'call' is made with the same method name and arguments.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        rpcapi = cert_rpcapi.CertAPI()
        self.assertIsNotNone(rpcapi.client)
        self.assertEqual(rpcapi.client.target.topic, CONF.cert_topic)

        # Keep the real prepare() before it is mocked, so the
        # can_send_version side effect below can delegate to it.
        orig_prepare = rpcapi.client.prepare

        # NOTE(review): contextlib.nested exists only on Python 2.
        with contextlib.nested(
            mock.patch.object(rpcapi.client, 'call'),
            mock.patch.object(rpcapi.client, 'prepare'),
            mock.patch.object(rpcapi.client, 'can_send_version'),
        ) as (
            rpc_mock, prepare_mock, csv_mock
        ):
            # prepare() returns the client itself so the call chain works.
            prepare_mock.return_value = rpcapi.client
            rpc_mock.return_value = 'foo'
            csv_mock.side_effect = (
                lambda v: orig_prepare().can_send_version())

            retval = getattr(rpcapi, method)(ctxt, **kwargs)
            self.assertEqual(retval, rpc_mock.return_value)

            prepare_mock.assert_called_once_with()
            rpc_mock.assert_called_once_with(ctxt, method, **kwargs)

    def test_revoke_certs_by_user(self):
        self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')

    def test_revoke_certs_by_project(self):
        self._test_cert_api('revoke_certs_by_project',
                            project_id='fake_project_id')

    def test_revoke_certs_by_user_and_project(self):
        self._test_cert_api('revoke_certs_by_user_and_project',
                            user_id='fake_user_id',
                            project_id='fake_project_id')

    def test_generate_x509_cert(self):
        self._test_cert_api('generate_x509_cert',
                            user_id='fake_user_id',
                            project_id='fake_project_id')

    def test_fetch_ca(self):
        self._test_cert_api('fetch_ca', project_id='fake_project_id')

    def test_fetch_crl(self):
        self._test_cert_api('fetch_crl', project_id='fake_project_id')

    def test_decrypt_text(self):
        self._test_cert_api('decrypt_text',
                            project_id='fake_project_id', text='blah')
| apache-2.0 |
MiLk/youtube-dl | youtube_dl/extractor/franceinter.py | 15 | 1141 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class FranceInterIE(InfoExtractor):
    """Extracts audio replays from franceinter.fr player pages."""

    _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})'
    _TEST = {
        'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
        'file': '793962.mp3',
        'md5': '4764932e466e6f6c79c317d2e74f6884',
        "info_dict": {
            "title": "L’Histoire dans les jeux vidéo",
        },
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        page = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<span class="roll_overflow">(.*?)</span></h1>', page, 'title')
        # The AOD (audio-on-demand) path is embedded in the player query string.
        audio_path = self._search_regex(
            r'&urlAOD=(.*?)&startTime', page, 'video url')

        return {
            'id': video_id,
            'title': title,
            'formats': [{
                'url': 'http://www.franceinter.fr/' + audio_path,
                # Audio-only stream.
                'vcodec': 'none',
            }],
        }
| unlicense |
syhost/android_kernel_pantech_ef50l | scripts/gcc-wrapper.py | 501 | 3410 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
# Whitelisted warnings ("<file>:<line>", matching group(2) of warning_re)
# that are known and must not fail the build.
allowed_warnings = set([
    "alignment.c:327",
    "mmu.c:602",
    "return_address.c:62",
 ])

# Capture the name of the object file, can find it.
# (Set by run_gcc from the -o argument; used to delete a partial object
# when a forbidden warning is detected.)
ofile = None

# Matches "file.ext:line:[col:] warning:" in gcc diagnostics; group(2) is
# the "file:line" key compared against allowed_warnings.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc. The messages we care about have a filename, and a warning"""
    # NOTE: Python 2 source (print statements); this wrapper targets a py2 host.
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        # Abort the compile so the build system sees a failure.
        sys.exit(1)
def run_gcc():
    """Run the real compiler, scanning its stderr for forbidden warnings.

    Returns the compiler's exit status, or an errno value on exec failure.
    """
    args = sys.argv[1:]
    # Look for -o
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass
    # NOTE(review): 'compiler' is assigned but never used; args[0] is what
    # is actually executed below.
    compiler = sys.argv[0]
    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        for line in proc.stderr:
            # Echo gcc's stderr while checking each line for bad warnings.
            print line,
            interpret_warning(line)

        result = proc.wait()
    except OSError as e:
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)

    return result
# Entry point: run the wrapped compiler and propagate its exit status.
if __name__ == '__main__':
    status = run_gcc()
    sys.exit(status)
| gpl-2.0 |
wayneicn/crazyflie-clients-python | lib/cflib/crazyflie/console.py | 26 | 2060 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Crazyflie console is used to receive characters printed using printf
from the firmware.
"""
__author__ = 'Bitcraze AB'
__all__ = ['Console']
import struct
from cflib.utils.callbacks import Caller
from cflib.crtp.crtpstack import CRTPPort
class Console:
    """
    Crazyflie console is used to receive characters printed using printf
    from the firmware.
    """

    # Callback holder fired with every chunk of console text received.
    receivedChar = Caller()

    def __init__(self, crazyflie):
        """
        Initialize the console and register it to receive data from the copter.
        """
        self.cf = crazyflie
        self.cf.add_port_callback(CRTPPort.CONSOLE, self.incoming)

    def incoming(self, packet):
        """
        Callback for data received from the copter.
        """
        # This might be done prettier ;-)
        fmt = "%is" % len(packet.data)
        (text,) = struct.unpack(fmt, packet.data)
        console_text = "%s" % text
        self.receivedChar.call(console_text)
| gpl-2.0 |
deter-project/magi | magi/tests/023_multicastNetworkTestServer.py | 1 | 1888 | #!/usr/bin/env python
import unittest2
import logging
import time
from magi.messaging.magimessage import MAGIMessage
from magi.messaging.transportMulticast import MulticastTransport
from magi.testbed import testbed
from magi.messaging.api import Messenger
class TransportTest(unittest2.TestCase):
    """
    Testing of basics in TCPTransport class
    """
    # NOTE(review): the docstring says TCPTransport but the class exercises
    # MulticastTransport -- confirm which is intended.

    def setUp(self):
        #TODO: Test needs to be fixed
        # The early return below disables the whole fixture.
        return
        self.messenger = Messenger("testmessenger")
        self.conn = MulticastTransport('239.255.1.1', 18808, testbed.controlip)
        self.messenger.addTransport(self.conn, True)
        self.messenger.join('multicastgroup', 'tester')
        self.msgid = 1234

    def sendMsg(self):
        # Build and send a reply on the multicast group, then busy-wait until
        # the transport's outgoing queue drains.
        self.msgid += 1
        msg = MAGIMessage()
        msg.msgid = self.msgid
        msg.contenttype = MAGIMessage.NONE
        msg.src = "servernode"
        msg.srcdock = "serverdock"
        msg.dstgroups = ['multicastgroup']
        msg.data = "success"
        msg._routed = [self.conn.fileno()]
        self.messenger.thread.sendDirect(msg)
        while self.messenger.thread.pollMap[self.conn.fileno()].outmessages:
            time.sleep(0.2) #waiting for message to be sent

    def test_BasicRequest(self):
        """ Testing multicast transport - Server """
        #TODO: Test needs to be fixed
        # The early return below disables this test.
        return
        msg = self.messenger.nextMessage(block=True)
        self.assertEqual(msg.src, "clientnode", "Source error, Excepted: clientnode, Received: " + msg.src)
        self.assertEqual(msg.srcdock, "clientdock", "Dock error, Excepted: clientdock, Received: " + msg.srcdock)
        self.assertEqual(msg.data, "testing", "Data error, Excepted: testing, Received: " + msg.data)
        self.sendMsg()
if __name__ == '__main__':
    # Configure a timestamped console logger before running the suite.
    hdlr = logging.StreamHandler()
    hdlr.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s', '%m-%d %H:%M:%S'))
    root = logging.getLogger()
    # Replace any pre-existing handlers so output is not duplicated.
    root.handlers = []
    root.addHandler(hdlr)
    root.setLevel(logging.DEBUG)
    unittest2.main(verbosity=2)
| gpl-2.0 |
linjoahow/2015cda_lego | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/process.py | 694 | 2304 | #
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
from _weakrefset import WeakSet
#for brython
from _multiprocessing import Process
#
#
#
# The current working directory may already have been removed, in which
# case getcwd() raises OSError and no original directory can be recorded.
try:
    ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
    ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
    '''
    Return process object representing the current process
    '''
    # The module-level _MainProcess singleton created at import time.
    return _current_process
def active_children():
    '''
    Return list of process objects corresponding to live child processes
    '''
    # Prune children that have already terminated before snapshotting.
    _cleanup()
    return [child for child in _current_process._children]
#
#
#
def _cleanup():
    '''Discard child processes which have already finished.'''
    finished = [p for p in list(_current_process._children)
                if p._popen.poll() is not None]
    for p in finished:
        _current_process._children.discard(p)
#
# The `Process` class
#
# brython note: class Process is defined in /usr/libs/_multiprocessing.js
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
    # Subclassing bytes lets us intercept pickling: authentication keys must
    # only ever be serialized by the process-spawning machinery itself.
    def __reduce__(self):
        from .forking import Popen
        if not Popen.thread_is_spawning():
            # Refuse to pickle outside of process spawning, so the key cannot
            # accidentally leak over untrusted channels (e.g. the network).
            raise TypeError(
                'Pickling an AuthenticationString object is '
                'disallowed for security reasons'
                )
        return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
    # Process object describing the main interpreter process; instantiated
    # exactly once below as _current_process.
    def __init__(self):
        self._identity = ()          # empty tuple: the main process has no ancestry
        self._daemonic = False
        self._name = 'MainProcess'
        self._parent_pid = None      # no parent process
        self._popen = None           # not spawned via Popen
        self._counter = itertools.count(1)   # numbers child processes
        self._children = set()
        # Random key used to authenticate inter-process connections.
        self._authkey = AuthenticationString(os.urandom(32))
        self._tempdir = None
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in list(signal.__dict__.items()):
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
# For debug and leak testing
_dangling = WeakSet()
| agpl-3.0 |
JioCloud/tempest | tempest/api/compute/admin/test_hosts_negative.py | 9 | 7067 | # Copyright 2013 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import test
class HostsAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests hosts API using admin privileges.
"""
@classmethod
def setup_clients(cls):
    # The admin client exercises the API itself; the non-admin client is
    # used to verify that authorization failures are enforced.
    super(HostsAdminNegativeTestJSON, cls).setup_clients()
    cls.client = cls.os_adm.hosts_client
    cls.non_admin_client = cls.os.hosts_client
def _get_host_name(self):
hosts = self.client.list_hosts()
self.assertTrue(len(hosts) >= 1)
hostname = hosts[0]['host_name']
return hostname
@test.attr(type=['negative'])
@test.idempotent_id('dd032027-0210-4d9c-860e-69b1b8deed5f')
def test_list_hosts_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.list_hosts)
@test.attr(type=['negative'])
@test.idempotent_id('e75b0a1a-041f-47a1-8b4a-b72a6ff36d3f')
def test_show_host_detail_with_nonexistent_hostname(self):
nonexitent_hostname = data_utils.rand_name('rand_hostname')
self.assertRaises(lib_exc.NotFound,
self.client.show_host, nonexitent_hostname)
@test.attr(type=['negative'])
@test.idempotent_id('19ebe09c-bfd4-4b7c-81a2-e2e0710f59cc')
def test_show_host_detail_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.show_host,
hostname)
@test.attr(type=['negative'])
@test.idempotent_id('e40c72b1-0239-4ed6-ba21-81a184df1f7c')
def test_update_host_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.update_host,
hostname,
status='enable',
maintenance_mode='enable')
@test.attr(type=['negative'])
@test.idempotent_id('76e396fe-5418-4dd3-a186-5b301edc0721')
def test_update_host_with_extra_param(self):
# only 'status' and 'maintenance_mode' are the valid params.
hostname = self._get_host_name()
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
hostname,
status='enable',
maintenance_mode='enable',
param='XXX')
@test.attr(type=['negative'])
@test.idempotent_id('fbe2bf3e-3246-4a95-a59f-94e4e298ec77')
def test_update_host_with_invalid_status(self):
# 'status' can only be 'enable' or 'disable'
hostname = self._get_host_name()
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
hostname,
status='invalid',
maintenance_mode='enable')
@test.attr(type=['negative'])
@test.idempotent_id('ab1e230e-5e22-41a9-8699-82b9947915d4')
def test_update_host_with_invalid_maintenance_mode(self):
# 'maintenance_mode' can only be 'enable' or 'disable'
hostname = self._get_host_name()
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
hostname,
status='enable',
maintenance_mode='invalid')
@test.attr(type=['negative'])
@test.idempotent_id('0cd85f75-6992-4a4a-b1bd-d11e37fd0eee')
def test_update_host_without_param(self):
# 'status' or 'maintenance_mode' needed for host update
hostname = self._get_host_name()
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
hostname)
@test.attr(type=['negative'])
@test.idempotent_id('23c92146-2100-4d68-b2d6-c7ade970c9c1')
def test_update_nonexistent_host(self):
nonexitent_hostname = data_utils.rand_name('rand_hostname')
self.assertRaises(lib_exc.NotFound,
self.client.update_host,
nonexitent_hostname,
status='enable',
maintenance_mode='enable')
@test.attr(type=['negative'])
@test.idempotent_id('0d981ac3-4320-4898-b674-82b61fbb60e4')
def test_startup_nonexistent_host(self):
nonexitent_hostname = data_utils.rand_name('rand_hostname')
self.assertRaises(lib_exc.NotFound,
self.client.startup_host,
nonexitent_hostname)
@test.attr(type=['negative'])
@test.idempotent_id('9f4ebb7e-b2ae-4e5b-a38f-0fd1bb0ddfca')
def test_startup_host_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.startup_host,
hostname)
@test.attr(type=['negative'])
@test.idempotent_id('9e637444-29cf-4244-88c8-831ae82c31b6')
def test_shutdown_nonexistent_host(self):
nonexitent_hostname = data_utils.rand_name('rand_hostname')
self.assertRaises(lib_exc.NotFound,
self.client.shutdown_host,
nonexitent_hostname)
@test.attr(type=['negative'])
@test.idempotent_id('a803529c-7e3f-4d3c-a7d6-8e1c203d27f6')
def test_shutdown_host_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.shutdown_host,
hostname)
@test.attr(type=['negative'])
@test.idempotent_id('f86bfd7b-0b13-4849-ae29-0322e83ee58b')
def test_reboot_nonexistent_host(self):
nonexitent_hostname = data_utils.rand_name('rand_hostname')
self.assertRaises(lib_exc.NotFound,
self.client.reboot_host,
nonexitent_hostname)
@test.attr(type=['negative'])
@test.idempotent_id('02d79bb9-eb57-4612-abf6-2cb38897d2f8')
def test_reboot_host_with_non_admin_user(self):
hostname = self._get_host_name()
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.reboot_host,
hostname)
| apache-2.0 |
Leila20/django | tests/fixtures/tests.py | 14 | 42302 | from __future__ import unicode_literals
import os
import sys
import tempfile
import unittest
import warnings
from django.apps import apps
from django.contrib.sites.models import Site
from django.core import management
from django.core.files.temp import NamedTemporaryFile
from django.core.management import CommandError
from django.core.management.commands.dumpdata import ProxyModelWarning
from django.core.serializers.base import ProgressBar
from django.db import IntegrityError, connection
from django.test import (
TestCase, TransactionTestCase, mock, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.encoding import force_text
from .models import (
Article, Category, PrimaryKeyUUIDModel, ProxySpy, Spy, Tag, Visa,
)
class TestCaseFixtureLoadingTests(TestCase):
    fixtures = ['fixture1.json', 'fixture2.json']

    def testClassFixtures(self):
        """The class-level fixtures install exactly three articles."""
        expected_articles = [
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker has no place on ESPN>',
        ]
        self.assertEqual(Article.objects.count(), 3)
        self.assertQuerysetEqual(Article.objects.all(), expected_articles)
class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests):
    """Subclasses can drop the fixtures declared by a parent class (#21089)."""

    fixtures = []

    def testClassFixtures(self):
        """With the fixture list emptied, nothing gets installed."""
        self.assertEqual(Article.objects.count(), 0)
class DumpDataAssertMixin(object):
    """Mixin providing a helper that runs ``dumpdata`` and compares output."""

    def _dumpdata_assert(self, args, output, format='json', filename=None,
                         natural_foreign_keys=False, natural_primary_keys=False,
                         use_base_manager=False, exclude_list=None, primary_keys=''):
        """Call the ``dumpdata`` management command and assert on its result.

        ``args`` are the app/model labels passed to the command; ``output``
        is the expected serialized data.  When ``filename`` is given, the
        command writes into the system temp directory and the file is read
        back and removed.  The comparison is format-aware: JSON and XML are
        compared structurally, anything else as exact text.
        """
        # Use a None sentinel rather than a mutable [] default, which would
        # be a single shared list across all calls.
        if exclude_list is None:
            exclude_list = []
        new_io = six.StringIO()
        if filename:
            filename = os.path.join(tempfile.gettempdir(), filename)
        management.call_command('dumpdata', *args, **{'format': format,
                                                      'stdout': new_io,
                                                      'stderr': new_io,
                                                      'output': filename,
                                                      'use_natural_foreign_keys': natural_foreign_keys,
                                                      'use_natural_primary_keys': natural_primary_keys,
                                                      'use_base_manager': use_base_manager,
                                                      'exclude': exclude_list,
                                                      'primary_keys': primary_keys})
        if filename:
            # File output: read back what the command wrote, then clean up.
            with open(filename, "r") as f:
                command_output = f.read()
            os.remove(filename)
        else:
            command_output = new_io.getvalue().strip()
        if format == "json":
            self.assertJSONEqual(command_output, output)
        elif format == "xml":
            self.assertXMLEqual(command_output, output)
        else:
            self.assertEqual(command_output, output)
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
    def test_loading_and_dumping(self):
        """Round-trip check: load fixtures in several formats (JSON, XML,
        dynamic ContentType/Permission fields), then verify that dumpdata
        reproduces the expected output for various app/model selections,
        natural foreign/primary keys, and XML serialization."""
        apps.clear_cache()
        Site.objects.all().delete()
        # Load fixture 1. Single JSON file, with two objects.
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Try just dumping the contents of fixtures.Category
        self._dumpdata_assert(
            ['fixtures.Category'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
            '"title": "News Stories"}}]'
        )
        # ...and just fixtures.Article
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
            '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
            '"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # ...and both
        self._dumpdata_assert(
            ['fixtures.Category', 'fixtures.Article'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
            '"title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has '
            'no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", '
            '"fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Specify a specific model twice
        self._dumpdata_assert(
            ['fixtures.Article', 'fixtures.Article'],
            (
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
                '"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
            )
        )
        # Specify a dump that specifies Article both explicitly and implicitly
        self._dumpdata_assert(
            ['fixtures.Article', 'fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Specify a dump that specifies Article both explicitly and implicitly,
        # but lists the app first (#22025).
        self._dumpdata_assert(
            ['fixtures', 'fixtures.Article'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Same again, but specify in the reverse order
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no '
            'place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields":'
            ' {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Specify one model from one application, and an entire other application.
        self._dumpdata_assert(
            ['fixtures.Category', 'sites'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": '
            '"example.com"}}]'
        )
        # Load fixture 2. JSON file imported by default. Overwrites some existing objects
        management.call_command('loaddata', 'fixture2.json', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Load fixture 3, XML format.
        management.call_command('loaddata', 'fixture3.xml', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: XML identified as leading cause of cancer>',
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker on TV is great!>',
        ])
        # Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
        management.call_command('loaddata', 'fixture6.json', verbosity=0)
        self.assertQuerysetEqual(Tag.objects.all(), [
            '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
            '<Tag: <Article: Copyright is fine the way it is> tagged "law">',
        ], ordered=False)
        # Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
        management.call_command('loaddata', 'fixture7.xml', verbosity=0)
        self.assertQuerysetEqual(Tag.objects.all(), [
            '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
            '<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
            '<Tag: <Article: Django conquers world!> tagged "django">',
            '<Tag: <Article: Django conquers world!> tagged "world domination">',
        ], ordered=False)
        # Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
        management.call_command('loaddata', 'fixture8.json', verbosity=0)
        self.assertQuerysetEqual(Visa.objects.all(), [
            '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
            '<Visa: Stephane Grappelli Can add user>',
            '<Visa: Prince >'
        ], ordered=False)
        # Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
        management.call_command('loaddata', 'fixture9.xml', verbosity=0)
        self.assertQuerysetEqual(Visa.objects.all(), [
            '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
            '<Visa: Stephane Grappelli Can add user, Can delete user>',
            '<Visa: Artist formerly known as "Prince" Can change user>'
        ], ordered=False)
        # object list is unaffected
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: XML identified as leading cause of cancer>',
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker on TV is great!>',
        ])
        # By default, you get raw keys on dumpdata
        self._dumpdata_assert(
            ['fixtures.book'],
            '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]'
        )
        # But you can get natural keys if you ask for them and they are available
        self._dumpdata_assert(
            ['fixtures.book'],
            '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
            'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
            natural_foreign_keys=True
        )
        # You can also omit the primary keys for models that we can get later with natural keys.
        self._dumpdata_assert(
            ['fixtures.person'],
            '[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane '
            'Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as '
            '\\"Prince\\""}, "model": "fixtures.person"}]',
            natural_primary_keys=True
        )
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is '
            'great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, '
            '"model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": '
            '"2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML '
            'identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": '
            '"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
            '3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
            '"legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", '
            '"article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": '
            '{"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, '
            '"model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": '
            '"fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", '
            '"fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", '
            '"fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], '
            '["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": '
            '"fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", '
            '"user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person":'
            ' ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, '
            '{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
            'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
            natural_foreign_keys=True
        )
        # Dump the current contents of the database as an XML fixture
        self._dumpdata_assert(
            ['fixtures'],
            '<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
            'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
            'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
            'model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field '
            'type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" '
            'model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it '
            'is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object '
            'pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!'
            '</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object '
            'pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading '
            'cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field>'
            '</object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field>'
            '<field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures'
            '</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3'
            '</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal'
            '</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>'
            'fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" '
            'name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" '
            'name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" '
            'rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field '
            'type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag">'
            '<field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" '
            'name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field>'
            '<field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" '
            'model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object>'
            '<object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli'
            '</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
            'Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field '
            'to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field>'
            '<field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user'
            '</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user'
            '</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user'
            '</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" '
            'model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane'
            ' Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel">'
            '<object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object>'
            '<natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field>'
            '</object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" '
            'rel="ManyToOneRel"><natural>Artist formerly known as "Prince"</natural></field><field '
            'to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural>'
            '<natural>auth</natural><natural>user</natural></object></field></object><object pk="1" '
            'model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field '
            'to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as '
            '"Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object>'
            '</django-objects>',
            format='xml', natural_foreign_keys=True
        )
    def test_dumpdata_with_excludes(self):
        """--exclude removes whole apps or individual models from dumpdata
        output; bogus app or model labels raise CommandError."""
        # Load fixture1 which has a site, two articles, and a category
        Site.objects.all().delete()
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        # Excluding fixtures app should only leave sites
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
            exclude_list=['fixtures'])
        # Excluding fixtures.Article/Book should leave fixtures.Category
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
            '{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book']
        )
        # Excluding fixtures and fixtures.Article/Book should be a no-op
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
            '{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book']
        )
        # Excluding sites and fixtures.Article/Book should only leave fixtures.Category
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book', 'sites']
        )
        # Excluding a bogus app should throw an error
        with self.assertRaisesMessage(management.CommandError, "No installed app with label 'foo_app'."):
            self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
        # Excluding a bogus model should throw an error
        with self.assertRaisesMessage(management.CommandError, "Unknown model: fixtures.FooModel"):
            self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])
    @unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support '?' in filenames.")
    def test_load_fixture_with_special_characters(self):
        # Fixture files whose names contain glob special characters ([...])
        # must still be discoverable by loaddata.
        management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>'])
def test_dumpdata_with_filtering_manager(self):
spy1 = Spy.objects.create(name='Paul')
spy2 = Spy.objects.create(name='Alex', cover_blown=True)
self.assertQuerysetEqual(Spy.objects.all(),
['<Spy: Paul>'])
# Use the default manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk
)
# Dump using Django's base manager. Should return all objects,
# even those normally filtered by the manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": '
'"fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk),
use_base_manager=True
)
    def test_dumpdata_with_pks(self):
        """--pks limits output to the given primary keys and is only valid
        when exactly one model is being dumped."""
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        management.call_command('loaddata', 'fixture2.json', verbosity=0)
        # A single model with a pk list dumps just those objects.
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
            '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
            '"Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
            primary_keys='2,3'
        )
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
            '"pub_date": "2006-06-16T12:00:00"}}]',
            primary_keys='2'
        )
        # Whole app + --pks is ambiguous and must fail.
        with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
            self._dumpdata_assert(
                ['fixtures'],
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
                '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
        # No model at all + --pks must fail too.
        with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
            self._dumpdata_assert(
                '',
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
                '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
        # Two explicit models + --pks must also fail.
        with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
            self._dumpdata_assert(
                ['fixtures.Article', 'fixtures.category'],
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
                '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
def test_dumpdata_with_uuid_pks(self):
m1 = PrimaryKeyUUIDModel.objects.create()
m2 = PrimaryKeyUUIDModel.objects.create()
output = six.StringIO()
management.call_command(
'dumpdata', 'fixtures.PrimaryKeyUUIDModel', '--pks', ', '.join([str(m1.id), str(m2.id)]),
stdout=output,
)
result = output.getvalue()
self.assertIn('"pk": "%s"' % m1.id, result)
self.assertIn('"pk": "%s"' % m2.id, result)
    def test_dumpdata_with_file_output(self):
        """With --output, dumpdata writes the serialization to a file."""
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        # The filename argument makes _dumpdata_assert read the result back
        # from a temp file instead of stdout.
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]',
            filename='dumpdata.json'
        )
    def test_dumpdata_progressbar(self):
        """
        Dumpdata shows a progress bar on the command line when --output is set,
        stdout is a tty, and verbosity > 0.
        """
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        new_io = six.StringIO()
        # Pretend the stream is a terminal so the progress bar is enabled.
        new_io.isatty = lambda: True
        with NamedTemporaryFile() as file:
            options = {
                'format': 'json',
                'stdout': new_io,
                'stderr': new_io,
                'output': file.name,
            }
            management.call_command('dumpdata', 'fixtures', **options)
            # A completed bar ends with a full row of dots in brackets.
            self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n'))
            # Test no progress bar when verbosity = 0
            options['verbosity'] = 0
            new_io = six.StringIO()
            new_io.isatty = lambda: True
            options.update({'stdout': new_io, 'stderr': new_io})
            management.call_command('dumpdata', 'fixtures', **options)
            self.assertEqual(new_io.getvalue(), '')
    def test_dumpdata_proxy_without_concrete(self):
        """
        A warning is displayed if a proxy model is dumped without its concrete
        parent.
        """
        ProxySpy.objects.create(name='Paul')
        # Record all warnings so the ProxyModelWarning can be inspected.
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter('always')
            self._dumpdata_assert(['fixtures.ProxySpy'], '[]')
            warning = warning_list.pop()
            self.assertEqual(warning.category, ProxyModelWarning)
            self.assertEqual(
                str(warning.message),
                "fixtures.ProxySpy is a proxy model and won't be serialized."
            )
    def test_dumpdata_proxy_with_concrete(self):
        """
        A warning isn't displayed if a proxy model is dumped with its concrete
        parent.
        """
        spy = ProxySpy.objects.create(name='Paul')
        # Record warnings; none are expected when the concrete model is
        # included alongside its proxy.
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter('always')
            self._dumpdata_assert(
                ['fixtures.ProxySpy', 'fixtures.Spy'],
                '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy.pk
            )
        self.assertEqual(len(warning_list), 0)
def test_compress_format_loading(self):
# Load fixture 4 (compressed), using format specification
management.call_command('loaddata', 'fixture4.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
])
def test_compressed_specified_loading(self):
# Load fixture 5 (compressed), using format *and* compression specification
management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_compressed_loading(self):
# Load fixture 5 (compressed), only compression specification
management.call_command('loaddata', 'fixture5.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_ambiguous_compressed_fixture(self):
# The name "fixture5" is ambiguous, so loading it will raise an error
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture5', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])
def test_db_loading(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0)
management.call_command('loaddata', 'db_fixture_2', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
    def test_loaddata_error_message(self):
        """
        Verifies that loading a fixture which contains an invalid object
        outputs an error message which contains the pk of the object
        that triggered the error.
        """
        # MySQL needs a little prodding to reject invalid data.
        # This won't affect other tests because the database connection
        # is closed at the end of each test.
        if connection.vendor == 'mysql':
            connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
        with self.assertRaises(IntegrityError) as cm:
            management.call_command('loaddata', 'invalid.json', verbosity=0)
        # The wrapped error message must identify the offending object.
        self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])
def test_loaddata_app_option(self):
"""
Verifies that the --app option works.
"""
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_1' found."):
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp")
self.assertQuerysetEqual(Article.objects.all(), [])
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures")
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
])
def test_loaddata_verbosity_three(self):
output = six.StringIO()
management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output)
command_output = force_text(output.getvalue())
self.assertIn(
"\rProcessed 1 object(s).\rProcessed 2 object(s)."
"\rProcessed 3 object(s).\rProcessed 4 object(s).\n",
command_output
)
def test_loading_using(self):
    """Fixtures load when the target database is named explicitly."""
    # Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
    management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default')
    management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default')
    self.assertQuerysetEqual(Article.objects.all(), [
        '<Article: Who needs more than one database?>',
        '<Article: Who needs to use compressed data?>',
    ])
def test_unmatched_identifier_loading(self):
    """A fixture bound to a different database is not found for 'default'."""
    # Try to load db fixture 3. This won't load because the database identifier doesn't match
    with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
        management.call_command('loaddata', 'db_fixture_3', verbosity=0)
    with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
        management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default')
    # Nothing was loaded by either attempt.
    self.assertQuerysetEqual(Article.objects.all(), [])
def test_output_formats(self):
    """dumpdata can serialize the same data as JSON and as XML, with
    natural foreign keys enabled."""
    # Load back in fixture 1, we need the articles from it
    management.call_command('loaddata', 'fixture1', verbosity=0)
    # Try to load fixture 6 using format discovery
    management.call_command('loaddata', 'fixture6', verbosity=0)
    self.assertQuerysetEqual(Tag.objects.all(), [
        '<Tag: <Article: Time to reform copyright> tagged "copyright">',
        '<Tag: <Article: Time to reform copyright> tagged "law">'
    ], ordered=False)
    # Dump the current contents of the database as a JSON fixture
    self._dumpdata_assert(
        ['fixtures'],
        '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
        '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
        'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
        '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": '
        '"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
        '3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
        '"law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django '
        'Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, '
        '{"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]',
        natural_foreign_keys=True
    )
    # Dump the current contents of the database as an XML fixture
    self._dumpdata_assert(
        ['fixtures'],
        '<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
        'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
        'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
        'model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field>'
        '<field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" '
        'model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field>'
        '<field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" '
        'model="fixtures.tag"><field type="CharField" name="name">copyright</field><field '
        'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
        '<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
        '</object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field '
        'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
        '<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
        '</object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt'
        '</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane '
        'Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
        'Prince</field></object></django-objects>',
        format='xml', natural_foreign_keys=True
    )
def test_loading_with_exclude_app(self):
    """--exclude with an app label skips every model of that app."""
    Site.objects.all().delete()
    management.call_command('loaddata', 'fixture1', exclude=['fixtures'], verbosity=0)
    # Nothing from the excluded 'fixtures' app was loaded ...
    self.assertFalse(Article.objects.exists())
    self.assertFalse(Category.objects.exists())
    # ... but objects from other apps in the same fixture still were.
    self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])
def test_loading_with_exclude_model(self):
    """--exclude with an app.Model label skips only that model."""
    Site.objects.all().delete()
    management.call_command('loaddata', 'fixture1', exclude=['fixtures.Article'], verbosity=0)
    # The excluded model is skipped ...
    self.assertFalse(Article.objects.exists())
    # ... while its sibling model and other apps load normally.
    self.assertQuerysetEqual(Category.objects.all(), ['<Category: News Stories>'])
    self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])
def test_exclude_option_errors(self):
    """Excluding a bogus app or model should raise an error."""
    # Unknown app label.
    msg = "No installed app with label 'foo_app'."
    with self.assertRaisesMessage(management.CommandError, msg):
        management.call_command('loaddata', 'fixture1', exclude=['foo_app'], verbosity=0)
    # Known app, unknown model.
    msg = "Unknown model: fixtures.FooModel"
    with self.assertRaisesMessage(management.CommandError, msg):
        management.call_command('loaddata', 'fixture1', exclude=['fixtures.FooModel'], verbosity=0)
class NonExistentFixtureTests(TestCase):
    """
    Custom class to limit fixture dirs.
    """
    available_apps = ['django.contrib.auth', 'django.contrib.contenttypes']
    def test_loaddata_not_existent_fixture_file(self):
        """Naming a fixture that does not exist raises CommandError."""
        stdout_output = six.StringIO()
        with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
            management.call_command('loaddata', 'this_fixture_doesnt_exist', stdout=stdout_output)
    @mock.patch('django.db.connection.enable_constraint_checking')
    @mock.patch('django.db.connection.disable_constraint_checking')
    def test_nonexistent_fixture_no_constraint_checking(
            self, disable_constraint_checking, enable_constraint_checking):
        """
        If no fixtures match the loaddata command, constraints checks on the
        database shouldn't be disabled. This is performance critical on MSSQL.
        """
        with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
            management.call_command('loaddata', 'this_fixture_doesnt_exist', verbosity=0)
        # Neither constraint-checking hook may have been invoked.
        disable_constraint_checking.assert_not_called()
        enable_constraint_checking.assert_not_called()
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
    """Fixture loading exercised inside a TransactionTestCase."""
    available_apps = [
        'fixtures',
        'django.contrib.contenttypes',
        'django.contrib.auth',
        'django.contrib.sites',
    ]
    @skipUnlessDBFeature('supports_forward_references')
    def test_format_discovery(self):
        """loaddata discovers a fixture's serialization format from its
        file extension, and rejects ambiguous fixture names."""
        # Load fixture 1 again, using format discovery
        management.call_command('loaddata', 'fixture1', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Try to load fixture 2 using format discovery; this will fail
        # because there are two fixture2's in the fixtures directory
        with self.assertRaises(management.CommandError) as cm:
            management.call_command('loaddata', 'fixture2', verbosity=0)
        self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
        # object list is unaffected
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Load fixture 4 (compressed), using format discovery
        management.call_command('loaddata', 'fixture4', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django pets kitten>',
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
| bsd-3-clause |
80vs90/libsaas | libsaas/services/zendesk/service.py | 4 | 5909 | import json
from libsaas import http, parsers, port
from libsaas.filters import auth
from libsaas.services import base
from . import resources
class Zendesk(base.Resource):
    """
    Root resource for the Zendesk REST API v2.

    Handles authentication (HTTP Basic or OAuth bearer token) and the
    JSON request/response conventions, and exposes the per-object
    sub-resources (tickets, users, groups, ...).
    """
    def __init__(self, subdomain, username=None, password=None,
                 access_token=None):
        """
        Create a Zendesk service.

        :var subdomain: The account-specific part of the Zendesk domain, for
            instance use `mycompany` if your Zendesk domain is
            `mycompany.zendesk.com`.
        :vartype subdomain: str

        :var username: The email of the authenticated agent. Use
            `user@company.com/token` for token-based authentication.
        :vartype username: str

        :var password: The password of the authenticated agent, or an API token
            if using token-based authentication.
        :vartype password: str

        :var access_token: An OAuth Access token. Username and password are not
            required if the OAuth Access token is provided.
        :vartype access_token: str
        """
        tmpl = '{0}.zendesk.com/api/v2'
        self.apiroot = http.quote_any(tmpl.format(port.to_u(subdomain)))
        self.apiroot = 'https://' + self.apiroot
        # Prefer OAuth when a token was given; otherwise fall back to
        # HTTP Basic authentication with username/password.
        if access_token:
            self.access_token = access_token
            self.add_filter(self.add_authorization)
        else:
            self.add_filter(auth.BasicAuth(username, password))
        self.add_filter(self.use_json)
    def add_authorization(self, request):
        # Request filter: attach the OAuth bearer token header.
        request.headers['Authorization'] = 'Bearer {0}'.format(
            self.access_token)
    def get_url(self):
        # Root URL of the API, e.g. https://mycompany.zendesk.com/api/v2
        return self.apiroot
    def use_json(self, request):
        # Request filter: Zendesk speaks JSON and expects a '.json' suffix
        # on every endpoint URL.
        request.headers['Content-Type'] = 'application/json'
        request.headers['Accept'] = 'application/json'
        request.uri += '.json'
        # Methods that don't urlencode their params send a JSON body instead.
        if request.method.upper() not in http.URLENCODE_METHODS:
            request.params = json.dumps(request.params)
    def set_access_token(self, access_token):
        # Replace the token used by the add_authorization filter.
        self.access_token = access_token
    @base.resource(resources.Tickets)
    def tickets(self):
        """
        Return the resource corresponding to all the tickets.
        """
        return resources.Tickets(self)
    @base.resource(resources.Ticket)
    def ticket(self, ticket_id):
        """
        Return the resource corresponding to a single ticket.
        """
        return resources.Ticket(self, ticket_id)
    @base.resource(resources.Users)
    def users(self):
        """
        Return the resource corresponding to all users.
        """
        return resources.Users(self)
    @base.resource(resources.User, resources.CurrentUser)
    def user(self, user_id=None):
        """
        Return the resource corresponding to a single user. If `user_id` is
        `None` the returned resource is the currently authenticated user,
        otherwise it is the user with the given ID number.
        """
        if user_id is None:
            return resources.CurrentUser(self)
        return resources.User(self, user_id)
    @base.resource(resources.Groups)
    def groups(self):
        """
        Return the resource corresponding to all groups.
        """
        return resources.Groups(self)
    @base.resource(resources.Group)
    def group(self, group_id):
        """
        Return the resource corresponding to a single group.
        """
        return resources.Group(self, group_id)
    @base.resource(resources.Activities)
    def activities(self):
        """
        Return the resource corresponding to all activities.
        """
        return resources.Activities(self)
    @base.resource(resources.Activity)
    def activity(self, activity_id):
        """
        Return the resource corresponding to a single activity.
        """
        return resources.Activity(self, activity_id)
    @base.resource(resources.SatisfactionRatings)
    def satisfaction_ratings(self):
        """
        Return the resource corresponding to all satisfaction ratings.
        """
        return resources.SatisfactionRatings(self)
    @base.resource(resources.SatisfactionRating)
    def satisfaction_rating(self, rating_id):
        """
        Return the resource corresponding to a single satisfaction rating.
        """
        return resources.SatisfactionRating(self, rating_id)
    @base.apimethod
    def search(self, query, sort_order=None,
               sort_by=None, page=None, per_page=None):
        """
        Fetch the results of a search on your Zendesk account. For details on
        searching, see
        http://developer.zendesk.com/documentation/rest_api/search.html

        :var query: A free-form search term.
        :vartype query: str

        :var sort_order: Optional order in which to sort the results.
        :vartype sort_order: str

        :var sort_by: Optional term by which to sort the results.
        :vartype sort_by: str

        :var page: Optional page number of the result set to fetch.
        :vartype page: int

        :var per_page: Optional number of results per page.
        :vartype per_page: int
        """
        url = '{0}/{1}'.format(self.get_url(), 'search')
        params = base.get_params(('query', 'sort_order', 'sort_by',
                                  'page', 'per_page'), locals())
        return http.Request('GET', url, params), parsers.parse_json
    @base.resource(resources.Views)
    def views(self):
        """
        Return the resource corresponding to all views.
        """
        return resources.Views(self)
    @base.resource(resources.View)
    def view(self, view_id):
        """
        Return the resource corresponding to a single view.
        """
        return resources.View(self, view_id)
    @base.resource(resources.Exports)
    def exports(self):
        """
        Return the resource corresponding to exports.
        """
        return resources.Exports(self)
    @base.resource(resources.Tags)
    def tags(self):
        """
        Return the resource corresponding to tags.
        """
        return resources.Tags(self)
| mit |
rcharp/toyota-flask | numpy/numpy/doc/subclassing.py | 139 | 20225 | """
=============================
Subclassing ndarray in python
=============================
Credits
-------
This page is based with thanks on the wiki page on subclassing by Pierre
Gerard-Marchant - http://www.scipy.org/Subclasses.
Introduction
------------
Subclassing ndarray is relatively simple, but it has some complications
compared to other Python objects. On this page we explain the machinery
that allows you to subclass ndarray, and the implications for
implementing a subclass.
ndarrays and object creation
============================
Subclassing ndarray is complicated by the fact that new instances of
ndarray classes can come about in three different ways. These are:
#. Explicit constructor call - as in ``MySubClass(params)``. This is
the usual route to Python instance creation.
#. View casting - casting an existing ndarray as a given subclass
#. New from template - creating a new instance from a template
instance. Examples include returning slices from a subclassed array,
creating return types from ufuncs, and copying arrays. See
:ref:`new-from-template` for more details
The last two are characteristics of ndarrays - in order to support
things like array slicing. The complications of subclassing ndarray are
due to the mechanisms numpy has to support these latter two routes of
instance creation.
.. _view-casting:
View casting
------------
*View casting* is the standard ndarray mechanism by which you take an
ndarray of any subclass, and return a view of the array as another
(specified) subclass:
>>> import numpy as np
>>> # create a completely useless ndarray subclass
>>> class C(np.ndarray): pass
>>> # create a standard ndarray
>>> arr = np.zeros((3,))
>>> # take a view of it, as our useless subclass
>>> c_arr = arr.view(C)
>>> type(c_arr)
<class 'C'>
.. _new-from-template:
Creating new from template
--------------------------
New instances of an ndarray subclass can also come about by a very
similar mechanism to :ref:`view-casting`, when numpy finds it needs to
create a new instance from a template instance. The most obvious place
this has to happen is when you are taking slices of subclassed arrays.
For example:
>>> v = c_arr[1:]
>>> type(v) # the view is of type 'C'
<class 'C'>
>>> v is c_arr # but it's a new instance
False
The slice is a *view* onto the original ``c_arr`` data. So, when we
take a view from the ndarray, we return a new ndarray, of the same
class, that points to the data in the original.
There are other points in the use of ndarrays where we need such views,
such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
(see also :ref:`array-wrap`), and reducing methods (like
``c_arr.mean()``.
Relationship of view casting and new-from-template
--------------------------------------------------
These paths both use the same machinery. We make the distinction here,
because they result in different input to your methods. Specifically,
:ref:`view-casting` means you have created a new instance of your array
type from any potential subclass of ndarray. :ref:`new-from-template`
means you have created a new instance of your class from a pre-existing
instance, allowing you - for example - to copy across attributes that
are particular to your subclass.
Implications for subclassing
----------------------------
If we subclass ndarray, we need to deal not only with explicit
construction of our array type, but also :ref:`view-casting` or
:ref:`new-from-template`. Numpy has the machinery to do this, and it is
this machinery that makes subclassing slightly non-standard.
There are two aspects to the machinery that ndarray uses to support
views and new-from-template in subclasses.
The first is the use of the ``ndarray.__new__`` method for the main work
of object initialization, rather than the more usual ``__init__``
method. The second is the use of the ``__array_finalize__`` method to
allow subclasses to clean up after the creation of views and new
instances from templates.
A brief Python primer on ``__new__`` and ``__init__``
=====================================================
``__new__`` is a standard Python method, and, if present, is called
before ``__init__`` when we create a class instance. See the `python
__new__ documentation
<http://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
For example, consider the following Python code:
.. testcode::
class C(object):
def __new__(cls, *args):
print 'Cls in __new__:', cls
print 'Args in __new__:', args
return object.__new__(cls, *args)
def __init__(self, *args):
print 'type(self) in __init__:', type(self)
print 'Args in __init__:', args
meaning that we get:
>>> c = C('hello')
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
type(self) in __init__: <class 'C'>
Args in __init__: ('hello',)
When we call ``C('hello')``, the ``__new__`` method gets its own class
as first argument, and the passed argument, which is the string
``'hello'``. After python calls ``__new__``, it usually (see below)
calls our ``__init__`` method, with the output of ``__new__`` as the
first argument (now a class instance), and the passed arguments
following.
As you can see, the object can be initialized in the ``__new__``
method or the ``__init__`` method, or both, and in fact ndarray does
not have an ``__init__`` method, because all the initialization is
done in the ``__new__`` method.
Why use ``__new__`` rather than just the usual ``__init__``? Because
in some cases, as for ndarray, we want to be able to return an object
of some other class. Consider the following:
.. testcode::
class D(C):
def __new__(cls, *args):
print 'D cls is:', cls
print 'D args in __new__:', args
return C.__new__(C, *args)
def __init__(self, *args):
# we never get here
print 'In D __init__'
meaning that:
>>> obj = D('hello')
D cls is: <class 'D'>
D args in __new__: ('hello',)
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
>>> type(obj)
<class 'C'>
The definition of ``C`` is the same as before, but for ``D``, the
``__new__`` method returns an instance of class ``C`` rather than
``D``. Note that the ``__init__`` method of ``D`` does not get
called. In general, when the ``__new__`` method returns an object of
class other than the class in which it is defined, the ``__init__``
method of that class is not called.
This is how subclasses of the ndarray class are able to return views
that preserve the class type. When taking a view, the standard
ndarray machinery creates the new ndarray object with something
like::
obj = ndarray.__new__(subtype, shape, ...
where ``subtype`` is the subclass. Thus the returned view is of the
same class as the subclass, rather than being of class ``ndarray``.
That solves the problem of returning views of the same type, but now
we have a new problem. The machinery of ndarray can set the class
this way, in its standard methods for taking views, but the ndarray
``__new__`` method knows nothing of what we have done in our own
``__new__`` method in order to set attributes, and so on. (Aside -
why not call ``obj = subtype.__new__(...`` then? Because we may not
have a ``__new__`` method with the same call signature).
The role of ``__array_finalize__``
==================================
``__array_finalize__`` is the mechanism that numpy provides to allow
subclasses to handle the various ways that new instances get created.
Remember that subclass instances can come about in these three ways:
#. explicit constructor call (``obj = MySubClass(params)``). This will
call the usual sequence of ``MySubClass.__new__`` then (if it exists)
``MySubClass.__init__``.
#. :ref:`view-casting`
#. :ref:`new-from-template`
Our ``MySubClass.__new__`` method only gets called in the case of the
explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
``MySubClass.__init__`` to deal with the view casting and
new-from-template. It turns out that ``MySubClass.__array_finalize__``
*does* get called for all three methods of object creation, so this is
where our object creation housekeeping usually goes.
* For the explicit constructor call, our subclass will need to create a
new ndarray instance of its own class. In practice this means that
we, the authors of the code, will need to make a call to
``ndarray.__new__(MySubClass,...)``, or do view casting of an existing
array (see below)
* For view casting and new-from-template, the equivalent of
``ndarray.__new__(MySubClass,...`` is called, at the C level.
The arguments that ``__array_finalize__`` receives differ for the three
methods of instance creation above.
The following code allows us to look at the call sequences and arguments:
.. testcode::
import numpy as np
class C(np.ndarray):
def __new__(cls, *args, **kwargs):
print 'In __new__ with class %s' % cls
return np.ndarray.__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
# in practice you probably will not need or want an __init__
# method for your subclass
print 'In __init__ with class %s' % self.__class__
def __array_finalize__(self, obj):
print 'In array_finalize:'
print ' self type is %s' % type(self)
print ' obj type is %s' % type(obj)
Now:
>>> # Explicit constructor
>>> c = C((10,))
In __new__ with class <class 'C'>
In array_finalize:
self type is <class 'C'>
obj type is <type 'NoneType'>
In __init__ with class <class 'C'>
>>> # View casting
>>> a = np.arange(10)
>>> cast_a = a.view(C)
In array_finalize:
self type is <class 'C'>
obj type is <type 'numpy.ndarray'>
>>> # Slicing (example of new-from-template)
>>> cv = c[:1]
In array_finalize:
self type is <class 'C'>
obj type is <class 'C'>
The signature of ``__array_finalize__`` is::
def __array_finalize__(self, obj):
``ndarray.__new__`` passes ``__array_finalize__`` the new object, of our
own class (``self``) as well as the object from which the view has been
taken (``obj``). As you can see from the output above, the ``self`` is
always a newly created instance of our subclass, and the type of ``obj``
differs for the three instance creation methods:
* When called from the explicit constructor, ``obj`` is ``None``
* When called from view casting, ``obj`` can be an instance of any
subclass of ndarray, including our own.
* When called in new-from-template, ``obj`` is another instance of our
own subclass, that we might use to update the new ``self`` instance.
Because ``__array_finalize__`` is the only method that always sees new
instances being created, it is the sensible place to fill in instance
defaults for new object attributes, among other tasks.
This may be clearer with an example.
Simple example - adding an extra attribute to ndarray
-----------------------------------------------------
.. testcode::
import numpy as np
class InfoArray(np.ndarray):
def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
strides=None, order=None, info=None):
# Create the ndarray instance of our type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of our type.
# It also triggers a call to InfoArray.__array_finalize__
obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides,
order)
# set the new 'info' attribute to the value passed
obj.info = info
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# ``self`` is a new object resulting from
# ndarray.__new__(InfoArray, ...), therefore it only has
# attributes that the ndarray.__new__ constructor gave it -
# i.e. those of a standard ndarray.
#
# We could have got to the ndarray.__new__ call in 3 ways:
# From an explicit constructor - e.g. InfoArray():
# obj is None
# (we're in the middle of the InfoArray.__new__
# constructor, and self.info will be set when we return to
# InfoArray.__new__)
if obj is None: return
# From view casting - e.g arr.view(InfoArray):
# obj is arr
# (type(obj) can be InfoArray)
# From new-from-template - e.g infoarr[:3]
# type(obj) is InfoArray
#
# Note that it is here, rather than in the __new__ method,
# that we set the default value for 'info', because this
# method sees all creation of default objects - with the
# InfoArray.__new__ constructor, but also with
# arr.view(InfoArray).
self.info = getattr(obj, 'info', None)
# We do not need to return anything
Using the object looks like this:
>>> obj = InfoArray(shape=(3,)) # explicit constructor
>>> type(obj)
<class 'InfoArray'>
>>> obj.info is None
True
>>> obj = InfoArray(shape=(3,), info='information')
>>> obj.info
'information'
>>> v = obj[1:] # new-from-template - here - slicing
>>> type(v)
<class 'InfoArray'>
>>> v.info
'information'
>>> arr = np.arange(10)
>>> cast_arr = arr.view(InfoArray) # view casting
>>> type(cast_arr)
<class 'InfoArray'>
>>> cast_arr.info is None
True
This class isn't very useful, because it has the same constructor as the
bare ndarray object, including passing in buffers and shapes and so on.
We would probably prefer the constructor to be able to take an already
formed ndarray from the usual numpy calls to ``np.array`` and return an
object.
Slightly more realistic example - attribute added to existing array
-------------------------------------------------------------------
Here is a class that takes a standard ndarray that already exists, casts
as our type, and adds an extra attribute.
.. testcode::
import numpy as np
class RealisticInfoArray(np.ndarray):
def __new__(cls, input_array, info=None):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attribute to the created instance
obj.info = info
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None: return
self.info = getattr(obj, 'info', None)
So:
>>> arr = np.arange(5)
>>> obj = RealisticInfoArray(arr, info='information')
>>> type(obj)
<class 'RealisticInfoArray'>
>>> obj.info
'information'
>>> v = obj[1:]
>>> type(v)
<class 'RealisticInfoArray'>
>>> v.info
'information'
.. _array-wrap:
``__array_wrap__`` for ufuncs
-------------------------------------------------------
``__array_wrap__`` gets called at the end of numpy ufuncs and other numpy
functions, to allow a subclass to set the type of the return value
and update attributes and metadata. Let's show how this works with an example.
First we make the same subclass as above, but with a different name and
some print statements:
.. testcode::
import numpy as np
class MySubClass(np.ndarray):
def __new__(cls, input_array, info=None):
obj = np.asarray(input_array).view(cls)
obj.info = info
return obj
def __array_finalize__(self, obj):
print 'In __array_finalize__:'
print ' self is %s' % repr(self)
print ' obj is %s' % repr(obj)
if obj is None: return
self.info = getattr(obj, 'info', None)
def __array_wrap__(self, out_arr, context=None):
print 'In __array_wrap__:'
print ' self is %s' % repr(self)
print ' arr is %s' % repr(out_arr)
# then just call the parent
return np.ndarray.__array_wrap__(self, out_arr, context)
We run a ufunc on an instance of our new array:
>>> obj = MySubClass(np.arange(5), info='spam')
In __array_finalize__:
self is MySubClass([0, 1, 2, 3, 4])
obj is array([0, 1, 2, 3, 4])
>>> arr2 = np.arange(5)+1
>>> ret = np.add(arr2, obj)
In __array_wrap__:
self is MySubClass([0, 1, 2, 3, 4])
arr is array([1, 3, 5, 7, 9])
In __array_finalize__:
self is MySubClass([1, 3, 5, 7, 9])
obj is MySubClass([0, 1, 2, 3, 4])
>>> ret
MySubClass([1, 3, 5, 7, 9])
>>> ret.info
'spam'
Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method of the
input with the highest ``__array_priority__`` value, in this case
``MySubClass.__array_wrap__``, with arguments ``self`` as ``obj``, and
``out_arr`` as the (ndarray) result of the addition. In turn, the
default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the
result to class ``MySubClass``, and called ``__array_finalize__`` -
hence the copying of the ``info`` attribute. This has all happened at the C level.
But, we could do anything we wanted:
.. testcode::
class SillySubClass(np.ndarray):
def __array_wrap__(self, arr, context=None):
return 'I lost your data'
>>> arr1 = np.arange(5)
>>> obj = arr1.view(SillySubClass)
>>> arr2 = np.arange(5)
>>> ret = np.multiply(obj, arr2)
>>> ret
'I lost your data'
So, by defining a specific ``__array_wrap__`` method for our subclass,
we can tweak the output from ufuncs. The ``__array_wrap__`` method
requires ``self``, then an argument - which is the result of the ufunc -
and an optional parameter *context*. This parameter is returned by some
ufuncs as a 3-element tuple: (name of the ufunc, argument of the ufunc,
domain of the ufunc). ``__array_wrap__`` should return an instance of
its containing class. See the masked array subclass for an
implementation.
In addition to ``__array_wrap__``, which is called on the way out of the
ufunc, there is also an ``__array_prepare__`` method which is called on
the way into the ufunc, after the output arrays are created but before any
computation has been performed. The default implementation does nothing
but pass through the array. ``__array_prepare__`` should not attempt to
access the array data or resize the array, it is intended for setting the
output array type, updating attributes and metadata, and performing any
checks based on the input that may be desired before computation begins.
Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
subclass thereof or raise an error.
Extra gotchas - custom ``__del__`` methods and ndarray.base
-----------------------------------------------------------
One of the problems that ndarray solves is keeping track of memory
ownership of ndarrays and their views. Consider the case where we have
created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
The two objects are looking at the same memory. Numpy keeps track of
where the data came from for a particular array or view, with the
``base`` attribute:
>>> # A normal ndarray, that owns its own data
>>> arr = np.zeros((4,))
>>> # In this case, base is None
>>> arr.base is None
True
>>> # We take a view
>>> v1 = arr[1:]
>>> # base now points to the array that it derived from
>>> v1.base is arr
True
>>> # Take a view of a view
>>> v2 = v1[1:]
>>> # base points to the view it derived from
>>> v2.base is v1
True
In general, if the array owns its own memory, as for ``arr`` in this
case, then ``arr.base`` will be None - there are some exceptions to this
- see the numpy book for more details.
The ``base`` attribute is useful in being able to tell whether we have
a view or the original array. This in turn can be useful if we need
to know whether or not to do some specific cleanup when the subclassed
array is deleted. For example, we may only want to do the cleanup if
the original array is deleted, but not the views. For an example of
how this can work, have a look at the ``memmap`` class in
``numpy.core``.
"""
from __future__ import division, absolute_import, print_function
| apache-2.0 |
grhawk/ASE | ase/old.py | 10 | 6716 | import numpy as np
# Optional compatibility layer for the legacy Numeric package (the
# pre-numpy array library).  If Numeric is unavailable, npy2num is
# simply never defined.
try:
    import Numeric as num
except ImportError:
    pass
else:
    def npy2num(a, typecode=num.Float):
        # Convert a numpy array to a Numeric array.
        return num.array(a, typecode)
    if num.__version__ <= '23.8':
        #def npy2num(a, typecode=num.Float):
        #    return num.array(a.tolist(), typecode)
        def npy2num(a, typecode=num.Float):
            # Older Numeric versions cannot consume numpy arrays directly;
            # round-trip through the raw byte string and restore the shape.
            b = num.fromstring(a.tostring(), typecode)
            b.shape = a.shape
            return b
from ase.data import chemical_symbols
class OldASEListOfAtomsWrapper:
    """Present an old-ASE ListOfAtoms object through the new-style
    ase.Atoms accessor interface.

    Getters convert the old API's return values to numpy arrays.  Data
    the old interface does not provide (charges, converged magnetic
    moments) is reported as zeros or None.
    """

    def __init__(self, atoms):
        # 'atoms' is an old-ASE ListOfAtoms instance.
        self.atoms = atoms
        self.constraints = []

    def get_positions(self):
        """Cartesian positions as a numpy array."""
        raw = self.atoms.GetCartesianPositions()
        return np.array(raw)

    def get_calculator(self):
        """Wrapped calculator, or None when none is attached."""
        old_calc = self.atoms.GetCalculator()
        if old_calc is None:
            return None
        return OldASECalculatorWrapper(old_calc)

    def get_potential_energy(self):
        return self.atoms.GetPotentialEnergy()

    def get_forces(self):
        return np.array(self.atoms.GetCartesianForces())

    def get_stress(self):
        return np.array(self.atoms.GetStress())

    def get_atomic_numbers(self):
        return np.array(self.atoms.GetAtomicNumbers())

    def get_tags(self):
        return np.array(self.atoms.GetTags())

    def get_momenta(self):
        return np.array(self.atoms.GetCartesianMomenta())

    def get_masses(self):
        return np.array(self.atoms.GetMasses())

    def get_initial_magnetic_moments(self):
        return np.array(self.atoms.GetMagneticMoments())

    def get_magnetic_moments(self):
        # Converged moments are not available through the old interface.
        return None

    def get_charges(self):
        # The old interface has no charges; report zero for every atom.
        return np.zeros(len(self))

    def has(self, name):
        # Pretend every array is present.
        return True

    def get_cell(self):
        return np.array(self.atoms.GetUnitCell())

    def get_pbc(self):
        return np.array(self.atoms.GetBoundaryConditions(), bool)

    def __len__(self):
        return len(self.atoms)

    def copy(self):
        """Return a new-style ase.Atoms copy of this system."""
        from ase.atoms import Atoms
        data = dict(positions=self.get_positions(),
                    numbers=self.get_atomic_numbers(),
                    tags=self.get_tags(),
                    momenta=self.get_momenta(),
                    masses=self.get_masses(),
                    magmoms=self.get_initial_magnetic_moments(),
                    charges=self.get_charges(),
                    cell=self.get_cell(),
                    pbc=self.get_pbc(),
                    constraint=None,
                    calculator=None)  # Don't copy the calculator
        return Atoms(**data)
class OldASECalculatorWrapper:
    """Adapter presenting an old-ASE calculator through the new-style
    ase calculator interface."""

    def __init__(self, calc, atoms=None):
        # calc: an old-ASE calculator.  If a new-style 'atoms' object is
        # given, an old-style ListOfAtoms copy is built from it and
        # attached to the calculator; otherwise the calculator's own
        # atoms are used (if it has any).
        self.calc = calc
        if atoms is None:
            try:
                self.atoms = calc.GetListOfAtoms()
            except AttributeError:
                self.atoms = None
        else:
            from ASE import Atom, ListOfAtoms
            numbers = atoms.get_atomic_numbers()
            positions = atoms.get_positions()
            magmoms = atoms.get_initial_magnetic_moments()
            self.atoms = ListOfAtoms(
                [Atom(Z=numbers[a], position=positions[a], magmom=magmoms[a])
                 for a in range(len(atoms))],
                cell=npy2num(atoms.get_cell()),
                periodic=tuple(atoms.get_pbc()))
            self.atoms.SetCalculator(calc)

    def get_atoms(self):
        """Return the attached atoms wrapped in the new-style interface."""
        return OldASEListOfAtomsWrapper(self.atoms)

    def get_potential_energy(self, atoms):
        # Push the current geometry to the old calculator before evaluating.
        self.atoms.SetCartesianPositions(npy2num(atoms.get_positions()))
        self.atoms.SetUnitCell(npy2num(atoms.get_cell()), fix=True)
        return self.calc.GetPotentialEnergy()

    def get_forces(self, atoms):
        self.atoms.SetCartesianPositions(npy2num(atoms.get_positions()))
        self.atoms.SetUnitCell(npy2num(atoms.get_cell()), fix=True)
        return np.array(self.calc.GetCartesianForces())

    def get_stress(self, atoms):
        self.atoms.SetCartesianPositions(npy2num(atoms.get_positions()))
        self.atoms.SetUnitCell(npy2num(atoms.get_cell()), fix=True)
        return np.array(self.calc.GetStress())

    def get_number_of_bands(self):
        return self.calc.GetNumberOfBands()

    def get_kpoint_weights(self):
        return np.array(self.calc.GetIBZKPointWeights())

    def get_number_of_spins(self):
        # 1 for spin-paired, 2 for spin-polarized calculations.
        return 1 + int(self.calc.GetSpinPolarized())

    def get_eigenvalues(self, kpt=0, spin=0):
        return np.array(self.calc.GetEigenvalues(kpt, spin))

    def get_fermi_level(self):
        return self.calc.GetFermiLevel()

    def get_number_of_grid_points(self):
        # Derived from the shape of a wave function on the grid.
        return np.array(self.get_pseudo_wave_function(0, 0, 0).shape)

    def get_pseudo_wave_function(self, n=0, k=0, s=0, pad=True):
        kpt = self.get_bz_k_points()[k]
        state = self.calc.GetElectronicStates().GetState(band=n, spin=s,
                                                         kptindex=k)
        # Get wf, without Bloch phase (phase=True doesn't do anything!)
        wave = state.GetWavefunctionOnGrid(phase=False)
        # Add Bloch phase if this is not the Gamma point
        if np.all(kpt == 0):
            return wave
        coord = state.GetCoordinates()
        phase = coord[0] * kpt[0] + coord[1] * kpt[1] + coord[2] * kpt[2]
        return np.array(wave) * np.exp(-2.j * np.pi * phase)  # sign! XXX
        #return np.array(self.calc.GetWaveFunctionArray(n, k, s)) # No phase!

    def get_bz_k_points(self):
        return np.array(self.calc.GetBZKPoints())

    def get_ibz_k_points(self):
        return np.array(self.calc.GetIBZKPoints())

    def get_wannier_localization_matrix(self, nbands, dirG, kpoint,
                                        nextkpoint, G_I, spin):
        return np.array(self.calc.GetWannierLocalizationMatrix(
            G_I=G_I.tolist(), nbands=nbands, dirG=dirG.tolist(),
            kpoint=kpoint, nextkpoint=nextkpoint, spin=spin))

    def initial_wannier(self, initialwannier, kpointgrid, fixedstates,
                        edf, spin):
        # Use initial guess to determine U and C
        init = self.calc.InitialWannier(initialwannier, self.atoms,
                                        npy2num(kpointgrid, num.Int))
        states = self.calc.GetElectronicStates()
        waves = [[state.GetWaveFunction()
                  for state in states.GetStatesKPoint(k, spin)]
                 for k in self.calc.GetIBZKPoints()]
        init.SetupMMatrix(waves, self.calc.GetBZKPoints())
        c, U = init.GetListOfCoefficientsAndRotationMatrices(
            (self.calc.GetNumberOfBands(), fixedstates, edf))
        U = np.array(U)
        for k in range(len(c)):
            c[k] = np.array(c[k])
        return c, U
| gpl-2.0 |
Soya93/Extract-Refactoring | python/helpers/py2only/roman.py | 227 | 2687 | """Convert to and from Roman numerals"""
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = """Copyright (c) 2001 Mark Pilgrim
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
"""
import re
# Define exceptions
class RomanError(Exception):
    """Base class for all roman-numeral conversion errors."""
    pass


class OutOfRangeError(RomanError):
    """Raised when a number is outside the representable range 1..4999."""
    pass


class NotIntegerError(RomanError):
    """Raised when the input is not a whole number."""
    pass


class InvalidRomanNumeralError(RomanError):
    """Raised when a string is not a well-formed Roman numeral."""
    pass
#Define digit mapping
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))


def toRoman(n):
    """Convert an integer in 1..4999 to a Roman numeral string.

    Raises OutOfRangeError for values outside 1..4999 and
    NotIntegerError for non-integral values.
    """
    if not (0 < n < 5000):
        # Call-form raise works on both Python 2 and 3; the original
        # 'raise Exc, "msg"' form is Python-2-only syntax.
        raise OutOfRangeError("number out of range (must be 1..4999)")
    if int(n) != n:
        raise NotIntegerError("decimals can not be converted")
    result = ""
    # Greedily subtract the largest representable value at each step.
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return result


#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile("""
    ^                   # beginning of string
    M{0,4}              # thousands - 0 to 4 M's
    (CM|CD|D?C{0,3})    # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
                        #            or 500-800 (D, followed by 0 to 3 C's)
    (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
                        #        or 50-80 (L, followed by 0 to 3 X's)
    (IX|IV|V?I{0,3})    # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
                        #        or 5-8 (V, followed by 0 to 3 I's)
    $                   # end of string
    """, re.VERBOSE)


def fromRoman(s):
    """Convert a Roman numeral string to an integer.

    Raises InvalidRomanNumeralError for empty or malformed input.
    """
    if not s:
        # Call-form raise: valid on both Python 2 and 3.
        raise InvalidRomanNumeralError('Input can not be blank')
    if not romanNumeralPattern.search(s):
        raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s)
    result = 0
    index = 0
    # Consume numerals greedily from largest to smallest; the regex check
    # above guarantees the whole string is consumed.
    for numeral, integer in romanNumeralMap:
        while s[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
| apache-2.0 |
mapbased/phantomjs | src/qt/qtwebkit/Source/WebCore/inspector/CodeGeneratorInspector.py | 117 | 97853 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Copyright (c) 2012 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
import sys
import string
import optparse
import re
try:
import json
except ImportError:
import simplejson as json
import CodeGeneratorInspectorStrings
# Map from inspector domain name to the ENABLE() define guarding its
# generated code (consumed by DomainNameFixes.get_fixed_data).
DOMAIN_DEFINE_NAME_MAP = {
    "Database": "SQL_DATABASE",
    "Debugger": "JAVASCRIPT_DEBUGGER",
    "DOMDebugger": "JAVASCRIPT_DEBUGGER",
    "FileSystem": "FILE_SYSTEM",
    "IndexedDB": "INDEXED_DATABASE",
    "Profiler": "JAVASCRIPT_DEBUGGER",
    "Worker": "WORKERS",
}

# Manually-filled map of type name replacements.
TYPE_NAME_FIX_MAP = {
    "RGBA": "Rgba",  # RGBA is reported to be conflicting with a define name in Windows CE.
    "": "Empty",
}

# Protocol types opted into runtime-cast generation.  NOTE(review): the
# consuming code is outside this chunk; presumably these get runtime
# type-cast helpers in the generated C++ — confirm against the generator.
TYPES_WITH_RUNTIME_CAST_SET = frozenset(["Runtime.RemoteObject", "Runtime.PropertyDescriptor", "Runtime.InternalPropertyDescriptor",
                                         "Debugger.FunctionDetails", "Debugger.CallFrame",
                                         "Canvas.TraceLog", "Canvas.ResourceInfo", "Canvas.ResourceState",
                                         # This should be a temporary hack. TimelineEvent should be created via generated C++ API.
                                         "Timeline.TimelineEvent"])

# Protocol types whose generated classes keep their field list open so
# agents can add/modify properties after construction (see inline notes).
TYPES_WITH_OPEN_FIELD_LIST_SET = frozenset(["Timeline.TimelineEvent",
                                            # InspectorStyleSheet not only creates this property but wants to read it and modify it.
                                            "CSS.CSSProperty",
                                            # InspectorResourceAgent needs to update mime-type.
                                            "Network.Response"])

# When True, integer values use the stricter TypeBuilder::ExactlyInt
# wrapper type (see TypeModel.init_class).
EXACTLY_INT_SUPPORTED = False
# Command-line interface:
#   <script> Inspector.json --output_h_dir DIR --output_cpp_dir DIR [--write_always]
cmdline_parser = optparse.OptionParser()
cmdline_parser.add_option("--output_h_dir")
cmdline_parser.add_option("--output_cpp_dir")
cmdline_parser.add_option("--write_always", action="store_true")

try:
    arg_options, arg_values = cmdline_parser.parse_args()
    # Exactly one positional argument: the protocol description JSON file.
    if (len(arg_values) != 1):
        raise Exception("Exactly one plain argument expected (found %s)" % len(arg_values))
    input_json_filename = arg_values[0]
    output_header_dirname = arg_options.output_h_dir
    output_cpp_dirname = arg_options.output_cpp_dir
    write_always = arg_options.write_always
    if not output_header_dirname:
        raise Exception("Output .h directory must be specified")
    if not output_cpp_dirname:
        raise Exception("Output .cpp directory must be specified")
except Exception:
    # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
    exc = sys.exc_info()[1]
    sys.stderr.write("Failed to parse command-line arguments: %s\n\n" % exc)
    sys.stderr.write("Usage: <script> Inspector.json --output_h_dir <output_header_dir> --output_cpp_dir <output_cpp_dir> [--write_always]\n")
    exit(1)
def dash_to_camelcase(word):
    """Convert a dash-separated word to CamelCase.

    Empty segments (from consecutive dashes) are kept as literal dashes:
    'a--b' -> 'A-B'.
    """
    pieces = []
    for segment in word.split('-'):
        pieces.append(segment.capitalize() or '-')
    return ''.join(pieces)
def fix_camel_case(name):
    """Turn a dashed name into TitleCamelCase, upper-casing known
    acronyms (HTML, XML, WML, API) wherever they appear."""
    camel = re.sub(r'-(\w)', lambda m: m.group(1).upper(), name)
    camel = to_title_case(camel)
    return re.sub(r'(?i)HTML|XML|WML|API', lambda m: m.group(0).upper(), camel)


def to_title_case(name):
    """Upper-case the first character; empty input is returned unchanged."""
    if not name:
        return name
    return name[0].upper() + name[1:]
class Capitalizer:
    """Identifier case-conversion helpers.

    NOTE(review): several methods use 'str' as a parameter name,
    shadowing the builtin; kept as-is for compatibility.
    """

    @staticmethod
    def lower_camel_case_to_upper(str):
        # fooBar -> FooBar
        if len(str) > 0 and str[0].islower():
            str = str[0].upper() + str[1:]
        return str

    @staticmethod
    def upper_camel_case_to_lower(str):
        # FooBar -> fooBar.  A leading run of capitals must be a known
        # abbreviation (see ABBREVIATION), e.g. DOMWindow -> domWindow;
        # unknown abbreviations raise.
        pos = 0
        while pos < len(str) and str[pos].isupper():
            pos += 1
        if pos == 0:
            return str
        if pos == 1:
            return str[0].lower() + str[1:]
        if pos < len(str):
            # Last capital starts the next word (e.g. the 'W' in DOMWindow).
            pos -= 1
        possible_abbreviation = str[0:pos]
        if possible_abbreviation not in Capitalizer.ABBREVIATION:
            raise Exception("Unknown abbreviation %s" % possible_abbreviation)
        str = possible_abbreviation.lower() + str[pos:]
        return str

    @staticmethod
    def camel_case_to_capitalized_with_underscores(str):
        # fooBar -> FOO_BAR
        if len(str) == 0:
            return str
        output = Capitalizer.split_camel_case_(str)
        return "_".join(output).upper()

    @staticmethod
    def split_camel_case_(str):
        # Split a camel-case identifier into word parts; consecutive
        # single-letter parts are re-merged when they spell a known
        # abbreviation (e.g. D, O, M -> DOM).
        output = []
        pos_being = 0
        pos = 1
        has_oneletter = False
        while pos < len(str):
            if str[pos].isupper():
                output.append(str[pos_being:pos].upper())
                if pos - pos_being == 1:
                    has_oneletter = True
                pos_being = pos
            pos += 1
        output.append(str[pos_being:])
        if has_oneletter:
            array_pos = 0
            while array_pos < len(output) - 1:
                if len(output[array_pos]) == 1:
                    array_pos_end = array_pos + 1
                    while array_pos_end < len(output) and len(output[array_pos_end]) == 1:
                        array_pos_end += 1
                    if array_pos_end - array_pos > 1:
                        possible_abbreviation = "".join(output[array_pos:array_pos_end])
                        if possible_abbreviation.upper() in Capitalizer.ABBREVIATION:
                            output[array_pos:array_pos_end] = [possible_abbreviation]
                        else:
                            array_pos = array_pos_end - 1
                array_pos += 1
        return output

    ABBREVIATION = frozenset(["XHR", "DOM", "CSS"])
# Preprocessor condition wrapped around generated validator code.
VALIDATOR_IFDEF_NAME = "!ASSERT_DISABLED"


class DomainNameFixes:
    """Per-domain naming data and feature-define guard generators."""

    @classmethod
    def get_fixed_data(cls, domain_name):
        # Returns a descriptor class carrying the agent field name, the
        # JS-bind skip flag and an ENABLE() guard generator for the domain.
        field_name_res = Capitalizer.upper_camel_case_to_lower(domain_name) + "Agent"

        class Res(object):
            skip_js_bind = domain_name in cls.skip_js_bind_domains
            agent_field_name = field_name_res

            @staticmethod
            def get_guard():
                # Returns a Guard class when the domain is conditionally
                # compiled; implicitly returns None otherwise.
                if domain_name in DOMAIN_DEFINE_NAME_MAP:
                    define_name = DOMAIN_DEFINE_NAME_MAP[domain_name]

                    class Guard:
                        @staticmethod
                        def generate_open(output):
                            output.append("#if ENABLE(%s)\n" % define_name)

                        @staticmethod
                        def generate_close(output):
                            output.append("#endif // ENABLE(%s)\n" % define_name)

                    return Guard

        return Res

    skip_js_bind_domains = set(["DOMDebugger"])
class RawTypes(object):
    """Descriptors for the primitive JSON wire types of the protocol.

    Each nested type class answers the same set of questions for one raw
    JSON type: which InspectorObject getter/setter to call, the C++
    initializer literal, the JavaScript binding type name, the validator
    helper, how output parameters are passed, and which TypeModel entry
    corresponds to it.
    """

    @staticmethod
    def get(json_type):
        # Map a JSON type name from the protocol description to its
        # descriptor class; unknown names are an error.
        if json_type == "boolean":
            return RawTypes.Bool
        elif json_type == "string":
            return RawTypes.String
        elif json_type == "array":
            return RawTypes.Array
        elif json_type == "object":
            return RawTypes.Object
        elif json_type == "integer":
            return RawTypes.Int
        elif json_type == "number":
            return RawTypes.Number
        elif json_type == "any":
            return RawTypes.Any
        else:
            raise Exception("Unknown type: %s" % json_type)

    # For output parameter all values are passed by pointer except RefPtr-based types.
    class OutputPassModel:
        class ByPointer:
            @staticmethod
            def get_argument_prefix():
                return "&"

            @staticmethod
            def get_parameter_type_suffix():
                return "*"

        class ByReference:
            @staticmethod
            def get_argument_prefix():
                return ""

            @staticmethod
            def get_parameter_type_suffix():
                return "&"

    class BaseType(object):
        # Set lazily when some generated code needs a runtime cast for
        # this type (class-level flag, shared per type class).
        need_internal_runtime_cast_ = False

        @classmethod
        def request_raw_internal_runtime_cast(cls):
            if not cls.need_internal_runtime_cast_:
                cls.need_internal_runtime_cast_ = True

        @classmethod
        def get_raw_validator_call_text(cls):
            return "RuntimeCastHelper::assertType<InspectorValue::Type%s>" % cls.get_validate_method_params().template_type

    class String(BaseType):
        # JSON "string".
        @staticmethod
        def get_getter_name():
            return "String"

        get_setter_name = get_getter_name

        @staticmethod
        def get_c_initializer():
            return "\"\""

        @staticmethod
        def get_js_bind_type():
            return "string"

        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "String"
            return ValidateMethodParams

        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByPointer

        @staticmethod
        def is_heavy_value():
            return True

        @staticmethod
        def get_array_item_raw_c_type_text():
            return "String"

        @staticmethod
        def get_raw_type_model():
            return TypeModel.String

    class Int(BaseType):
        # JSON "integer".
        @staticmethod
        def get_getter_name():
            return "Int"

        @staticmethod
        def get_setter_name():
            return "Number"

        @staticmethod
        def get_c_initializer():
            return "0"

        @staticmethod
        def get_js_bind_type():
            return "number"

        @classmethod
        def get_raw_validator_call_text(cls):
            # Ints use a dedicated validator rather than the templated one.
            return "RuntimeCastHelper::assertInt"

        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByPointer

        @staticmethod
        def is_heavy_value():
            return False

        @staticmethod
        def get_array_item_raw_c_type_text():
            return "int"

        @staticmethod
        def get_raw_type_model():
            return TypeModel.Int

    class Number(BaseType):
        # JSON "number" (floating point).
        @staticmethod
        def get_getter_name():
            return "Double"

        @staticmethod
        def get_setter_name():
            return "Number"

        @staticmethod
        def get_c_initializer():
            return "0"

        @staticmethod
        def get_js_bind_type():
            return "number"

        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "Number"
            return ValidateMethodParams

        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByPointer

        @staticmethod
        def is_heavy_value():
            return False

        @staticmethod
        def get_array_item_raw_c_type_text():
            return "double"

        @staticmethod
        def get_raw_type_model():
            return TypeModel.Number

    class Bool(BaseType):
        # JSON "boolean".
        @staticmethod
        def get_getter_name():
            return "Boolean"

        get_setter_name = get_getter_name

        @staticmethod
        def get_c_initializer():
            return "false"

        @staticmethod
        def get_js_bind_type():
            return "boolean"

        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "Boolean"
            return ValidateMethodParams

        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByPointer

        @staticmethod
        def is_heavy_value():
            return False

        @staticmethod
        def get_array_item_raw_c_type_text():
            return "bool"

        @staticmethod
        def get_raw_type_model():
            return TypeModel.Bool

    class Object(BaseType):
        # JSON "object" -> InspectorObject (RefPtr-based, passed by reference).
        @staticmethod
        def get_getter_name():
            return "Object"

        @staticmethod
        def get_setter_name():
            return "Value"

        @staticmethod
        def get_c_initializer():
            return "InspectorObject::create()"

        @staticmethod
        def get_js_bind_type():
            return "object"

        @staticmethod
        def get_output_argument_prefix():
            return ""

        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "Object"
            return ValidateMethodParams

        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByReference

        @staticmethod
        def is_heavy_value():
            return True

        @staticmethod
        def get_array_item_raw_c_type_text():
            return "InspectorObject"

        @staticmethod
        def get_raw_type_model():
            return TypeModel.Object

    class Any(BaseType):
        # JSON "any" -> InspectorValue; has no C initializer or JS bind type.
        @staticmethod
        def get_getter_name():
            return "Value"

        get_setter_name = get_getter_name

        @staticmethod
        def get_c_initializer():
            raise Exception("Unsupported")

        @staticmethod
        def get_js_bind_type():
            raise Exception("Unsupported")

        @staticmethod
        def get_raw_validator_call_text():
            return "RuntimeCastHelper::assertAny"

        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByReference

        @staticmethod
        def is_heavy_value():
            return True

        @staticmethod
        def get_array_item_raw_c_type_text():
            return "InspectorValue"

        @staticmethod
        def get_raw_type_model():
            return TypeModel.Any

    class Array(BaseType):
        # JSON "array" -> InspectorArray (RefPtr-based, passed by reference).
        @staticmethod
        def get_getter_name():
            return "Array"

        @staticmethod
        def get_setter_name():
            return "Value"

        @staticmethod
        def get_c_initializer():
            return "InspectorArray::create()"

        @staticmethod
        def get_js_bind_type():
            return "object"

        @staticmethod
        def get_output_argument_prefix():
            return ""

        @staticmethod
        def get_validate_method_params():
            class ValidateMethodParams:
                template_type = "Array"
            return ValidateMethodParams

        @staticmethod
        def get_output_pass_model():
            return RawTypes.OutputPassModel.ByReference

        @staticmethod
        def is_heavy_value():
            return True

        @staticmethod
        def get_array_item_raw_c_type_text():
            return "InspectorArray"

        @staticmethod
        def get_raw_type_model():
            return TypeModel.Array
def replace_right_shift(input_str):
    """Insert a space between consecutive '>' pairs so nested template
    closers don't form the '>>' token (required by C++03 parsers)."""
    return "> >".join(input_str.split(">>"))
class CommandReturnPassModel:
    """Strategies describing how command return values are passed out of
    generated backend methods (argument prefix, parameter type, and the
    condition under which the value is actually set)."""

    class ByReference:
        # Output passed as 'Type&'.
        def __init__(self, var_type, set_condition):
            self.var_type = var_type
            self.set_condition = set_condition

        def get_return_var_type(self):
            return self.var_type

        @staticmethod
        def get_output_argument_prefix():
            return ""

        @staticmethod
        def get_output_to_raw_expression():
            return "%s"

        def get_output_parameter_type(self):
            return self.var_type + "&"

        def get_set_return_condition(self):
            return self.set_condition

    class ByPointer:
        # Output passed as 'Type*'; always set (no condition).
        def __init__(self, var_type):
            self.var_type = var_type

        def get_return_var_type(self):
            return self.var_type

        @staticmethod
        def get_output_argument_prefix():
            return "&"

        @staticmethod
        def get_output_to_raw_expression():
            return "%s"

        def get_output_parameter_type(self):
            return self.var_type + "*"

        @staticmethod
        def get_set_return_condition():
            return None

    class OptOutput:
        # Optional output wrapped in TypeBuilder::OptOutput<Type>; only
        # serialized when isAssigned() reports a value was set.
        def __init__(self, var_type):
            self.var_type = var_type

        def get_return_var_type(self):
            return "TypeBuilder::OptOutput<%s>" % self.var_type

        @staticmethod
        def get_output_argument_prefix():
            return "&"

        @staticmethod
        def get_output_to_raw_expression():
            return "%s.getValue()"

        def get_output_parameter_type(self):
            return "TypeBuilder::OptOutput<%s>*" % self.var_type

        @staticmethod
        def get_set_return_condition():
            return "%s.isAssigned()"
class TypeModel:
    """C++-side models for protocol types: how a value of each kind is
    declared, passed into events/commands and returned.  Concrete
    instances for the raw types are created by init_class() below."""

    class RefPtrBased(object):
        # Types held via RefPtr (InspectorObject/Array/Value).
        def __init__(self, class_name):
            self.class_name = class_name
            self.optional = False

        def get_optional(self):
            # Return an optional variant of this model.
            result = TypeModel.RefPtrBased(self.class_name)
            result.optional = True
            return result

        def get_command_return_pass_model(self):
            if self.optional:
                set_condition = "%s"
            else:
                set_condition = None
            return CommandReturnPassModel.ByReference(replace_right_shift("RefPtr<%s>" % self.class_name), set_condition)

        def get_input_param_type_text(self):
            return replace_right_shift("PassRefPtr<%s>" % self.class_name)

        @staticmethod
        def get_event_setter_expression_pattern():
            return "%s"

    class Enum(object):
        # Generated enum types ('SomeType::Enum').
        def __init__(self, base_type_name):
            self.type_name = base_type_name + "::Enum"

        def get_optional(base_self):
            # Optional enums are modeled by an ad-hoc class closing over
            # this instance.
            class EnumOptional:
                @classmethod
                def get_optional(cls):
                    return cls

                @staticmethod
                def get_command_return_pass_model():
                    return CommandReturnPassModel.OptOutput(base_self.type_name)

                @staticmethod
                def get_input_param_type_text():
                    return base_self.type_name + "*"

                @staticmethod
                def get_event_setter_expression_pattern():
                    raise Exception("TODO")
            return EnumOptional

        def get_command_return_pass_model(self):
            return CommandReturnPassModel.ByPointer(self.type_name)

        def get_input_param_type_text(self):
            return self.type_name

        @staticmethod
        def get_event_setter_expression_pattern():
            return "%s"

    class ValueType(object):
        # Plain value types (bool, int, double, String).
        def __init__(self, type_name, is_heavy):
            # is_heavy: pass by const reference instead of by value.
            self.type_name = type_name
            self.is_heavy = is_heavy

        def get_optional(self):
            return self.ValueOptional(self)

        def get_command_return_pass_model(self):
            return CommandReturnPassModel.ByPointer(self.type_name)

        def get_input_param_type_text(self):
            if self.is_heavy:
                return "const %s&" % self.type_name
            else:
                return self.type_name

        def get_opt_output_type_(self):
            return self.type_name

        @staticmethod
        def get_event_setter_expression_pattern():
            return "%s"

        class ValueOptional:
            # Optional variant: passed as 'const T* const', dereferenced
            # when used in event setters.
            def __init__(self, base):
                self.base = base

            def get_optional(self):
                return self

            def get_command_return_pass_model(self):
                return CommandReturnPassModel.OptOutput(self.base.get_opt_output_type_())

            def get_input_param_type_text(self):
                return "const %s* const" % self.base.type_name

            @staticmethod
            def get_event_setter_expression_pattern():
                return "*%s"

    class ExactlyInt(ValueType):
        # Strict-int variant used when EXACTLY_INT_SUPPORTED is enabled.
        def __init__(self):
            TypeModel.ValueType.__init__(self, "int", False)

        def get_input_param_type_text(self):
            return "TypeBuilder::ExactlyInt"

        def get_opt_output_type_(self):
            return "TypeBuilder::ExactlyInt"

    @classmethod
    def init_class(cls):
        # Instantiate the singleton models referenced by RawTypes.
        cls.Bool = cls.ValueType("bool", False)
        if EXACTLY_INT_SUPPORTED:
            cls.Int = cls.ExactlyInt()
        else:
            cls.Int = cls.ValueType("int", False)
        cls.Number = cls.ValueType("double", False)
        cls.String = cls.ValueType("String", True,)
        cls.Object = cls.RefPtrBased("InspectorObject")
        cls.Array = cls.RefPtrBased("InspectorArray")
        cls.Any = cls.RefPtrBased("InspectorValue")


TypeModel.init_class()
# Collection of InspectorObject class methods that are likely to be overloaded in generated class.
# We must explicitly import all overloaded methods or they won't be available to user.
# (Fixed: "setValue" was listed twice; frozenset deduplicates, so the set is unchanged.)
INSPECTOR_OBJECT_SETTER_NAMES = frozenset(["setValue", "setBoolean", "setNumber", "setString", "setObject", "setArray"])
def fix_type_name(json_name):
    """Return a descriptor carrying the (possibly replaced) C++ class
    name for a protocol type, plus a hook that emits an explanatory
    comment when the name was changed (see TYPE_NAME_FIX_MAP)."""
    if json_name in TYPE_NAME_FIX_MAP:
        replacement = TYPE_NAME_FIX_MAP[json_name]

        class Result(object):
            class_name = replacement

            @staticmethod
            def output_comment(writer):
                writer.newline("// Type originally was named '%s'.\n" % json_name)
    else:
        class Result(object):
            class_name = json_name

            @staticmethod
            def output_comment(writer):
                # Name unchanged; nothing to note.
                pass

    return Result
class Writer:
    """Indentation-aware helper that accumulates output fragments in a
    shared list.  Nested writers share the same backing list (or, for
    insert_writer, a sub-list spliced into it)."""

    def __init__(self, output, indent):
        self.output = output
        self.indent = indent

    def newline(self, str):
        """Emit the current indent (if any) followed by the fragment."""
        if (self.indent):
            self.output.append(self.indent)
        self.output.append(str)

    def append(self, str):
        """Emit a fragment with no indentation."""
        self.output.append(str)

    def newline_multiline(self, str):
        """Emit a multi-line fragment, indenting every non-empty line."""
        self._emit_multiline(str, self.newline)

    def append_multiline(self, str):
        """Like newline_multiline, but the first line is not indented."""
        self._emit_multiline(str, self.append)

    def _emit_multiline(self, str, write_first):
        # Shared body: first line via write_first, remaining non-empty
        # lines re-indented.
        lines = str.split('\n')
        write_first(lines[0])
        for line in lines[1:]:
            self.output.append('\n')
            if line:
                self.newline(line)

    def get_indent(self):
        return self.indent

    def get_indented(self, additional_indent):
        """A writer on the same list with deeper indentation."""
        return Writer(self.output, self.indent + additional_indent)

    def insert_writer(self, additional_indent):
        """A writer whose output is a nested list spliced in at the
        current position (flattened later by the caller)."""
        nested = []
        self.output.append(nested)
        return Writer(nested, self.indent + additional_indent)
class EnumConstants:
    """Registry assigning a stable integer index to every enum string
    constant seen during generation.  State is class-level and shared."""

    map_ = {}        # constant string -> index
    constants_ = []  # index -> constant string (insertion order)

    @classmethod
    def add_constant(cls, value):
        """Return the index of 'value', registering it on first use."""
        if value not in cls.map_:
            cls.map_[value] = len(cls.constants_)
            cls.constants_.append(value)
        return cls.map_[value]

    @classmethod
    def get_enum_constant_code(cls):
        """Render every registered constant as one quoted line of a C++
        string-array initializer."""
        lines = [" \"%s\"" % item for item in cls.constants_]
        return ",\n".join(lines) + "\n"
# Typebuilder code is generated in several passes: first typedefs, then other classes.
# Manual pass management is needed because we cannot have forward declarations for typedefs.
class TypeBuilderPass:
    # Pass identifiers; code generators report the pass they belong to
    # via their get_generate_pass_id() methods.
    TYPEDEF = "typedef"
    MAIN = "main"
class TypeBindings:
@staticmethod
def create_named_type_declaration(json_typable, context_domain_name, type_data):
json_type = type_data.get_json_type()
class Helper:
is_ad_hoc = False
full_name_prefix_for_use = "TypeBuilder::" + context_domain_name + "::"
full_name_prefix_for_impl = "TypeBuilder::" + context_domain_name + "::"
@staticmethod
def write_doc(writer):
if "description" in json_type:
writer.newline("/* ")
writer.append(json_type["description"])
writer.append(" */\n")
@staticmethod
def add_to_forward_listener(forward_listener):
forward_listener.add_type_data(type_data)
fixed_type_name = fix_type_name(json_type["id"])
return TypeBindings.create_type_declaration_(json_typable, context_domain_name, fixed_type_name, Helper)
@staticmethod
def create_ad_hoc_type_declaration(json_typable, context_domain_name, ad_hoc_type_context):
class Helper:
is_ad_hoc = True
full_name_prefix_for_use = ad_hoc_type_context.container_relative_name_prefix
full_name_prefix_for_impl = ad_hoc_type_context.container_full_name_prefix
@staticmethod
def write_doc(writer):
pass
@staticmethod
def add_to_forward_listener(forward_listener):
pass
fixed_type_name = ad_hoc_type_context.get_type_name_fix()
return TypeBindings.create_type_declaration_(json_typable, context_domain_name, fixed_type_name, Helper)
@staticmethod
def create_type_declaration_(json_typable, context_domain_name, fixed_type_name, helper):
if json_typable["type"] == "string":
if "enum" in json_typable:
class EnumBinding:
need_user_runtime_cast_ = False
need_internal_runtime_cast_ = False
@classmethod
def resolve_inner(cls, resolve_context):
pass
@classmethod
def request_user_runtime_cast(cls, request):
if request:
cls.need_user_runtime_cast_ = True
request.acknowledge()
@classmethod
def request_internal_runtime_cast(cls):
cls.need_internal_runtime_cast_ = True
@classmethod
def get_code_generator(enum_binding_cls):
#FIXME: generate ad-hoc enums too once we figure out how to better implement them in C++.
comment_out = helper.is_ad_hoc
class CodeGenerator:
@staticmethod
def generate_type_builder(writer, generate_context):
enum = json_typable["enum"]
helper.write_doc(writer)
enum_name = fixed_type_name.class_name
fixed_type_name.output_comment(writer)
writer.newline("struct ")
writer.append(enum_name)
writer.append(" {\n")
writer.newline(" enum Enum {\n")
for enum_item in enum:
enum_pos = EnumConstants.add_constant(enum_item)
item_c_name = enum_item.replace('-', '_')
item_c_name = Capitalizer.lower_camel_case_to_upper(item_c_name)
if item_c_name in TYPE_NAME_FIX_MAP:
item_c_name = TYPE_NAME_FIX_MAP[item_c_name]
writer.newline(" ")
writer.append(item_c_name)
writer.append(" = ")
writer.append("%s" % enum_pos)
writer.append(",\n")
writer.newline(" };\n")
if enum_binding_cls.need_user_runtime_cast_:
raise Exception("Not yet implemented")
if enum_binding_cls.need_internal_runtime_cast_:
writer.append("#if %s\n" % VALIDATOR_IFDEF_NAME)
writer.newline(" static void assertCorrectValue(InspectorValue* value);\n")
writer.append("#endif // %s\n" % VALIDATOR_IFDEF_NAME)
validator_writer = generate_context.validator_writer
domain_fixes = DomainNameFixes.get_fixed_data(context_domain_name)
domain_guard = domain_fixes.get_guard()
if domain_guard:
domain_guard.generate_open(validator_writer)
validator_writer.newline("void %s%s::assertCorrectValue(InspectorValue* value)\n" % (helper.full_name_prefix_for_impl, enum_name))
validator_writer.newline("{\n")
validator_writer.newline(" WTF::String s;\n")
validator_writer.newline(" bool cast_res = value->asString(&s);\n")
validator_writer.newline(" ASSERT(cast_res);\n")
if len(enum) > 0:
condition_list = []
for enum_item in enum:
enum_pos = EnumConstants.add_constant(enum_item)
condition_list.append("s == \"%s\"" % enum_item)
validator_writer.newline(" ASSERT(%s);\n" % " || ".join(condition_list))
validator_writer.newline("}\n")
if domain_guard:
domain_guard.generate_close(validator_writer)
validator_writer.newline("\n\n")
writer.newline("}; // struct ")
writer.append(enum_name)
writer.append("\n\n")
@staticmethod
def register_use(forward_listener):
pass
@staticmethod
def get_generate_pass_id():
return TypeBuilderPass.MAIN
return CodeGenerator
@classmethod
def get_validator_call_text(cls):
return helper.full_name_prefix_for_use + fixed_type_name.class_name + "::assertCorrectValue"
@classmethod
def get_array_item_c_type_text(cls):
return helper.full_name_prefix_for_use + fixed_type_name.class_name + "::Enum"
@staticmethod
def get_setter_value_expression_pattern():
return "TypeBuilder::getEnumConstantValue(%s)"
@staticmethod
def reduce_to_raw_type():
return RawTypes.String
@staticmethod
def get_type_model():
return TypeModel.Enum(helper.full_name_prefix_for_use + fixed_type_name.class_name)
return EnumBinding
else:
if helper.is_ad_hoc:
class PlainString:
@classmethod
def resolve_inner(cls, resolve_context):
pass
@staticmethod
def request_user_runtime_cast(request):
raise Exception("Unsupported")
@staticmethod
def request_internal_runtime_cast():
pass
@staticmethod
def get_code_generator():
return None
@classmethod
def get_validator_call_text(cls):
return RawTypes.String.get_raw_validator_call_text()
@staticmethod
def reduce_to_raw_type():
return RawTypes.String
@staticmethod
def get_type_model():
return TypeModel.String
@staticmethod
def get_setter_value_expression_pattern():
return None
@classmethod
def get_array_item_c_type_text(cls):
return cls.reduce_to_raw_type().get_array_item_raw_c_type_text()
return PlainString
else:
class TypedefString:
    """Binding for a named string type: generates `typedef String <Name>;`
    so protocol code can refer to the semantic name."""
    @classmethod
    def resolve_inner(cls, resolve_context):
        pass

    @staticmethod
    def request_user_runtime_cast(request):
        raise Exception("Unsupported")

    @staticmethod
    def request_internal_runtime_cast():
        # Validation is delegated to the raw String validator.
        RawTypes.String.request_raw_internal_runtime_cast()

    @staticmethod
    def get_code_generator():
        class CodeGenerator:
            @staticmethod
            def generate_type_builder(writer, generate_context):
                helper.write_doc(writer)
                fixed_type_name.output_comment(writer)
                writer.newline("typedef String ")
                writer.append(fixed_type_name.class_name)
                writer.append(";\n\n")

            @staticmethod
            def register_use(forward_listener):
                pass

            @staticmethod
            def get_generate_pass_id():
                # Typedefs must be emitted before classes that may use them.
                return TypeBuilderPass.TYPEDEF

        return CodeGenerator

    @classmethod
    def get_validator_call_text(cls):
        return RawTypes.String.get_raw_validator_call_text()

    @staticmethod
    def reduce_to_raw_type():
        return RawTypes.String

    @staticmethod
    def get_type_model():
        return TypeModel.ValueType("%s%s" % (helper.full_name_prefix_for_use, fixed_type_name.class_name), True)

    @staticmethod
    def get_setter_value_expression_pattern():
        return None

    @classmethod
    def get_array_item_c_type_text(cls):
        return "const %s%s&" % (helper.full_name_prefix_for_use, fixed_type_name.class_name)
elif json_typable["type"] == "object":
if "properties" in json_typable:
class ClassBinding:
    """Binding for an "object" type that declares "properties": generates a
    full C++ builder class with a compile-time-checked setter sequence for
    required properties, plus an optional runtime validator."""
    # Lazily-computed property/ad-hoc-type info; also serves as the
    # "already resolved" marker.
    resolve_data_ = None
    need_user_runtime_cast_ = False
    need_internal_runtime_cast_ = False

    @classmethod
    def resolve_inner(cls, resolve_context):
        if cls.resolve_data_:
            return

        properties = json_typable["properties"]
        main = []
        optional = []
        ad_hoc_type_list = []

        for prop in properties:
            prop_name = prop["name"]
            ad_hoc_type_context = cls.AdHocTypeContextImpl(prop_name, fixed_type_name.class_name, resolve_context, ad_hoc_type_list, helper.full_name_prefix_for_impl)
            binding = resolve_param_type(prop, context_domain_name, ad_hoc_type_context)

            code_generator = binding.get_code_generator()
            if code_generator:
                code_generator.register_use(resolve_context.forward_listener)

            # Small record object pairing a property's JSON with its binding.
            class PropertyData:
                param_type_binding = binding
                p = prop

            if prop.get("optional"):
                optional.append(PropertyData)
            else:
                main.append(PropertyData)

        class ResolveData:
            main_properties = main
            optional_properties = optional
            ad_hoc_types = ad_hoc_type_list

        cls.resolve_data_ = ResolveData

        for ad_hoc in ad_hoc_type_list:
            ad_hoc.resolve_inner(resolve_context)

    @classmethod
    def request_user_runtime_cast(cls, request):
        if not request:
            return
        cls.need_user_runtime_cast_ = True
        request.acknowledge()
        # A user-visible runtimeCast implies internal validation support.
        cls.request_internal_runtime_cast()

    @classmethod
    def request_internal_runtime_cast(cls):
        if cls.need_internal_runtime_cast_:
            return
        cls.need_internal_runtime_cast_ = True
        # Propagate to every property type so their validators exist too.
        for p in cls.resolve_data_.main_properties:
            p.param_type_binding.request_internal_runtime_cast()
        for p in cls.resolve_data_.optional_properties:
            p.param_type_binding.request_internal_runtime_cast()

    @classmethod
    def get_code_generator(class_binding_cls):
        class CodeGenerator:
            @classmethod
            def generate_type_builder(cls, writer, generate_context):
                resolve_data = class_binding_cls.resolve_data_
                helper.write_doc(writer)
                class_name = fixed_type_name.class_name

                # "Open" types expose their fields publicly and inherit the
                # mutable InspectorObject instead of the sealed base.
                is_open_type = (context_domain_name + "." + class_name) in TYPES_WITH_OPEN_FIELD_LIST_SET

                fixed_type_name.output_comment(writer)
                writer.newline("class ")
                writer.append(class_name)
                writer.append(" : public ")
                if is_open_type:
                    writer.append("InspectorObject")
                else:
                    writer.append("InspectorObjectBase")
                writer.append(" {\n")
                writer.newline("public:\n")
                ad_hoc_type_writer = writer.insert_writer(" ")

                for ad_hoc_type in resolve_data.ad_hoc_types:
                    code_generator = ad_hoc_type.get_code_generator()
                    if code_generator:
                        code_generator.generate_type_builder(ad_hoc_type_writer, generate_context)

                # Bitmask enum tracking which required fields have been set;
                # used by the builder template-state machine below.
                writer.newline_multiline(
""" enum {
 NoFieldsSet = 0,
""")

                state_enum_items = []
                if len(resolve_data.main_properties) > 0:
                    pos = 0
                    for prop_data in resolve_data.main_properties:
                        item_name = Capitalizer.lower_camel_case_to_upper(prop_data.p["name"]) + "Set"
                        state_enum_items.append(item_name)
                        writer.newline(" %s = 1 << %s,\n" % (item_name, pos))
                        pos += 1
                    all_fields_set_value = "(" + (" | ".join(state_enum_items)) + ")"
                else:
                    all_fields_set_value = "0"

                writer.newline_multiline(CodeGeneratorInspectorStrings.class_binding_builder_part_1
                                         % (all_fields_set_value, class_name, class_name))

                # One state-advancing setter per required property.
                pos = 0
                for prop_data in resolve_data.main_properties:
                    prop_name = prop_data.p["name"]
                    param_type_binding = prop_data.param_type_binding
                    param_raw_type = param_type_binding.reduce_to_raw_type()

                    writer.newline_multiline(CodeGeneratorInspectorStrings.class_binding_builder_part_2
                        % (state_enum_items[pos],
                           Capitalizer.lower_camel_case_to_upper(prop_name),
                           param_type_binding.get_type_model().get_input_param_type_text(),
                           state_enum_items[pos], prop_name,
                           param_raw_type.get_setter_name(), prop_name,
                           format_setter_value_expression(param_type_binding, "value"),
                           state_enum_items[pos]))

                    pos += 1

                writer.newline_multiline(CodeGeneratorInspectorStrings.class_binding_builder_part_3
                                         % (class_name, class_name, class_name, class_name, class_name))

                writer.newline(" /*\n")
                writer.newline(" * Synthetic constructor:\n")
                writer.newline(" * RefPtr<%s> result = %s::create()" % (class_name, class_name))
                for prop_data in resolve_data.main_properties:
                    writer.append_multiline("\n * .set%s(...)" % Capitalizer.lower_camel_case_to_upper(prop_data.p["name"]))
                writer.append_multiline(";\n */\n")

                writer.newline_multiline(CodeGeneratorInspectorStrings.class_binding_builder_part_4)

                writer.newline(" typedef TypeBuilder::StructItemTraits ItemTraits;\n")

                # Optional properties get plain (non-state-tracked) setters.
                for prop_data in resolve_data.optional_properties:
                    prop_name = prop_data.p["name"]
                    param_type_binding = prop_data.param_type_binding
                    setter_name = "set%s" % Capitalizer.lower_camel_case_to_upper(prop_name)

                    writer.append_multiline("\n void %s" % setter_name)
                    writer.append("(%s value)\n" % param_type_binding.get_type_model().get_input_param_type_text())
                    writer.newline(" {\n")
                    writer.newline(" this->set%s(\"%s\", %s);\n"
                        % (param_type_binding.reduce_to_raw_type().get_setter_name(), prop_data.p["name"],
                           format_setter_value_expression(param_type_binding, "value")))
                    writer.newline(" }\n")

                    # Re-expose the base-class overload our setter shadows.
                    if setter_name in INSPECTOR_OBJECT_SETTER_NAMES:
                        writer.newline(" using InspectorObjectBase::%s;\n\n" % setter_name)

                if class_binding_cls.need_user_runtime_cast_:
                    writer.newline(" static PassRefPtr<%s> runtimeCast(PassRefPtr<InspectorValue> value)\n" % class_name)
                    writer.newline(" {\n")
                    writer.newline(" RefPtr<InspectorObject> object;\n")
                    writer.newline(" bool castRes = value->asObject(&object);\n")
                    writer.newline(" ASSERT_UNUSED(castRes, castRes);\n")
                    writer.append("#if %s\n" % VALIDATOR_IFDEF_NAME)
                    writer.newline(" assertCorrectValue(object.get());\n")
                    writer.append("#endif // %s\n" % VALIDATOR_IFDEF_NAME)
                    # The static_cast chain is only valid if the generated
                    # class adds no data members to InspectorObjectBase.
                    writer.newline(" COMPILE_ASSERT(sizeof(%s) == sizeof(InspectorObjectBase), type_cast_problem);\n" % class_name)
                    writer.newline(" return static_cast<%s*>(static_cast<InspectorObjectBase*>(object.get()));\n" % class_name)
                    writer.newline(" }\n")
                    writer.append("\n")

                if class_binding_cls.need_internal_runtime_cast_:
                    writer.append("#if %s\n" % VALIDATOR_IFDEF_NAME)
                    writer.newline(" static void assertCorrectValue(InspectorValue* value);\n")
                    writer.append("#endif // %s\n" % VALIDATOR_IFDEF_NAME)

                    # Closed types reject unexpected extra properties.
                    closed_field_set = (context_domain_name + "." + class_name) not in TYPES_WITH_OPEN_FIELD_LIST_SET

                    validator_writer = generate_context.validator_writer

                    domain_fixes = DomainNameFixes.get_fixed_data(context_domain_name)
                    domain_guard = domain_fixes.get_guard()
                    if domain_guard:
                        domain_guard.generate_open(validator_writer)

                    validator_writer.newline("void %s%s::assertCorrectValue(InspectorValue* value)\n" % (helper.full_name_prefix_for_impl, class_name))
                    validator_writer.newline("{\n")
                    validator_writer.newline(" RefPtr<InspectorObject> object;\n")
                    validator_writer.newline(" bool castRes = value->asObject(&object);\n")
                    validator_writer.newline(" ASSERT_UNUSED(castRes, castRes);\n")
                    # Required properties must be present and valid.
                    for prop_data in resolve_data.main_properties:
                        validator_writer.newline(" {\n")
                        it_name = "%sPos" % prop_data.p["name"]
                        validator_writer.newline(" InspectorObject::iterator %s;\n" % it_name)
                        validator_writer.newline(" %s = object->find(\"%s\");\n" % (it_name, prop_data.p["name"]))
                        validator_writer.newline(" ASSERT(%s != object->end());\n" % it_name)
                        validator_writer.newline(" %s(%s->value.get());\n" % (prop_data.param_type_binding.get_validator_call_text(), it_name))
                        validator_writer.newline(" }\n")

                    if closed_field_set:
                        validator_writer.newline(" int foundPropertiesCount = %s;\n" % len(resolve_data.main_properties))

                    # Optional properties are validated only if present.
                    for prop_data in resolve_data.optional_properties:
                        validator_writer.newline(" {\n")
                        it_name = "%sPos" % prop_data.p["name"]
                        validator_writer.newline(" InspectorObject::iterator %s;\n" % it_name)
                        validator_writer.newline(" %s = object->find(\"%s\");\n" % (it_name, prop_data.p["name"]))
                        validator_writer.newline(" if (%s != object->end()) {\n" % it_name)
                        validator_writer.newline(" %s(%s->value.get());\n" % (prop_data.param_type_binding.get_validator_call_text(), it_name))
                        if closed_field_set:
                            validator_writer.newline(" ++foundPropertiesCount;\n")
                        validator_writer.newline(" }\n")
                        validator_writer.newline(" }\n")

                    if closed_field_set:
                        validator_writer.newline(" if (foundPropertiesCount != object->size()) {\n")
                        validator_writer.newline(" FATAL(\"Unexpected properties in object: %s\\n\", object->toJSONString().ascii().data());\n")
                        validator_writer.newline(" }\n")
                    validator_writer.newline("}\n")

                    if domain_guard:
                        domain_guard.generate_close(validator_writer)
                    validator_writer.newline("\n\n")

                if is_open_type:
                    cpp_writer = generate_context.cpp_writer
                    writer.append("\n")
                    writer.newline(" // Property names for type generated as open.\n")
                    for prop_data in resolve_data.main_properties + resolve_data.optional_properties:
                        prop_name = prop_data.p["name"]
                        prop_field_name = Capitalizer.lower_camel_case_to_upper(prop_name)
                        writer.newline(" static const char* %s;\n" % (prop_field_name))
                        cpp_writer.newline("const char* %s%s::%s = \"%s\";\n" % (helper.full_name_prefix_for_impl, class_name, prop_field_name, prop_name))

                writer.newline("};\n\n")

            @staticmethod
            def generate_forward_declaration(writer):
                class_name = fixed_type_name.class_name
                writer.newline("class ")
                writer.append(class_name)
                writer.append(";\n")

            @staticmethod
            def register_use(forward_listener):
                helper.add_to_forward_listener(forward_listener)

            @staticmethod
            def get_generate_pass_id():
                return TypeBuilderPass.MAIN

        return CodeGenerator

    @staticmethod
    def get_validator_call_text():
        return helper.full_name_prefix_for_use + fixed_type_name.class_name + "::assertCorrectValue"

    @classmethod
    def get_array_item_c_type_text(cls):
        return helper.full_name_prefix_for_use + fixed_type_name.class_name

    @staticmethod
    def get_setter_value_expression_pattern():
        return None

    @staticmethod
    def reduce_to_raw_type():
        return RawTypes.Object

    @staticmethod
    def get_type_model():
        return TypeModel.RefPtrBased(helper.full_name_prefix_for_use + fixed_type_name.class_name)

    class AdHocTypeContextImpl:
        """Context handed to nested property types so ad hoc declarations
        land inside this class and get readable generated names."""
        def __init__(self, property_name, class_name, resolve_context, ad_hoc_type_list, parent_full_name_prefix):
            self.property_name = property_name
            self.class_name = class_name
            self.resolve_context = resolve_context
            self.ad_hoc_type_list = ad_hoc_type_list
            self.container_full_name_prefix = parent_full_name_prefix + class_name + "::"
            self.container_relative_name_prefix = ""

        def get_type_name_fix(self):
            class NameFix:
                # Ad hoc types are named after the property they describe.
                class_name = Capitalizer.lower_camel_case_to_upper(self.property_name)

                @staticmethod
                def output_comment(writer):
                    writer.newline("// Named after property name '%s' while generating %s.\n" % (self.property_name, self.class_name))

            return NameFix

        def add_type(self, binding):
            self.ad_hoc_type_list.append(binding)
return ClassBinding
else:
class PlainObjectBinding:
    """Binding for an "object" type without declared properties: a free-form
    InspectorObject with no generated class of its own."""
    @classmethod
    def resolve_inner(cls, resolve_context):
        pass

    @staticmethod
    def request_user_runtime_cast(request):
        # Nothing to generate; the request is silently satisfied.
        pass

    @staticmethod
    def request_internal_runtime_cast():
        RawTypes.Object.request_raw_internal_runtime_cast()

    @staticmethod
    def get_code_generator():
        # No dedicated type is generated.
        pass

    @staticmethod
    def get_validator_call_text():
        # Only the JSON kind can be checked; the contents are unconstrained.
        return "RuntimeCastHelper::assertType<InspectorValue::TypeObject>"

    @classmethod
    def get_array_item_c_type_text(cls):
        return cls.reduce_to_raw_type().get_array_item_raw_c_type_text()

    @staticmethod
    def get_setter_value_expression_pattern():
        return None

    @staticmethod
    def reduce_to_raw_type():
        return RawTypes.Object

    @staticmethod
    def get_type_model():
        return TypeModel.Object
elif json_typable["type"] == "array":
if "items" in json_typable:
ad_hoc_types = []
class AdHocTypeContext:
    """Context for the array's item type: keeps the array's own name fix and
    collects any ad hoc declarations into the surrounding list."""
    # Arrays themselves are not named containers for nested types.
    container_full_name_prefix = "<not yet defined>"
    container_relative_name_prefix = ""

    @staticmethod
    def get_type_name_fix():
        return fixed_type_name

    @staticmethod
    def add_type(binding):
        ad_hoc_types.append(binding)
item_binding = resolve_param_type(json_typable["items"], context_domain_name, AdHocTypeContext)
class ArrayBinding:
    """Binding for an "array" type with a declared "items" type; maps to
    TypeBuilder::Array<ItemType> on the C++ side."""
    resolve_data_ = None
    need_internal_runtime_cast_ = False

    @classmethod
    def resolve_inner(cls, resolve_context):
        if cls.resolve_data_:
            return

        class ResolveData:
            item_type_binding = item_binding
            ad_hoc_type_list = ad_hoc_types

        cls.resolve_data_ = ResolveData

        for t in ad_hoc_types:
            t.resolve_inner(resolve_context)

    @classmethod
    def request_user_runtime_cast(cls, request):
        raise Exception("Not implemented yet")

    @classmethod
    def request_internal_runtime_cast(cls):
        if cls.need_internal_runtime_cast_:
            return
        cls.need_internal_runtime_cast_ = True
        # The item type must also be validatable.
        cls.resolve_data_.item_type_binding.request_internal_runtime_cast()

    @classmethod
    def get_code_generator(array_binding_cls):
        class CodeGenerator:
            @staticmethod
            def generate_type_builder(writer, generate_context):
                ad_hoc_type_writer = writer

                resolve_data = array_binding_cls.resolve_data_

                # Only nested ad hoc item types produce output; the array
                # itself is expressed via the Array<> template.
                for ad_hoc_type in resolve_data.ad_hoc_type_list:
                    code_generator = ad_hoc_type.get_code_generator()
                    if code_generator:
                        code_generator.generate_type_builder(ad_hoc_type_writer, generate_context)

            @staticmethod
            def generate_forward_declaration(writer):
                pass

            @staticmethod
            def register_use(forward_listener):
                item_code_generator = item_binding.get_code_generator()
                if item_code_generator:
                    item_code_generator.register_use(forward_listener)

            @staticmethod
            def get_generate_pass_id():
                return TypeBuilderPass.MAIN

        return CodeGenerator

    @classmethod
    def get_validator_call_text(cls):
        return cls.get_array_item_c_type_text() + "::assertCorrectValue"

    @classmethod
    def get_array_item_c_type_text(cls):
        # replace_right_shift avoids the pre-C++11 `>>` token in nested
        # template instantiations.
        return replace_right_shift("TypeBuilder::Array<%s>" % cls.resolve_data_.item_type_binding.get_array_item_c_type_text())

    @staticmethod
    def get_setter_value_expression_pattern():
        return None

    @staticmethod
    def reduce_to_raw_type():
        return RawTypes.Array

    @classmethod
    def get_type_model(cls):
        return TypeModel.RefPtrBased(cls.get_array_item_c_type_text())
else:
# Fall-through to raw type.
pass
raw_type = RawTypes.get(json_typable["type"])
return RawTypeBinding(raw_type)
class RawTypeBinding:
    """Binding adapter for a primitive protocol type.

    Exposes the common binding interface by delegating every query to the
    wrapped RawTypes entry; no code is generated for raw types.
    """

    def __init__(self, raw_type):
        self.raw_type_ = raw_type

    def resolve_inner(self, resolve_context):
        """Raw types contain no nested types; nothing to resolve."""

    def request_user_runtime_cast(self, request):
        """runtimeCast is only supported for object-like generated types."""
        raise Exception("Unsupported")

    def request_internal_runtime_cast(self):
        self.raw_type_.request_raw_internal_runtime_cast()

    def get_code_generator(self):
        """No dedicated code is emitted for a raw type."""
        return None

    def get_validator_call_text(self):
        return self.raw_type_.get_raw_validator_call_text()

    def get_array_item_c_type_text(self):
        return self.raw_type_.get_array_item_raw_c_type_text()

    def get_setter_value_expression_pattern(self):
        """No conversion wrapper is needed when storing the value."""
        return None

    def reduce_to_raw_type(self):
        return self.raw_type_

    def get_type_model(self):
        return self.raw_type_.get_raw_type_model()
class TypeData(object):
    """Per-type metadata for a named type declared in a protocol domain.

    Keeps the raw JSON description and creates the rich type binding lazily
    on first access, because resolving one named type may require resolving
    other named types (a cycle in that process is a fatal input error).
    """
    def __init__(self, json_type, json_domain, domain_data):
        self.json_type_ = json_type
        self.json_domain_ = json_domain
        self.domain_data_ = domain_data

        if "type" not in json_type:
            raise Exception("Unknown type")

        json_type_name = json_type["type"]
        raw_type = RawTypes.get(json_type_name)
        self.raw_type_ = raw_type
        # Cycle-detection flag for the lazy binding construction below.
        self.binding_being_resolved_ = False
        self.binding_ = None

    def get_raw_type(self):
        return self.raw_type_

    def get_binding(self):
        if not self.binding_:
            if self.binding_being_resolved_:
                # FIX: was `raise Error(...)` -- `Error` is undefined in this
                # script, so the intended diagnostic surfaced as a NameError.
                # Use Exception, consistent with the rest of the file.
                raise Exception("Type %s is already being resolved" % self.json_type_["type"])
            # Resolve only lazily, because resolving one named type may require resolving some other named type.
            self.binding_being_resolved_ = True
            try:
                self.binding_ = TypeBindings.create_named_type_declaration(self.json_type_, self.json_domain_["domain"], self)
            finally:
                self.binding_being_resolved_ = False

        return self.binding_

    def get_json_type(self):
        return self.json_type_

    def get_name(self):
        return self.json_type_["id"]

    def get_domain_name(self):
        return self.json_domain_["domain"]
class DomainData:
    """Registry of the named types that one protocol domain declares."""

    def __init__(self, json_domain):
        self.json_domain = json_domain
        self.types_ = []

    def add_type(self, type_data):
        """Register a TypeData parsed from this domain's "types" section."""
        self.types_.append(type_data)

    def name(self):
        """Return the domain identifier string from the JSON description."""
        return self.json_domain["domain"]

    def types(self):
        """Return all registered types, in declaration order."""
        return self.types_
class TypeMap:
    """Index of every named protocol type, keyed by (domain name, type id).

    Also keeps a DomainData per domain, in the order domains appear in the
    API description.
    """

    def __init__(self, api):
        self.map_ = {}
        self.domains_ = []
        for domain_json in api["domains"]:
            name = domain_json["domain"]

            per_domain = {}
            self.map_[name] = per_domain

            domain_record = DomainData(domain_json)
            self.domains_.append(domain_record)

            # Domains without a "types" section simply get an empty map.
            for type_json in domain_json.get("types", []):
                entry = TypeData(type_json, domain_json, domain_record)
                per_domain[type_json["id"]] = entry
                domain_record.add_type(entry)

    def domains(self):
        return self.domains_

    def get(self, domain_name, type_name):
        return self.map_[domain_name][type_name]
def resolve_param_type(json_parameter, scope_domain_name, ad_hoc_type_context):
    """Return the type binding for a parameter description.

    A "$ref" entry points at a named type looked up through the type map;
    an inline "type" entry produces an ad hoc binding that is also
    registered with the surrounding context. Anything else is an error.
    """
    if "$ref" in json_parameter:
        return get_ref_data(json_parameter["$ref"], scope_domain_name).get_binding()
    if "type" in json_parameter:
        binding = TypeBindings.create_ad_hoc_type_declaration(json_parameter, scope_domain_name, ad_hoc_type_context)
        ad_hoc_type_context.add_type(binding)
        return binding
    raise Exception("Unknown type")
def resolve_param_raw_type(json_parameter, scope_domain_name):
    """Return the RawTypes entry for a parameter description.

    Mirrors resolve_param_type but only needs the primitive kind: a "$ref"
    is followed to the named type's raw type, an inline "type" is looked up
    directly, anything else is an error.
    """
    if "$ref" in json_parameter:
        return get_ref_data(json_parameter["$ref"], scope_domain_name).get_raw_type()
    if "type" in json_parameter:
        return RawTypes.get(json_parameter["type"])
    raise Exception("Unknown type")
def get_ref_data(json_ref, scope_domain_name):
    """Look up the TypeData a "$ref" string points to.

    Qualified references look like "Domain.Type"; unqualified references
    are resolved within the referring domain.
    """
    domain_name, separator, type_name = json_ref.partition(".")
    if not separator:
        # No dot: the whole ref is a type name local to this domain.
        domain_name, type_name = scope_domain_name, json_ref
    return type_map.get(domain_name, type_name)
# Read and parse the protocol description. Use a context manager so the
# file handle is closed deterministically (the original leaked it open).
with open(input_json_filename, "r") as input_file:
    json_string = input_file.read()
json_api = json.loads(json_string)
class Templates:
    """Namespace of string.Template objects for every generated file and
    code fragment; all are built once at class-definition time."""

    def get_this_script_path_(absolute_path):
        # Deliberately not a @staticmethod: it is called below during
        # class-body evaluation, where the plain function is callable.
        absolute_path = os.path.abspath(absolute_path)
        components = []

        def fill_recursive(path_part, depth):
            if depth <= 0 or path_part == '/':
                return
            fill_recursive(os.path.dirname(path_part), depth - 1)
            components.append(os.path.basename(path_part))

        # Typical path is /Source/WebCore/inspector/CodeGeneratorInspector.py
        # Let's take 4 components from the real path then.
        fill_recursive(absolute_path, 4)

        return "/".join(components)

    # Header stamped at the top of every generated file.
    file_header_ = ("// File is generated by %s\n\n" % get_this_script_path_(sys.argv[0]) +
"""// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
""")

    frontend_domain_class = string.Template(CodeGeneratorInspectorStrings.frontend_domain_class)
    backend_method = string.Template(CodeGeneratorInspectorStrings.backend_method)
    frontend_method = string.Template(CodeGeneratorInspectorStrings.frontend_method)
    callback_method = string.Template(CodeGeneratorInspectorStrings.callback_method)
    frontend_h = string.Template(file_header_ + CodeGeneratorInspectorStrings.frontend_h)
    backend_h = string.Template(file_header_ + CodeGeneratorInspectorStrings.backend_h)
    backend_cpp = string.Template(file_header_ + CodeGeneratorInspectorStrings.backend_cpp)
    frontend_cpp = string.Template(file_header_ + CodeGeneratorInspectorStrings.frontend_cpp)
    typebuilder_h = string.Template(file_header_ + CodeGeneratorInspectorStrings.typebuilder_h)
    typebuilder_cpp = string.Template(file_header_ + CodeGeneratorInspectorStrings.typebuilder_cpp)
    backend_js = string.Template(file_header_ + CodeGeneratorInspectorStrings.backend_js)
    param_container_access_code = CodeGeneratorInspectorStrings.param_container_access_code
# Global index of all named protocol types, built once from the parsed JSON.
type_map = TypeMap(json_api)
class NeedRuntimeCastRequest:
    """One-shot flag recording whether a requested runtimeCast was generated.

    is_acknowledged() returns None until acknowledge() is called, and True
    afterwards; an unacknowledged request is a generation failure.
    """

    def __init__(self):
        self.ack_ = None

    def acknowledge(self):
        """Mark the request as satisfied by a binding."""
        self.ack_ = True

    def is_acknowledged(self):
        """Return True once acknowledged, None otherwise."""
        return self.ack_
def resolve_all_types():
    """Force-resolve every named type's binding and honor runtimeCast requests.

    Runs two passes over all domains: the first materializes and inner-resolves
    every binding, the second matches TYPES_WITH_RUNTIME_CAST_SET entries
    against the resolved types. Returns the ForwardListener class that
    accumulated the types needing forward declarations.
    """
    runtime_cast_generate_requests = {}
    for type_name in TYPES_WITH_RUNTIME_CAST_SET:
        runtime_cast_generate_requests[type_name] = NeedRuntimeCastRequest()

    class ForwardListener:
        # Types that still need a forward declaration.
        type_data_set = set()
        # Types whose full declaration has already been scheduled.
        already_declared_set = set()

        @classmethod
        def add_type_data(cls, type_data):
            if type_data not in cls.already_declared_set:
                cls.type_data_set.add(type_data)

    class ResolveContext:
        forward_listener = ForwardListener

    for domain_data in type_map.domains():
        for type_data in domain_data.types():
            # Do not generate forwards for this type any longer.
            ForwardListener.already_declared_set.add(type_data)

            binding = type_data.get_binding()
            binding.resolve_inner(ResolveContext)

    for domain_data in type_map.domains():
        for type_data in domain_data.types():
            full_type_name = "%s.%s" % (type_data.get_domain_name(), type_data.get_name())
            request = runtime_cast_generate_requests.pop(full_type_name, None)
            binding = type_data.get_binding()
            if request:
                binding.request_user_runtime_cast(request)

            if request and not request.is_acknowledged():
                raise Exception("Failed to generate runtimeCast in " + full_type_name)

    # Any leftover request names a type missing from the API description;
    # report the first one found.
    for full_type_name in runtime_cast_generate_requests:
        raise Exception("Failed to generate runtimeCast. Type " + full_type_name + " not found")

    return ForwardListener
# Resolving runs mostly for its side effects; keep the listener so forward
# declarations can be emitted later.
global_forward_listener = resolve_all_types()
def get_annotated_type_text(raw_type, annotated_type):
    """Render a type for generated C++ output.

    When the protocol-level (annotated) type differs from the raw C++ type
    actually used, the annotated name is kept as an inline comment.
    """
    if annotated_type == raw_type:
        return raw_type
    return "/*%s*/ %s" % (annotated_type, raw_type)
def format_setter_value_expression(param_type_binding, value_ref):
    """Wrap value_ref in the binding's conversion pattern, if any.

    Bindings without a setter-value pattern store the value expression
    unchanged.
    """
    pattern = param_type_binding.get_setter_value_expression_pattern()
    return pattern % value_ref if pattern else value_ref
class Generator:
# Accumulator lists for every generated output fragment; populated by the
# process_* methods below and joined into the final files afterwards.
frontend_class_field_lines = []
frontend_domain_class_lines = []
method_name_enum_list = []
backend_method_declaration_list = []
backend_method_implementation_list = []
backend_method_name_declaration_list = []
method_handler_list = []
frontend_method_list = []
backend_js_domain_initializer_list = []
backend_virtual_setters_list = []
backend_agent_interface_list = []
backend_setters_list = []
backend_constructor_init_list = []
backend_field_list = []
frontend_constructor_init_list = []
type_builder_fragments = []
type_builder_forwards = []
validator_impl_list = []
type_builder_impl_list = []
@staticmethod
def go():
    """Top-level driver: process named types first, then walk every domain
    emitting enums, events and commands into the accumulator lists."""
    Generator.process_types(type_map)

    # Every list that receives per-domain output during this pass; a
    # domain's conditional-compilation guard must wrap all of them.
    first_cycle_guardable_list_list = [
        Generator.backend_method_declaration_list,
        Generator.backend_method_implementation_list,
        Generator.backend_method_name_declaration_list,
        Generator.backend_agent_interface_list,
        Generator.frontend_class_field_lines,
        Generator.frontend_constructor_init_list,
        Generator.frontend_domain_class_lines,
        Generator.frontend_method_list,
        Generator.method_handler_list,
        Generator.method_name_enum_list,
        Generator.backend_constructor_init_list,
        Generator.backend_virtual_setters_list,
        Generator.backend_setters_list,
        Generator.backend_field_list]

    for json_domain in json_api["domains"]:
        domain_name = json_domain["domain"]
        domain_name_lower = domain_name.lower()

        domain_fixes = DomainNameFixes.get_fixed_data(domain_name)
        domain_guard = domain_fixes.get_guard()

        if domain_guard:
            for l in first_cycle_guardable_list_list:
                domain_guard.generate_open(l)

        agent_field_name = domain_fixes.agent_field_name

        frontend_method_declaration_lines = []

        Generator.backend_js_domain_initializer_list.append("// %s.\n" % domain_name)
        if not domain_fixes.skip_js_bind:
            Generator.backend_js_domain_initializer_list.append("InspectorBackend.register%sDispatcher = InspectorBackend.registerDomainDispatcher.bind(InspectorBackend, \"%s\");\n" % (domain_name, domain_name))

        # Enums are registered both for string types and for string
        # properties of object types.
        if "types" in json_domain:
            for json_type in json_domain["types"]:
                if "type" in json_type and json_type["type"] == "string" and "enum" in json_type:
                    enum_name = "%s.%s" % (domain_name, json_type["id"])
                    Generator.process_enum(json_type, enum_name)
                elif json_type["type"] == "object":
                    if "properties" in json_type:
                        for json_property in json_type["properties"]:
                            if "type" in json_property and json_property["type"] == "string" and "enum" in json_property:
                                enum_name = "%s.%s%s" % (domain_name, json_type["id"], to_title_case(json_property["name"]))
                                Generator.process_enum(json_property, enum_name)

        if "events" in json_domain:
            for json_event in json_domain["events"]:
                Generator.process_event(json_event, domain_name, frontend_method_declaration_lines)

        Generator.frontend_class_field_lines.append(" %s m_%s;\n" % (domain_name, domain_name_lower))
        # The first initializer lacks the leading comma.
        if Generator.frontend_constructor_init_list:
            Generator.frontend_constructor_init_list.append(" , ")
        Generator.frontend_constructor_init_list.append("m_%s(inspectorFrontendChannel)\n" % domain_name_lower)
        Generator.frontend_domain_class_lines.append(Templates.frontend_domain_class.substitute(None,
            domainClassName=domain_name,
            domainFieldName=domain_name_lower,
            frontendDomainMethodDeclarations="".join(flatten_list(frontend_method_declaration_lines))))

        agent_interface_name = Capitalizer.lower_camel_case_to_upper(domain_name) + "CommandHandler"
        Generator.backend_agent_interface_list.append(" class %s {\n" % agent_interface_name)
        Generator.backend_agent_interface_list.append(" public:\n")
        if "commands" in json_domain:
            for json_command in json_domain["commands"]:
                Generator.process_command(json_command, domain_name, agent_field_name, agent_interface_name)
        Generator.backend_agent_interface_list.append("\n protected:\n")
        Generator.backend_agent_interface_list.append(" virtual ~%s() { }\n" % agent_interface_name)
        Generator.backend_agent_interface_list.append(" };\n\n")

        Generator.backend_constructor_init_list.append(" , m_%s(0)" % agent_field_name)
        Generator.backend_virtual_setters_list.append(" virtual void registerAgent(%s* %s) = 0;" % (agent_interface_name, agent_field_name))
        Generator.backend_setters_list.append(" virtual void registerAgent(%s* %s) { ASSERT(!m_%s); m_%s = %s; }" % (agent_interface_name, agent_field_name, agent_field_name, agent_field_name, agent_field_name))
        Generator.backend_field_list.append(" %s* m_%s;" % (agent_interface_name, agent_field_name))

        # Close guards in reverse order of opening.
        if domain_guard:
            for l in reversed(first_cycle_guardable_list_list):
                domain_guard.generate_close(l)
        Generator.backend_js_domain_initializer_list.append("\n")
@staticmethod
def process_enum(json_enum, enum_name):
    """Emit the JS-side registration line for a protocol enum."""
    members = ", ".join(
        "%s: \"%s\"" % (fix_camel_case(value), value)
        for value in json_enum["enum"])
    Generator.backend_js_domain_initializer_list.append(
        "InspectorBackend.registerEnum(\"%s\", {%s});\n" % (enum_name, members))
@staticmethod
def process_event(json_event, domain_name, frontend_method_declaration_lines):
    """Emit the frontend C++ event method, its declaration line, and the
    JS-side event registration."""
    event_name = json_event["name"]

    # Ad hoc types declared by event parameters are emitted next to the
    # method declaration; the list is filled in by generate_send_method.
    ad_hoc_type_output = []
    frontend_method_declaration_lines.append(ad_hoc_type_output)
    ad_hoc_type_writer = Writer(ad_hoc_type_output, " ")

    decl_parameter_list = []

    json_parameters = json_event.get("parameters")
    Generator.generate_send_method(json_parameters, event_name, domain_name, ad_hoc_type_writer,
                                   decl_parameter_list,
                                   Generator.EventMethodStructTemplate,
                                   Generator.frontend_method_list, Templates.frontend_method, {"eventName": event_name})

    backend_js_event_param_list = []
    if json_parameters:
        for parameter in json_parameters:
            parameter_name = parameter["name"]
            backend_js_event_param_list.append("\"%s\"" % parameter_name)

    frontend_method_declaration_lines.append(
        " void %s(%s);\n" % (event_name, ", ".join(decl_parameter_list)))

    Generator.backend_js_domain_initializer_list.append("InspectorBackend.registerEvent(\"%s.%s\", [%s]);\n" % (
        domain_name, event_name, ", ".join(backend_js_event_param_list)))
class EventMethodStructTemplate:
    """Strategy for event send methods: parameters are collected into a
    dedicated "params" object attached to the outgoing message."""

    # Setter lines produced by generate_send_method target this variable.
    container_name = "paramsObject"

    @staticmethod
    def append_prolog(line_list):
        # Allocate the params object before any setter lines.
        line_list.append(" RefPtr<InspectorObject> paramsObject = InspectorObject::create();\n")

    @staticmethod
    def append_epilog(line_list):
        # Attach the populated params object to the message.
        line_list.append(' jsonMessage->setObject("params", paramsObject);\n')
@staticmethod
def process_command(json_command, domain_name, agent_field_name, agent_interface_name):
    """Emit everything for one backend command: dispatcher enum/handler
    entries, the dispatcher method, the agent interface signature and the
    JS registration. Handles sync and async ("async": true) commands."""
    json_command_name = json_command["name"]

    cmd_enum_name = "k%s_%sCmd" % (domain_name, json_command["name"])

    Generator.method_name_enum_list.append(" %s," % cmd_enum_name)
    Generator.method_handler_list.append(" &InspectorBackendDispatcherImpl::%s_%s," % (domain_name, json_command_name))
    Generator.backend_method_declaration_list.append(" void %s_%s(long callId, InspectorObject* requestMessageObject);" % (domain_name, json_command_name))

    # Ad hoc types declared by this command are spliced into the agent
    # interface at this position (a nested list inside the flat list).
    ad_hoc_type_output = []
    Generator.backend_agent_interface_list.append(ad_hoc_type_output)
    ad_hoc_type_writer = Writer(ad_hoc_type_output, " ")

    Generator.backend_agent_interface_list.append(" virtual void %s(ErrorString*" % json_command_name)

    method_in_code = ""
    method_out_code = ""
    agent_call_param_list = []
    response_cook_list = []
    request_message_param = ""
    js_parameters_text = ""
    if "parameters" in json_command:
        json_params = json_command["parameters"]
        method_in_code += Templates.param_container_access_code
        request_message_param = " requestMessageObject"
        js_param_list = []

        for json_parameter in json_params:
            json_param_name = json_parameter["name"]
            param_raw_type = resolve_param_raw_type(json_parameter, domain_name)

            getter_name = param_raw_type.get_getter_name()

            optional = json_parameter.get("optional")

            non_optional_type_model = param_raw_type.get_raw_type_model()
            if optional:
                type_model = non_optional_type_model.get_optional()
            else:
                type_model = non_optional_type_model

            if optional:
                # Optional params are passed as pointer-or-null, with a
                # separate "found" flag from the container getter.
                code = (" bool %s_valueFound = false;\n"
                        " %s in_%s = get%s(paramsContainerPtr, \"%s\", &%s_valueFound, protocolErrorsPtr);\n" %
                        (json_param_name, non_optional_type_model.get_command_return_pass_model().get_return_var_type(), json_param_name, getter_name, json_param_name, json_param_name))
                param = ", %s_valueFound ? &in_%s : 0" % (json_param_name, json_param_name)
                # FIXME: pass optional refptr-values as PassRefPtr
                formal_param_type_pattern = "const %s*"
            else:
                code = (" %s in_%s = get%s(paramsContainerPtr, \"%s\", 0, protocolErrorsPtr);\n" %
                        (non_optional_type_model.get_command_return_pass_model().get_return_var_type(), json_param_name, getter_name, json_param_name))
                param = ", in_%s" % json_param_name
                # FIXME: pass not-optional refptr-values as NonNullPassRefPtr
                if param_raw_type.is_heavy_value():
                    formal_param_type_pattern = "const %s&"
                else:
                    formal_param_type_pattern = "%s"

            method_in_code += code
            agent_call_param_list.append(param)
            Generator.backend_agent_interface_list.append(", %s in_%s" % (formal_param_type_pattern % non_optional_type_model.get_command_return_pass_model().get_return_var_type(), json_param_name))

            js_bind_type = param_raw_type.get_js_bind_type()
            js_param_text = "{\"name\": \"%s\", \"type\": \"%s\", \"optional\": %s}" % (
                json_param_name,
                js_bind_type,
                ("true" if ("optional" in json_parameter and json_parameter["optional"]) else "false"))

            js_param_list.append(js_param_text)

        js_parameters_text = ", ".join(js_param_list)

    response_cook_text = ""
    if json_command.get("async") == True:
        # Async commands return via a generated callback object instead of
        # out-parameters; the response is sent when the agent invokes it.
        callback_name = Capitalizer.lower_camel_case_to_upper(json_command_name) + "Callback"

        callback_output = []
        callback_writer = Writer(callback_output, ad_hoc_type_writer.get_indent())

        decl_parameter_list = []
        Generator.generate_send_method(json_command.get("returns"), json_command_name, domain_name, ad_hoc_type_writer,
                                       decl_parameter_list,
                                       Generator.CallbackMethodStructTemplate,
                                       Generator.backend_method_implementation_list, Templates.callback_method,
                                       {"callbackName": callback_name, "agentName": agent_interface_name})
        callback_writer.newline("class " + callback_name + " : public CallbackBase {\n")
        callback_writer.newline("public:\n")
        callback_writer.newline(" " + callback_name + "(PassRefPtr<InspectorBackendDispatcherImpl>, int id);\n")
        callback_writer.newline(" void sendSuccess(" + ", ".join(decl_parameter_list) + ");\n")
        callback_writer.newline("};\n")
        ad_hoc_type_output.append(callback_output)

        method_out_code += " RefPtr<" + agent_interface_name + "::" + callback_name + "> callback = adoptRef(new " + agent_interface_name + "::" + callback_name + "(this, callId));\n"
        agent_call_param_list.append(", callback")
        # On error, disable the callback so a late sendSuccess is ignored.
        response_cook_text += " if (!error.length()) \n"
        response_cook_text += " return;\n"
        response_cook_text += " callback->disable();\n"
        Generator.backend_agent_interface_list.append(", PassRefPtr<%s> callback" % callback_name)
    else:
        if "returns" in json_command:
            method_out_code += "\n"
            for json_return in json_command["returns"]:
                json_return_name = json_return["name"]

                optional = bool(json_return.get("optional"))

                return_type_binding = Generator.resolve_type_and_generate_ad_hoc(json_return, json_command_name, domain_name, ad_hoc_type_writer, agent_interface_name + "::")

                raw_type = return_type_binding.reduce_to_raw_type()
                setter_type = raw_type.get_setter_name()
                initializer = raw_type.get_c_initializer()  # note: currently unused

                type_model = return_type_binding.get_type_model()
                if optional:
                    type_model = type_model.get_optional()

                code = " %s out_%s;\n" % (type_model.get_command_return_pass_model().get_return_var_type(), json_return_name)
                param = ", %sout_%s" % (type_model.get_command_return_pass_model().get_output_argument_prefix(), json_return_name)
                var_name = "out_%s" % json_return_name
                setter_argument = type_model.get_command_return_pass_model().get_output_to_raw_expression() % var_name
                if return_type_binding.get_setter_value_expression_pattern():
                    setter_argument = return_type_binding.get_setter_value_expression_pattern() % setter_argument

                cook = " result->set%s(\"%s\", %s);\n" % (setter_type, json_return_name,
                                                          setter_argument)

                # Optional returns are only serialized when actually set.
                set_condition_pattern = type_model.get_command_return_pass_model().get_set_return_condition()
                if set_condition_pattern:
                    cook = (" if (%s)\n " % (set_condition_pattern % var_name)) + cook
                annotated_type = type_model.get_command_return_pass_model().get_output_parameter_type()

                param_name = "out_%s" % json_return_name
                if optional:
                    param_name = "opt_" + param_name

                Generator.backend_agent_interface_list.append(", %s %s" % (annotated_type, param_name))
                response_cook_list.append(cook)

                method_out_code += code
                agent_call_param_list.append(param)

        response_cook_text = "".join(response_cook_list)

        if len(response_cook_text) != 0:
            response_cook_text = " if (!error.length()) {\n" + response_cook_text + " }"

    backend_js_reply_param_list = []
    if "returns" in json_command:
        for json_return in json_command["returns"]:
            json_return_name = json_return["name"]
            backend_js_reply_param_list.append("\"%s\"" % json_return_name)

    js_reply_list = "[%s]" % ", ".join(backend_js_reply_param_list)

    Generator.backend_method_implementation_list.append(Templates.backend_method.substitute(None,
        domainName=domain_name, methodName=json_command_name,
        agentField="m_" + agent_field_name,
        methodInCode=method_in_code,
        methodOutCode=method_out_code,
        agentCallParams="".join(agent_call_param_list),
        requestMessageObject=request_message_param,
        responseCook=response_cook_text,
        commandNameIndex=cmd_enum_name))
    Generator.backend_method_name_declaration_list.append(" \"%s.%s\"," % (domain_name, json_command_name))

    Generator.backend_js_domain_initializer_list.append("InspectorBackend.registerCommand(\"%s.%s\", [%s], %s);\n" % (domain_name, json_command_name, js_parameters_text, js_reply_list))
    Generator.backend_agent_interface_list.append(") = 0;\n")
    class CallbackMethodStructTemplate:
        # Line-list "struct template" used when generating callback response
        # senders.  Both hooks are intentionally no-ops: callback responses
        # need no wrapper code around the generated setter lines.
        @staticmethod
        def append_prolog(line_list):
            pass

        @staticmethod
        def append_epilog(line_list):
            pass

        # Name of the generated C++ variable that receives the set<Type>() calls.
        container_name = "jsonMessage"
    # Generates common code for event sending and callback response data sending.
    @staticmethod
    def generate_send_method(parameters, event_name, domain_name, ad_hoc_type_writer, decl_parameter_list,
                             method_struct_template,
                             generator_method_list, method_template, template_params):
        """Emit one C++ "send" method from a protocol method description.

        For every JSON parameter this resolves its type binding (creating
        ad hoc types as needed), appends a C++ declaration to
        decl_parameter_list, and builds a set<Type>() line on the template's
        container.  The finished body is substituted into method_template and
        appended to generator_method_list.
        """
        method_line_list = []
        if parameters:
            method_struct_template.append_prolog(method_line_list)
            for json_parameter in parameters:
                parameter_name = json_parameter["name"]
                param_type_binding = Generator.resolve_type_and_generate_ad_hoc(json_parameter, event_name, domain_name, ad_hoc_type_writer, "")
                raw_type = param_type_binding.reduce_to_raw_type()
                raw_type_binding = RawTypeBinding(raw_type)

                optional = bool(json_parameter.get("optional"))

                setter_type = raw_type.get_setter_name()

                type_model = param_type_binding.get_type_model()
                raw_type_model = raw_type_binding.get_type_model()
                # Optional parameters use the "optional" variants of both
                # models so the declared C++ type and setter expression match.
                if optional:
                    type_model = type_model.get_optional()
                    raw_type_model = raw_type_model.get_optional()

                annotated_type = type_model.get_input_param_type_text()
                mode_type_binding = param_type_binding

                decl_parameter_list.append("%s %s" % (annotated_type, parameter_name))

                setter_argument = raw_type_model.get_event_setter_expression_pattern() % parameter_name
                if mode_type_binding.get_setter_value_expression_pattern():
                    setter_argument = mode_type_binding.get_setter_value_expression_pattern() % setter_argument
                setter_code = " %s->set%s(\"%s\", %s);\n" % (method_struct_template.container_name, setter_type, parameter_name, setter_argument)
                # Guard optional parameters: only emit the setter when a value
                # was actually provided.
                if optional:
                    setter_code = (" if (%s)\n " % parameter_name) + setter_code
                method_line_list.append(setter_code)
            method_struct_template.append_epilog(method_line_list)

        generator_method_list.append(method_template.substitute(None,
            domainName=domain_name,
            parameters=", ".join(decl_parameter_list),
            code="".join(method_line_list), **template_params))
@staticmethod
def resolve_type_and_generate_ad_hoc(json_param, method_name, domain_name, ad_hoc_type_writer, container_relative_name_prefix_param):
param_name = json_param["name"]
ad_hoc_type_list = []
class AdHocTypeContext:
container_full_name_prefix = "<not yet defined>"
container_relative_name_prefix = container_relative_name_prefix_param
@staticmethod
def get_type_name_fix():
class NameFix:
class_name = Capitalizer.lower_camel_case_to_upper(param_name)
@staticmethod
def output_comment(writer):
writer.newline("// Named after parameter '%s' while generating command/event %s.\n" % (param_name, method_name))
return NameFix
@staticmethod
def add_type(binding):
ad_hoc_type_list.append(binding)
type_binding = resolve_param_type(json_param, domain_name, AdHocTypeContext)
class InterfaceForwardListener:
@staticmethod
def add_type_data(type_data):
pass
class InterfaceResolveContext:
forward_listener = InterfaceForwardListener
for type in ad_hoc_type_list:
type.resolve_inner(InterfaceResolveContext)
class InterfaceGenerateContext:
validator_writer = "not supported in InterfaceGenerateContext"
cpp_writer = validator_writer
for type in ad_hoc_type_list:
generator = type.get_code_generator()
if generator:
generator.generate_type_builder(ad_hoc_type_writer, InterfaceGenerateContext)
return type_binding
    @staticmethod
    def process_types(type_map):
        """Generate type-builder code for every domain/type in type_map.

        Emits the main builder bodies, then forward declarations, then
        typedefs, each pass wrapped in per-domain namespaces that are only
        opened lazily when a domain actually produces output.
        """
        output = Generator.type_builder_fragments

        class GenerateContext:
            validator_writer = Writer(Generator.validator_impl_list, "")
            cpp_writer = Writer(Generator.type_builder_impl_list, "")

        def generate_all_domains_code(out, type_data_callback):
            writer = Writer(out, "")
            for domain_data in type_map.domains():
                domain_fixes = DomainNameFixes.get_fixed_data(domain_data.name())
                domain_guard = domain_fixes.get_guard()

                # Empty list used as a mutable flag so the nested closure can
                # record whether the namespace was opened.
                namespace_declared = []

                def namespace_lazy_generator():
                    # Open the guard + namespace only on first use, so domains
                    # with no generated types produce no output at all.
                    if not namespace_declared:
                        if domain_guard:
                            domain_guard.generate_open(out)
                        writer.newline("namespace ")
                        writer.append(domain_data.name())
                        writer.append(" {\n")
                        # What is a better way to change value from outer scope?
                        namespace_declared.append(True)
                    return writer

                for type_data in domain_data.types():
                    type_data_callback(type_data, namespace_lazy_generator)

                if namespace_declared:
                    writer.append("} // ")
                    writer.append(domain_data.name())
                    writer.append("\n\n")
                    if domain_guard:
                        domain_guard.generate_close(out)

        def create_type_builder_caller(generate_pass_id):
            # Returns a callback that only runs generators belonging to the
            # requested pass (MAIN or TYPEDEF).
            def call_type_builder(type_data, writer_getter):
                code_generator = type_data.get_binding().get_code_generator()
                if code_generator and generate_pass_id == code_generator.get_generate_pass_id():
                    writer = writer_getter()
                    code_generator.generate_type_builder(writer, GenerateContext)
            return call_type_builder

        generate_all_domains_code(output, create_type_builder_caller(TypeBuilderPass.MAIN))

        Generator.type_builder_forwards.append("// Forward declarations.\n")

        def generate_forward_callback(type_data, writer_getter):
            # Only forward-declare types something actually referenced.
            if type_data in global_forward_listener.type_data_set:
                binding = type_data.get_binding()
                binding.get_code_generator().generate_forward_declaration(writer_getter())
        generate_all_domains_code(Generator.type_builder_forwards, generate_forward_callback)

        Generator.type_builder_forwards.append("// End of forward declarations.\n\n")

        Generator.type_builder_forwards.append("// Typedefs.\n")
        generate_all_domains_code(Generator.type_builder_forwards, create_type_builder_caller(TypeBuilderPass.TYPEDEF))
        Generator.type_builder_forwards.append("// End of typedefs.\n\n")
def flatten_list(input):
    """Return a flat list of every non-list item in the arbitrarily nested
    list *input*, preserving left-to-right (depth-first) order.

    Only ``list`` instances are descended into; tuples and other iterables
    are treated as leaf items.
    """
    def _walk(seq):
        for element in seq:
            if isinstance(element, list):
                for leaf in _walk(element):
                    yield leaf
            else:
                yield element
    return list(_walk(input))
# A writer that only updates file if it actually changed to better support incremental build.
class SmartOutput:
    """Buffers text in memory and flushes it to disk on close(), skipping
    the write when the on-disk content is already identical (preserves
    timestamps for incremental builds).  The module-level flag
    ``write_always`` forces an unconditional write.
    """
    def __init__(self, file_name):
        self.file_name_ = file_name
        self.output_ = ""

    def write(self, text):
        # Accumulate only; nothing touches the filesystem until close().
        self.output_ += text

    def close(self):
        text_changed = True
        try:
            # "with" guarantees the handle is closed even if read() fails.
            with open(self.file_name_, "r") as read_file:
                text_changed = read_file.read() != self.output_
        except (IOError, OSError):
            # File missing or unreadable: treat as changed and (over)write.
            # Narrowed from a bare "except:" that also swallowed
            # KeyboardInterrupt/SystemExit.
            pass

        if text_changed or write_always:
            with open(self.file_name_, "w") as out_file:
                out_file.write(self.output_)
# Run the whole generation pass, then emit every output file through
# SmartOutput so files whose content did not change keep their timestamps.
Generator.go()

backend_h_file = SmartOutput(output_header_dirname + "/InspectorBackendDispatcher.h")
backend_cpp_file = SmartOutput(output_cpp_dirname + "/InspectorBackendDispatcher.cpp")

frontend_h_file = SmartOutput(output_header_dirname + "/InspectorFrontend.h")
frontend_cpp_file = SmartOutput(output_cpp_dirname + "/InspectorFrontend.cpp")

typebuilder_h_file = SmartOutput(output_header_dirname + "/InspectorTypeBuilder.h")
typebuilder_cpp_file = SmartOutput(output_cpp_dirname + "/InspectorTypeBuilder.cpp")

backend_js_file = SmartOutput(output_cpp_dirname + "/InspectorBackendCommands.js")

# Each file is filled by substituting the accumulated Generator lists into
# its template.  flatten_list() is needed where nested lists were appended.
backend_h_file.write(Templates.backend_h.substitute(None,
    virtualSetters="\n".join(Generator.backend_virtual_setters_list),
    agentInterfaces="".join(flatten_list(Generator.backend_agent_interface_list)),
    methodNamesEnumContent="\n".join(Generator.method_name_enum_list)))

backend_cpp_file.write(Templates.backend_cpp.substitute(None,
    constructorInit="\n".join(Generator.backend_constructor_init_list),
    setters="\n".join(Generator.backend_setters_list),
    fieldDeclarations="\n".join(Generator.backend_field_list),
    methodNameDeclarations="\n".join(Generator.backend_method_name_declaration_list),
    methods="\n".join(Generator.backend_method_implementation_list),
    methodDeclarations="\n".join(Generator.backend_method_declaration_list),
    messageHandlers="\n".join(Generator.method_handler_list)))

frontend_h_file.write(Templates.frontend_h.substitute(None,
    fieldDeclarations="".join(Generator.frontend_class_field_lines),
    domainClassList="".join(Generator.frontend_domain_class_lines)))

frontend_cpp_file.write(Templates.frontend_cpp.substitute(None,
    constructorInit="".join(Generator.frontend_constructor_init_list),
    methods="\n".join(Generator.frontend_method_list)))

typebuilder_h_file.write(Templates.typebuilder_h.substitute(None,
    typeBuilders="".join(flatten_list(Generator.type_builder_fragments)),
    forwards="".join(Generator.type_builder_forwards),
    validatorIfdefName=VALIDATOR_IFDEF_NAME))

typebuilder_cpp_file.write(Templates.typebuilder_cpp.substitute(None,
    enumConstantValues=EnumConstants.get_enum_constant_code(),
    implCode="".join(flatten_list(Generator.type_builder_impl_list)),
    validatorCode="".join(flatten_list(Generator.validator_impl_list)),
    validatorIfdefName=VALIDATOR_IFDEF_NAME))

backend_js_file.write(Templates.backend_js.substitute(None,
    domainInitializers="".join(Generator.backend_js_domain_initializer_list)))

# close() performs the actual change-detecting disk writes.
backend_h_file.close()
backend_cpp_file.close()

frontend_h_file.close()
frontend_cpp_file.close()

typebuilder_h_file.close()
typebuilder_cpp_file.close()

backend_js_file.close()
| bsd-3-clause |
import numpy as np
import loudness as ln

# Manual check of loudness.FrameGenerator: feed a ramp signal through in
# bufSize blocks and verify each emitted frame matches the expected slice.
fs = 32000
N = 10000
x = np.arange(0, N)

# Input SignalBank
bufSize = 32
nEars = 2
nChannels = 1
inputBank = ln.SignalBank()
inputBank.initialize(nEars, nChannels, bufSize, int(fs))

# Frame generator
frameSize = 2048
hopSize = 32
startAtWindowCentre = True
frameGen = ln.FrameGenerator(frameSize, hopSize, startAtWindowCentre)
frameGen.initialize(inputBank)
outputBank = frameGen.getOutput()

nBlocks = int(x.size / bufSize)
# Number of blocks that actually yield a frame depends on the start-up
# latency, which is halved when the first window is centred on sample 0.
if startAtWindowCentre:
    nProcessedBlocks = int(nBlocks - 0.5 * frameSize / hopSize + 1)
else:
    nProcessedBlocks = int(nBlocks - frameSize / hopSize + 1)
frames = np.zeros((nEars, nProcessedBlocks, frameSize))
frameIndex = 0
for block in range(nBlocks):
    # Update input bank: same ramp into both ears.
    idx = block * bufSize
    inputBank.setSignal(0, 0, x[idx:idx + bufSize])
    inputBank.setSignal(1, 0, x[idx:idx + bufSize])

    # process it
    frameGen.process(inputBank)

    # get output (trigger fires once a complete frame is available)
    if(outputBank.getTrig()):
        frames[:, frameIndex, :] = outputBank.getSignals().reshape((2, frameSize))
        frameIndex += 1

# Check frames are correct
if startAtWindowCentre:
    # Prepend zero padding for the centred first window.  np.zeros requires
    # an integer length: np.ceil returns a float, which raises TypeError on
    # modern NumPy, so cast explicitly.
    x = np.hstack((np.zeros(int(np.ceil((frameSize - 1) / 2.0))), x))
for ear in range(nEars):
    for i, frame in enumerate(frames[ear]):
        start = i * hopSize
        if all(frame == x[start:start + frameSize]):
            print("Frame number %d correct" % i)
        else:
            print("Frame number %d incorrect" % i)
| gpl-3.0 |
aricchen/openHR | openerp/addons/account_followup/wizard/__init__.py | 437 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_followup_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
minhtuancn/odoo | addons/association/__openerp__.py | 260 | 1700 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Associations Management',
'version': '0.1',
'category': 'Specific Industry Applications',
'description': """
This module is to configure modules related to an association.
==============================================================
It installs the profile for associations to manage events, registrations, memberships,
membership products (schemes).
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'membership', 'event'],
'data': ['security/ir.model.access.csv', 'profile_association.xml'],
'demo': [],
'installable': True,
'auto_install': False,
'website': 'https://www.odoo.com'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
elba7r/builder | frappe/patches/v5_0/update_shared.py | 20 | 1398 | import frappe
import frappe.share
def execute():
    """v5.0 migration: introduce document sharing.

    Grants share rights wherever write was allowed, shares every user's own
    User record with them, and converts the legacy "Event User" / "Note User"
    child tables into DocShare records before deleting those DocTypes.
    """
    frappe.reload_doc("core", "doctype", "docperm")
    frappe.reload_doc("core", "doctype", "docshare")
    frappe.reload_doc('email', 'doctype', 'email_account')

    # default share to all writes
    frappe.db.sql("""update tabDocPerm set `share`=1 where ifnull(`write`,0)=1 and ifnull(`permlevel`,0)=0""")

    # every user must have access to his / her own detail
    users = frappe.get_all("User", filters={"user_type": "System User"})
    usernames = [user.name for user in users]
    for user in usernames:
        frappe.share.add("User", user, user, write=1, share=1)

    # move event user to shared
    if frappe.db.exists("DocType", "Event User"):
        for event in frappe.get_all("Event User", fields=["parent", "person"]):
            if event.person in usernames:
                # Orphaned rows (parent Event gone) are purged instead of shared.
                if not frappe.db.exists("Event", event.parent):
                    frappe.db.sql("delete from `tabEvent User` where parent = %s",event.parent)
                else:
                    frappe.share.add("Event", event.parent, event.person, write=1)
        frappe.delete_doc("DocType", "Event User")

    # move note user to shared
    if frappe.db.exists("DocType", "Note User"):
        for note in frappe.get_all("Note User", fields=["parent", "user", "permission"]):
            # "Read" permission maps to read-only share; anything else to write.
            perm = {"read": 1} if note.permission=="Read" else {"write": 1}
            if note.user in usernames:
                frappe.share.add("Note", note.parent, note.user, **perm)
        frappe.delete_doc("DocType", "Note User")
| mit |
robinkraft/cloudless | src/cloudless/train/predict.py | 1 | 7335 | import os
import caffe
import numpy as np
import plyvel
import skimage
from caffe_pb2 import Datum
import constants
def predict(image_path):
    """
    Takes a single image, and makes a prediction whether it has a cloud or not.

    Loads the trained Caffe model, runs the image through it, and prints the
    cloud probability as a percentage.
    """
    print "Generating prediction for %s..." % image_path

    net, transformer = _initialize_caffe()
    im = caffe.io.load_image(image_path)
    prob = _predict_image(im, net, transformer)
    print "Probability this image has a cloud: {0:.2f}%".format(prob)
def test_validation():
    """
    Takes validation images and runs them through a trained model to see how
    well they do. Generates statistics like precision and recall, F1, and a confusion matrix,
    in order to gauge progress.

    Results are printed and also appended to OUTPUT_LOG_PREFIX + ".statistics.txt".
    """
    print "Generating predictions for validation images..."

    validation_data = _load_validation_data()
    target_details = _run_through_caffe(validation_data)
    statistics = _calculate_positives_negatives(target_details)

    accuracy = _calculate_accuracy(statistics)
    precision = _calculate_precision(statistics)
    recall = _calculate_recall(statistics)
    f1 = _calculate_f1(precision, recall)

    # TODO: Write these out to a file as well as the screen.
    results = ""
    results += "\n"
    results += "\nStatistics on validation dataset using threshold %f:" % constants.THRESHOLD
    results += "\n\tAccuracy: {0:.2f}%".format(accuracy)
    results += "\n\tPrecision: %.2f" % precision
    results += "\n\tRecall: %.2f" % recall
    results += "\n\tF1 Score: %.2f" % f1
    results += "\n"
    results += _print_confusion_matrix(statistics)
    print results

    # Persist the same report that was just printed.
    with open(constants.OUTPUT_LOG_PREFIX + ".statistics.txt", "w") as f:
        f.write(results)
def _load_validation_data():
    """
    Loads all of our validation data from our leveldb database, producing unrolled numpy input
    vectors ready to test along with their correct, expected target values.

    Returns a dict with "input_vectors" (N x H x W x 3 uint8 array) and
    "expected_targets" (N labels).
    """
    print "\tLoading validation data..."

    input_vectors = []
    expected_targets = []

    db = plyvel.DB(constants.VALIDATION_FILE)
    for key, value in db:
        # Each leveldb value is a serialized Caffe Datum protobuf.
        datum = Datum()
        datum.ParseFromString(value)

        data = np.fromstring(datum.data, dtype=np.uint8)
        data = np.reshape(data, (3, constants.HEIGHT, constants.WIDTH))
        # Move the color channel to the end to match what Caffe wants.
        data = np.swapaxes(data, 0, 2) # Swap channel with width.
        data = np.swapaxes(data, 0, 1) # Swap width with height, to yield final h x w x channel.

        input_vectors.append(data)
        expected_targets.append(datum.label)
    db.close()

    print "\t\tValidation data has %d images" % len(input_vectors)

    return {
        "input_vectors": np.asarray(input_vectors),
        "expected_targets": np.asarray(expected_targets)
    }
def _initialize_caffe():
    """
    Initializes Caffe to prepare to run some data through the model for inference.

    Returns (net, transformer): the loaded network and the input
    preprocessor configured to match its training-time preprocessing.
    """
    caffe.set_mode_gpu()
    net = caffe.Net(constants.DEPLOY_FILE, constants.WEIGHTS_FINETUNED, caffe.TEST)

    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
    # PIL.Image loads the data with the channel last.
    # TODO: Think through whether these should be BGR during training and validation.
    transformer.set_transpose("data", (2, 0, 1))
    # Mean pixel.
    transformer.set_mean("data", np.load(constants.TRAINING_MEAN_PICKLE).mean(1).mean(1))
    # The reference model operates on images in [0, 255] range instead of [0, 1].
    transformer.set_raw_scale("data", 255)
    # The reference model has channels in BGR order instead of RGB.
    transformer.set_channel_swap("data", (2, 1, 0))

    # Batch size 1: images are pushed through one at a time.
    net.blobs["data"].reshape(1, 3, constants.INFERENCE_HEIGHT, constants.INFERENCE_WIDTH)

    return (net, transformer)
def _run_through_caffe(validation_data):
    """
    Runs our validation images through Caffe.

    Returns a list of dicts with "expected_target" and "predicted_target"
    (1 when the cloud probability meets constants.THRESHOLD, else 0).
    """
    print "\tInitializing Caffe..."
    net, transformer = _initialize_caffe()

    print "\tComputing probabilities using Caffe..."
    results = []
    for idx in range(len(validation_data["input_vectors"])):
        im = validation_data["input_vectors"][idx]
        prob = _predict_image(im, net, transformer)
        expected_target = validation_data["expected_targets"][idx]
        # Threshold the percentage probability into a binary prediction.
        predicted_target = 0
        if prob >= constants.THRESHOLD:
            predicted_target = 1
        results.append({
            "expected_target": expected_target,
            "predicted_target": predicted_target
        })

    return results
def _predict_image(im, net, transformer):
    """
    Given a caffe.io.load_image, returns the probability that it contains a cloud.

    The result is a percentage in [0, 100].
    """
    net.blobs["data"].data[...] = transformer.preprocess("data", im)
    out = net.forward()

    probs = out["prob"][0]
    # Index 1 of the softmax output is treated as the "cloud" class here;
    # index 0 would be "no cloud" (assumption from usage — confirm against
    # the training label encoding).
    prob_cloud = probs[1] * 100.0
    return prob_cloud
def _calculate_positives_negatives(target_details):
"""
Takes expected and actual target values, generating true and false positives and negatives,
including the actual correct # of positive and negative values.
"""
true_positive = 0
true_negative = 0
false_negative = 0
false_positive = 0
actual_positive = 0
actual_negative = 0
for idx in range(len(target_details)):
predicted_target = target_details[idx]["predicted_target"]
expected_target = target_details[idx]["expected_target"]
if expected_target == 1:
actual_positive = actual_positive + 1
else:
actual_negative = actual_negative + 1
if predicted_target == 1 and expected_target == 1:
true_positive = true_positive + 1
elif predicted_target == 0 and expected_target == 0:
true_negative = true_negative + 1
elif predicted_target == 1 and expected_target == 0:
false_positive = false_positive + 1
elif predicted_target == 0 and expected_target == 1:
false_negative = false_negative + 1
return {
"true_positive": float(true_positive),
"false_positive": float(false_positive),
"actual_positive": float(actual_positive),
"true_negative": float(true_negative),
"false_negative": float(false_negative),
"actual_negative": float(actual_negative),
}
def _calculate_accuracy(s):
top = (s["true_positive"] + s["true_negative"])
bottom = (s["actual_positive"] + s["actual_negative"])
return (top / bottom) * 100.0
def _calculate_precision(s):
return s["true_positive"] / (s["true_positive"] + s["false_positive"])
def _calculate_recall(s):
return s["true_positive"] / (s["true_positive"] + s["false_negative"])
def _calculate_f1(precision, recall):
return 2.0 * ((precision * recall) / (precision + recall))
def _print_confusion_matrix(s):
results = ""
results += "\nConfusion matrix:"
results += "\n\t\t\t\tPositive\t\tNegative"
results += "\nPositive (%d)\t\t\tTrue Positive (%d)\tFalse Positive (%d)" % \
(s["actual_positive"], s["true_positive"], s["false_positive"])
results += "\nNegative (%d)\t\t\tFalse Negative (%d)\tTrue Negative (%d)" % \
(s["actual_negative"], s["false_negative"], s["true_negative"])
return results
| apache-2.0 |
carlomt/dicom_tools | dicom_tools/pyqtgraph/exporters/CSVExporter.py | 44 | 2798 | from ..Qt import QtGui, QtCore
from .Exporter import Exporter
from ..parametertree import Parameter
from .. import PlotItem
__all__ = ['CSVExporter']
class CSVExporter(Exporter):
    """Exports the curves of a PlotItem as CSV or TSV text."""
    Name = "CSV from plot data"
    windows = []

    def __init__(self, item):
        Exporter.__init__(self, item)
        # User-editable export options shown in the export dialog.
        self.params = Parameter(name='params', type='group', children=[
            {'name': 'separator', 'type': 'list', 'value': 'comma', 'values': ['comma', 'tab']},
            {'name': 'precision', 'type': 'int', 'value': 10, 'limits': [0, None]},
            {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']}
        ])

    def parameters(self):
        # Parameter tree consumed by the export dialog.
        return self.params

    def export(self, fileName=None):
        """Write every curve of the selected PlotItem to fileName.

        With no fileName a save dialog is opened (which re-invokes export
        with the chosen path).  In "(x,y) per plot" mode each curve gets its
        own x column; otherwise only the first curve's x column is written.
        Note: each data row ends with a trailing separator before the
        newline (the separator is written after every value).
        """
        if not isinstance(self.item, PlotItem):
            raise Exception("Must have a PlotItem selected for CSV export.")

        if fileName is None:
            self.fileSaveDialog(filter=["*.csv", "*.tsv"])
            return

        fd = open(fileName, 'w')
        data = []
        header = []

        appendAllX = self.params['columnMode'] == '(x,y) per plot'

        for i, c in enumerate(self.item.curves):
            cd = c.getData()
            if cd[0] is None:
                # Curve has no data; skip it entirely.
                continue
            data.append(cd)
            # Named curves get quoted "<name>_x"/"<name>_y" headers (with CSV
            # double-quote escaping); unnamed ones get positional x%04d/y%04d.
            if hasattr(c, 'implements') and c.implements('plotData') and c.name() is not None:
                name = c.name().replace('"', '""') + '_'
                xName, yName = '"'+name+'x"', '"'+name+'y"'
            else:
                xName = 'x%04d' % i
                yName = 'y%04d' % i
            if appendAllX or i == 0:
                header.extend([xName, yName])
            else:
                header.extend([yName])

        if self.params['separator'] == 'comma':
            sep = ','
        else:
            sep = '\t'

        fd.write(sep.join(header) + '\n')
        i = 0
        # %0.<precision>g format for all numeric output.
        numFormat = '%%0.%dg' % self.params['precision']
        numRows = max([len(d[0]) for d in data])
        for i in range(numRows):
            for j, d in enumerate(data):
                # write x value if this is the first column, or if we want x
                # for all rows
                if appendAllX or j == 0:
                    if d is not None and i < len(d[0]):
                        fd.write(numFormat % d[0][i] + sep)
                    else:
                        # Shorter curves are padded with a blank cell.
                        fd.write(' %s' % sep)

                # write y value
                if d is not None and i < len(d[1]):
                    fd.write(numFormat % d[1][i] + sep)
                else:
                    fd.write(' %s' % sep)
            fd.write('\n')
        fd.close()

# Make this exporter available in the export menu.
CSVExporter.register()
| mit |
odootr/odoo | addons/account_analytic_default/account_analytic_default.py | 57 | 9022 | # -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_default(osv.osv):
    """Rule table mapping (product, partner, user, company, date range) to a
    default analytic account.  The most specific matching rule wins."""
    _name = "account.analytic.default"
    _description = "Analytic Distribution"
    _rec_name = "analytic_id"
    _order = "sequence"
    _columns = {
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of analytic distribution"),
        'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
        'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Select a product which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this product, it will automatically take this as an analytic account)"),
        'partner_id': fields.many2one('res.partner', 'Partner', ondelete='cascade', help="Select a partner which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this partner, it will automatically take this as an analytic account)"),
        'user_id': fields.many2one('res.users', 'User', ondelete='cascade', help="Select a user which will use analytic account specified in analytic default."),
        'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', help="Select a company which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this company, it will automatically take this as an analytic account)"),
        'date_start': fields.date('Start Date', help="Default start date for this Analytic Account."),
        'date_stop': fields.date('End Date', help="Default end date for this Analytic Account."),
    }

    def account_get(self, cr, uid, product_id=None, partner_id=None, user_id=None, date=None, company_id=None, context=None):
        """Return the best-matching rule record (or False).

        The domain matches rules whose criteria are either unset or equal to
        the given values; among the matches, the rule with the most criteria
        filled in (highest specificity) is returned.
        """
        domain = []
        if product_id:
            domain += ['|', ('product_id', '=', product_id)]
        domain += [('product_id','=', False)]
        if partner_id:
            domain += ['|', ('partner_id', '=', partner_id)]
        domain += [('partner_id', '=', False)]
        if company_id:
            domain += ['|', ('company_id', '=', company_id)]
        domain += [('company_id', '=', False)]
        if user_id:
            domain += ['|',('user_id', '=', user_id)]
        domain += [('user_id','=', False)]
        if date:
            domain += ['|', ('date_start', '<=', date), ('date_start', '=', False)]
            domain += ['|', ('date_stop', '>=', date), ('date_stop', '=', False)]
        best_index = -1
        res = False
        for rec in self.browse(cr, uid, self.search(cr, uid, domain, context=context), context=context):
            # Specificity score: one point per criterion the rule defines.
            index = 0
            if rec.product_id: index += 1
            if rec.partner_id: index += 1
            if rec.company_id: index += 1
            if rec.user_id: index += 1
            if rec.date_start: index += 1
            if rec.date_stop: index += 1
            if index > best_index:
                res = rec
                best_index = index
        return res
class account_invoice_line(osv.osv):
    _inherit = "account.invoice.line"
    _description = "Invoice Line"

    def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
        # Run the standard product onchange, then overlay the default
        # analytic account matched for (product, partner, current user, today).
        res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id=currency_id, company_id=company_id, context=context)
        rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), company_id=company_id, context=context)
        if rec:
            res_prod['value'].update({'account_analytic_id': rec.analytic_id.id})
        else:
            # No rule matched: clear any previously proposed analytic account.
            res_prod['value'].update({'account_analytic_id': False})
        return res_prod
class stock_picking(osv.osv):
    _inherit = "stock.picking"

    def _get_account_analytic_invoice(self, cursor, user, picking, move_line):
        # Prefer the analytic-default rule for this product/partner; fall
        # back to the standard behaviour when no rule matches.
        partner_id = picking.partner_id and picking.partner_id.id or False
        rec = self.pool.get('account.analytic.default').account_get(cursor, user, move_line.product_id.id, partner_id, user, time.strftime('%Y-%m-%d'))

        if rec:
            return rec.analytic_id.id

        return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)
class sale_order_line(osv.osv):
    _inherit = "sale.order.line"

    # Method overridden to set the analytic account by default on criterion match
    def invoice_line_create(self, cr, uid, ids, context=None):
        create_ids = super(sale_order_line, self).invoice_line_create(cr, uid, ids, context=context)
        if not ids:
            return create_ids
        # All lines belong to the same order, so one line is enough to
        # recover the order's partner and salesperson for rule matching.
        sale_line = self.browse(cr, uid, ids[0], context=context)
        inv_line_obj = self.pool.get('account.invoice.line')
        anal_def_obj = self.pool.get('account.analytic.default')

        for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
            rec = anal_def_obj.account_get(cr, uid, line.product_id.id, sale_line.order_id.partner_id.id, sale_line.order_id.user_id.id, time.strftime('%Y-%m-%d'), context=context)

            if rec:
                inv_line_obj.write(cr, uid, [line.id], {'account_analytic_id': rec.analytic_id.id}, context=context)
        return create_ids
class product_product(osv.Model):
    _inherit = 'product.product'

    def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
        # Number of analytic-default rules targeting each product variant
        # (shown as a stat button on the product form).
        Analytic = self.pool['account.analytic.default']
        return {
            product_id: Analytic.search_count(cr, uid, [('product_id', '=', product_id)], context=context)
            for product_id in ids
        }

    _columns = {
        'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
    }
class product_template(osv.Model):
    _inherit = 'product.template'

    def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
        # Template-level count = sum of rule counts over its variants.
        Analytic = self.pool['account.analytic.default']
        res = {}
        for product_tmpl_id in self.browse(cr, uid, ids, context=context):
            res[product_tmpl_id.id] = sum([p.rules_count for p in product_tmpl_id.product_variant_ids])
        return res

    _columns = {
        'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
    }

    def action_view_rules(self, cr, uid, ids, context=None):
        """Open the analytic-default rules list filtered on this template's variants."""
        products = self._get_products(cr, uid, ids, context=context)
        result = self._get_act_window_dict(cr, uid, 'account_analytic_default.action_product_default_list', context=context)
        result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
        # Remove context so it is not going to filter on product_id with active_id of template
        result['context'] = "{}"
        return result
class stock_move(osv.Model):
    _inherit = 'stock.move'

    def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
        # It will set the default analytic account on the invoice line,
        # unless the caller already provided one.
        partner_id = self.pool['account.invoice'].browse(cr, uid, invoice_line_vals.get('invoice_id'), context=context).partner_id.id
        if 'account_analytic_id' not in invoice_line_vals or not invoice_line_vals.get('account_analytic_id'):
            rec = self.pool['account.analytic.default'].account_get(cr, uid, move.product_id.id, partner_id, uid, time.strftime('%Y-%m-%d'), company_id=move.company_id.id, context=context)
            if rec:
                invoice_line_vals.update({'account_analytic_id': rec.analytic_id.id})
        res = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
        return res
| agpl-3.0 |
vmthunder/nova | nova/scheduler/filters/num_instances_filter.py | 15 | 2837 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
# Global upper bound on instances per host; hosts at or above this count are
# filtered out.  May be overridden per-aggregate (see
# AggregateNumInstancesFilter below).
max_instances_per_host_opt = cfg.IntOpt("max_instances_per_host",
        default=50,
        help="Ignore hosts that have too many instances")
CONF = cfg.CONF
CONF.register_opt(max_instances_per_host_opt)
class NumInstancesFilter(filters.BaseHostFilter):
    """Reject hosts that already run the configured maximum of instances."""

    def _get_max_instances_per_host(self, host_state, filter_properties):
        # Global limit; subclasses may derive a per-host value instead.
        return CONF.max_instances_per_host

    def host_passes(self, host_state, filter_properties):
        """Return True when the host still has room for another instance."""
        limit = self._get_max_instances_per_host(host_state,
                                                 filter_properties)
        if host_state.num_instances >= limit:
            LOG.debug("%(host_state)s fails num_instances check: Max "
                      "instances per host is set to %(max_instances)s",
                      {'host_state': host_state,
                       'max_instances': limit})
            return False
        return True
class AggregateNumInstancesFilter(NumInstancesFilter):
    """NumInstancesFilter with a per-aggregate 'max_instances_per_host'.
    Falls back to the global max_instances_per_host option if no
    per-aggregate setting is found.
    """
    def _get_max_instances_per_host(self, host_state, filter_properties):
        # TODO(uni): DB query in filter is a performance hit, especially for
        # system with lots of hosts. Will need a general solution here to fix
        # all filters with aggregate DB call things.
        # Gather every 'max_instances_per_host' value set on the aggregates
        # this host belongs to; may be empty.
        aggregate_vals = utils.aggregate_values_from_db(
            filter_properties['context'],
            host_state.host,
            'max_instances_per_host')
        try:
            value = utils.validate_num_values(
                aggregate_vals, CONF.max_instances_per_host, cast_to=int)
        except ValueError as e:
            # Malformed aggregate metadata: warn and use the global limit.
            LOG.warn(_LW("Could not decode max_instances_per_host: '%s'"), e)
            value = CONF.max_instances_per_host
        return value
| apache-2.0 |
uiri/pxqz | venv/lib/python2.7/site-packages/django/core/serializers/pyyaml.py | 81 | 2170 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
from StringIO import StringIO
import decimal
import yaml
from django.db import models
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
class DjangoSafeDumper(yaml.SafeDumper):
    """SafeDumper that also knows how to emit ``decimal.Decimal`` values.

    Decimals are rendered as plain YAML strings so the output stays within
    the "safe" YAML subset.
    """
    def represent_decimal(self, data):
        value = str(data)
        return self.represent_scalar('tag:yaml.org,2002:str', value)
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
class Serializer(PythonSerializer):
    """Serialize a queryset to YAML."""
    internal_use_only = False

    def handle_field(self, obj, field):
        # Base YAML can represent dates and datetimes but not bare time
        # values, and we stick to the "safe" dumper for interoperability.
        # Times are therefore emitted as strings: lossy, but loadable from
        # any language, unlike a "!!python/time" tag.
        if isinstance(field, models.TimeField):
            value = getattr(obj, field.name)
            if value is not None:
                self._current[field.name] = str(value)
                return
        super(Serializer, self).handle_field(obj, field)

    def end_serialization(self):
        # Dump everything accumulated by the Python serializer in one go.
        yaml.dump(self.objects, self.stream,
                  Dumper=DjangoSafeDumper, **self.options)

    def getvalue(self):
        return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of YAML data.
    Yields deserialized objects one at a time; any failure while loading
    or deserializing is re-raised as DeserializationError.  GeneratorExit
    is propagated untouched so generator cleanup still works.
    """
    # Accept either a raw string or an open file-like object.
    if isinstance(stream_or_string, basestring):
        stream = StringIO(stream_or_string)
    else:
        stream = stream_or_string
    try:
        # yaml.safe_load yields plain Python structures; PythonDeserializer
        # turns them back into model instances.
        for obj in PythonDeserializer(yaml.safe_load(stream), **options):
            yield obj
    except GeneratorExit:
        raise
    except Exception, e:
        # Map to deserializer error
        raise DeserializationError(e)
| gpl-3.0 |
Johnzero/OE7 | openerp/addons-modules/hr_attendance/wizard/hr_attendance_error.py | 7 | 2918 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_attendance_error(osv.osv_memory):
    """Wizard collecting parameters for the attendance error report."""
    _name = 'hr.attendance.error'
    _description = 'Print Error Attendance Report'
    _columns = {
        'init_date': fields.date('Starting Date', required=True),
        'end_date': fields.date('Ending Date', required=True),
        'max_delay': fields.integer('Max. Delay (Min)', required=True)
    }
    _defaults = {
        # Both dates default to today; the tolerated delay defaults to 2h.
        'init_date': lambda *a: time.strftime('%Y-%m-%d'),
        'end_date': lambda *a: time.strftime('%Y-%m-%d'),
        'max_delay': 120,
    }
    def print_report(self, cr, uid, ids, context=None):
        """Return the report action for the selected employees.

        Collects sign_in/sign_out attendance records of the employees in
        context['active_ids'] within the chosen date range and raises a
        user error when none are found.
        """
        emp_ids = []
        data_error = self.read(cr, uid, ids, context=context)[0]
        date_from = data_error['init_date']
        date_to = data_error['end_date']
        # Parameterized query: employee ids come from the current selection,
        # dates are compared on the day portion of the attendance timestamp.
        cr.execute("SELECT id FROM hr_attendance WHERE employee_id IN %s AND to_char(name,'YYYY-mm-dd')<=%s AND to_char(name,'YYYY-mm-dd')>=%s AND action IN %s ORDER BY name" ,(tuple(context['active_ids']), date_to, date_from, tuple(['sign_in','sign_out'])))
        attendance_ids = [x[0] for x in cr.fetchall()]
        if not attendance_ids:
            raise osv.except_osv(_('No Data Available !'), _('No records are found for your selection!'))
        attendance_records = self.pool.get('hr.attendance').browse(cr, uid, attendance_ids, context=context)
        # De-duplicate employee ids while preserving first-seen order.
        for rec in attendance_records:
            if rec.employee_id.id not in emp_ids:
                emp_ids.append(rec.employee_id.id)
        data_error['emp_ids'] = emp_ids
        datas = {
            'ids': [],
            'model': 'hr.employee',
            'form': data_error
        }
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'hr.attendance.error',
            'datas': datas,
        }
hr_attendance_error()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
azureplus/hue | desktop/core/ext-py/Paste-2.0.1/paste/exceptions/reporter.py | 50 | 4576 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib
import time
# socket.sslerror only exists on some (older) Pythons; fall back to None
# when it is unavailable.
try:
    from socket import sslerror
except ImportError:
    sslerror = None
from paste.exceptions import formatter
class Reporter(object):
    """Base class for exception reporters.

    Keyword arguments given to the constructor override class-level
    configuration attributes; an unknown name raises TypeError.  After
    assignment, check_params() is invoked so subclasses can validate
    their configuration.
    """

    def __init__(self, **conf):
        for key, value in conf.items():
            # Only names that already exist (class defaults) may be set;
            # anything else is a caller mistake.
            if not hasattr(self, key):
                raise TypeError(
                    "The keyword argument %s was not expected"
                    % key)
            setattr(self, key, value)
        self.check_params()

    def check_params(self):
        """Hook for subclasses to validate their configuration."""
        pass

    def format_date(self, exc_data):
        """Render the exception's timestamp in the locale's date format."""
        return time.strftime('%c', exc_data.date)

    def format_html(self, exc_data, **kw):
        return formatter.format_html(exc_data, **kw)

    def format_text(self, exc_data, **kw):
        return formatter.format_text(exc_data, **kw)
class EmailReporter(Reporter):
    """Send exception reports by email through an SMTP server.

    Required configuration: ``to_addresses`` (a string or a list of
    strings) and ``from_address``.  Optional: ``smtp_server``,
    ``smtp_username``/``smtp_password``, ``smtp_use_tls`` and
    ``subject_prefix``.
    """

    to_addresses = None
    from_address = None
    smtp_server = 'localhost'
    smtp_username = None
    smtp_password = None
    smtp_use_tls = False
    subject_prefix = ''

    def report(self, exc_data):
        """Assemble the message for *exc_data* and send it via SMTP."""
        msg = self.assemble_email(exc_data)
        server = smtplib.SMTP(self.smtp_server)
        if self.smtp_use_tls:
            server.ehlo()
            server.starttls()
            # Re-identify after STARTTLS, as required by RFC 3207.
            server.ehlo()
        if self.smtp_username and self.smtp_password:
            server.login(self.smtp_username, self.smtp_password)
        server.sendmail(self.from_address,
                        self.to_addresses, msg.as_string())
        try:
            server.quit()
        except sslerror:
            # sslerror is raised in tls connections on closing sometimes.
            # NOTE(review): sslerror may be None (see import guard above);
            # 'except None' would itself raise if quit() failed — confirm.
            pass

    def check_params(self):
        """Validate mandatory addresses and normalize ``to_addresses``.

        A single address given as a string is wrapped in a list so
        report() can always treat ``to_addresses`` as a sequence.
        """
        if not self.to_addresses:
            raise ValueError("You must set to_addresses")
        if not self.from_address:
            raise ValueError("You must set from_address")
        # Bug fix: the original used isinstance(..., (str, unicode)),
        # which raises NameError on Python 3 where `unicode` is gone.
        try:
            string_types = (str, unicode)
        except NameError:  # Python 3
            string_types = (str,)
        if isinstance(self.to_addresses, string_types):
            self.to_addresses = [self.to_addresses]

    def assemble_email(self, exc_data):
        """Build a multipart/alternative message with a plain-text part,
        a short HTML part and a long HTML part (hidden frames shown)."""
        short_html_version = self.format_html(
            exc_data, show_hidden_frames=False)
        long_html_version = self.format_html(
            exc_data, show_hidden_frames=True)
        text_version = self.format_text(
            exc_data, show_hidden_frames=False)
        msg = MIMEMultipart()
        msg.set_type('multipart/alternative')
        msg.preamble = msg.epilogue = ''
        text_msg = MIMEText(text_version)
        text_msg.set_type('text/plain')
        text_msg.set_param('charset', 'ASCII')
        msg.attach(text_msg)
        html_msg = MIMEText(short_html_version)
        html_msg.set_type('text/html')
        # @@: Correct character set?
        html_msg.set_param('charset', 'UTF-8')
        html_long = MIMEText(long_html_version)
        html_long.set_type('text/html')
        html_long.set_param('charset', 'UTF-8')
        msg.attach(html_msg)
        msg.attach(html_long)
        subject = '%s: %s' % (exc_data.exception_type,
                              formatter.truncate(str(exc_data.exception_value)))
        msg['Subject'] = self.subject_prefix + subject
        msg['From'] = self.from_address
        msg['To'] = ', '.join(self.to_addresses)
        return msg
class LogReporter(Reporter):
    """Append formatted exception reports to a log file."""

    filename = None
    show_hidden_frames = True

    def check_params(self):
        assert self.filename is not None, (
            "You must give a filename")

    def report(self, exc_data):
        # One report per entry, followed by a horizontal separator rule.
        text = self.format_text(
            exc_data, show_hidden_frames=self.show_hidden_frames)
        with open(self.filename, 'a') as log:
            log.write('%s\n%s\n' % (text, '-' * 60))
class FileReporter(Reporter):
    """Write formatted exception reports to an already-open file object."""

    file = None
    show_hidden_frames = True

    def check_params(self):
        assert self.file is not None, (
            "You must give a file object")

    def report(self, exc_data):
        # Same output format as LogReporter, but the caller owns the file.
        text = self.format_text(
            exc_data, show_hidden_frames=self.show_hidden_frames)
        self.file.write('%s\n%s\n' % (text, '-' * 60))
class WSGIAppReporter(Reporter):
    """WSGI application that renders a stored exception as an HTML page."""

    def __init__(self, exc_data):
        self.exc_data = exc_data

    def __call__(self, environ, start_response):
        headers = [('Content-type', 'text/html')]
        start_response('500 Server Error', headers)
        return [formatter.format_html(self.exc_data)]
| apache-2.0 |
pradeepnazareth/NS-3-begining | waf-tools/clang_compilation_database.py | 99 | 1830 | #!/usr/bin/env python
# encoding: utf-8
# Christoph Koke, 2013
"""
Writes the c and cpp compile commands into build/compile_commands.json
see http://clang.llvm.org/docs/JSONCompilationDatabase.html
Usage:
def configure(conf):
conf.load('compiler_cxx')
...
conf.load('clang_compilation_database')
"""
import sys, os, json, shlex, pipes
from waflib import Logs, TaskGen
from waflib.Tools import c, cxx
# shlex.quote was added in Python 3.3; fall back to the old pipes.quote on
# earlier interpreters.
quote = shlex.quote if sys.hexversion >= 0x3030000 else pipes.quote
@TaskGen.feature('*')
@TaskGen.after_method('process_use')
def collect_compilation_db_tasks(self):
    "Add a compilation database entry for compiled tasks"
    bld = self.bld
    try:
        clang_db = bld.clang_compilation_database_tasks
    except AttributeError:
        # First task generator processed: create the list and arrange for
        # the database to be written once the build finishes.
        clang_db = bld.clang_compilation_database_tasks = []
        bld.add_post_fun(write_compilation_database)
    # Only C and C++ compile tasks belong in the database.
    for tsk in getattr(self, 'compiled_tasks', []):
        if isinstance(tsk, (c.c, cxx.cxx)):
            clang_db.append(tsk)
def write_compilation_database(ctx):
    "Write the clang compilation database as JSON"
    db_node = ctx.bldnode.make_node('compile_commands.json')
    Logs.info("Build commands will be stored in %s" % db_node.path_from(ctx.path))
    # Merge with an existing database so entries from previous (partial)
    # builds are preserved; start empty when none can be read.
    try:
        existing = json.load(db_node)
    except IOError:
        existing = []
    clang_db = dict((entry["file"], entry) for entry in existing)
    for tsk in getattr(ctx, 'clang_compilation_database_tasks', []):
        try:
            cmd = tsk.last_cmd
        except AttributeError:
            # Task was not executed during this build run; skip it.
            continue
        directory = getattr(tsk, 'cwd', ctx.variant_dir)
        src_node = tsk.inputs[0]
        filename = os.path.relpath(src_node.abspath(), directory)
        cmd = " ".join(map(quote, cmd))
        clang_db[filename] = {
            "directory": directory,
            "command": cmd,
            "file": filename,
        }
    db_node.write(json.dumps(list(clang_db.values()), indent=2))
| gpl-2.0 |
firerszd/kbengine | kbe/src/lib/python/Lib/encodings/iso2022_kr.py | 816 | 1053 | #
# iso2022_kr.py: Python Unicode Codec for ISO2022_KR
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_kr')
class Codec(codecs.Codec):
    # Stateless encode/decode, delegating to the C-level codec object.
    encode = codec.encode
    decode = codec.decode
# Chunk-at-a-time encoder; the state handling lives in _multibytecodec.
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
# Chunk-at-a-time decoder; the state handling lives in _multibytecodec.
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
# File-like reading interface for the codec.
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
# File-like writing interface for the codec.
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Return the CodecInfo used to register 'iso2022_kr' with the codecs
    machinery (looked up by the encodings package search function)."""
    return codecs.CodecInfo(
        name='iso2022_kr',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| lgpl-3.0 |
conan-io/conan | conans/test/functional/scm/tools/test_git.py | 1 | 15486 | # coding=utf-8
import os
import re
import subprocess
import unittest
import pytest
import six
from mock import patch
from parameterized import parameterized
from conans.client import tools
from conans.client.tools.scm import Git
from conans.errors import ConanException
from conans.test.utils.scm import create_local_git_repo
from conans.test.utils.tools import temp_folder, TestClient
from conans.util.files import save
@pytest.mark.tool_git
class GitRemoteUrlTest(unittest.TestCase):
    """Tests for credential handling in Git.get_remote_url()."""
    def test_remove_credentials(self):
        """ Check that the 'remove_credentials' argument is taken into account """
        expected_url = 'https://myrepo.com/path/to/repo.git'
        origin_url = 'https://username:password@myrepo.com/path/to/repo.git'
        git = Git(folder=temp_folder())
        git.run("init .")
        git.run("remote add origin {}".format(origin_url))
        self.assertEqual(git.get_remote_url(), origin_url)
        # With remove_credentials=True the 'user:password@' pair is stripped.
        self.assertEqual(git.get_remote_url(remove_credentials=True), expected_url)
@pytest.mark.tool_git
class GitToolTest(unittest.TestCase):
    """Behavioural tests for the conans Git tool wrapper: version parsing,
    cloning (plain, shallow, with submodules), repository inspection and
    usage of the helper from within recipes.
    """
    @patch('subprocess.Popen')
    def test_version(self, mocked_open):
        # The version number is parsed out of the `git version` output.
        mocked_open.return_value.communicate.return_value = ('git version 2.21.0'.encode(), None)
        version = Git.get_version()
        self.assertEqual(version, "2.21.0")
    @patch('subprocess.Popen')
    def test_version_invalid(self, mocked_open):
        mocked_open.return_value.communicate.return_value = ('failed'.encode(), None)
        with self.assertRaises(ConanException):
            Git.get_version()
    def test_repo_root(self):
        root_path, _ = create_local_git_repo({"myfile": "anything"})
        # Initialized in the root folder
        git = Git(root_path)
        self.assertEqual(root_path, git.get_repo_root())
        # Initialized elsewhere
        subfolder = os.path.join(root_path, 'subfolder')
        os.makedirs(subfolder)
        git = Git(subfolder)
        self.assertEqual(root_path, git.get_repo_root())
    def test_is_pristine(self):
        # Pristine means no uncommitted (staged or unstaged) changes.
        root_path, _ = create_local_git_repo({"myfile": "anything"})
        git = Git(root_path)
        self.assertTrue(git.is_pristine())
        save(os.path.join(root_path, "other_file"), "content")
        self.assertFalse(git.is_pristine())
        git.run("add .")
        self.assertFalse(git.is_pristine())
        git.run('commit -m "commit"')
        self.assertTrue(git.is_pristine())
    def test_is_local_repository(self):
        root_path, _ = create_local_git_repo({"myfile": "anything"})
        git = Git(temp_folder())
        git.clone(root_path)
        self.assertTrue(git.is_local_repository())
        # TODO: Check that with remote one it is working too
    def test_clone_git(self):
        path, _ = create_local_git_repo({"myfile": "contents"})
        tmp = temp_folder()
        git = Git(tmp)
        git.clone(path)
        self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
    @parameterized.expand([(None,), # default
                           ("develop",), # branch name
                           ("1.0",), # tag name
                           ("HEAD",), # expression
                           ])
    def test_clone_git_shallow(self, element):
        # A shallow clone keeps exactly one commit, so HEAD~1 must fail.
        path, revision = create_local_git_repo({"myfile": "contents"}, commits=3, tags=["1.0"], branch="develop")
        tmp = temp_folder()
        git = Git(tmp)
        git.clone("file://" + path, branch=element, shallow=True) # --depth is ignored in local clones
        with self.assertRaises(subprocess.CalledProcessError):
            git.checkout(element="HEAD~1")
        self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
        self.assertEqual(git.get_revision(), revision)
        self.assertEqual(git.run("rev-list --all --count"), "1")
    def test_clone_git_shallow_revision(self):
        path, revision = create_local_git_repo({"myfile": "contents"}, commits=3, tags=["1.0"], branch="develop")
        tmp = temp_folder()
        git = Git(tmp)
        if Git.get_version() < "2.13":
            # older Git versions have known bugs with "git fetch origin <sha>":
            # https://github.com/git/git/blob/master/Documentation/RelNotes/2.13.0.txt
            # * "git fetch" that requests a commit by object name, when the other
            #   side does not allow such an request, failed without much
            #   explanation.
            # https://github.com/git/git/blob/master/Documentation/RelNotes/2.14.0.txt
            # * There is no good reason why "git fetch $there $sha1" should fail
            #   when the $sha1 names an object at the tip of an advertised ref,
            #   even when the other side hasn't enabled allowTipSHA1InWant.
            with self.assertRaises(subprocess.CalledProcessError):
                git.clone("file://" + path, branch=revision, shallow=True)
        else:
            git.clone("file://" + path, branch=revision, shallow=True)
            with self.assertRaises(subprocess.CalledProcessError):
                git.checkout(element="HEAD~1")
            self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
            self.assertEqual(git.get_revision(), revision)
            self.assertEqual(git.run("rev-list --all --count"), "1")
    def test_clone_git_shallow_with_local(self):
        # Cloning into a folder that already has local/index files keeps them.
        path, revision = create_local_git_repo({"repofile": "contents"}, commits=3)
        tmp = temp_folder()
        save(os.path.join(tmp, "localfile"), "contents")
        save(os.path.join(tmp, "indexfile"), "contents")
        git = Git(tmp)
        git.run("init")
        git.run("add indexfile")
        git.clone("file://" + path, branch="master", shallow=True) # --depth is ignored in local clones
        self.assertTrue(os.path.exists(os.path.join(tmp, "repofile")))
        self.assertTrue(os.path.exists(os.path.join(tmp, "localfile")))
        self.assertTrue(os.path.exists(os.path.join(tmp, "indexfile")))
        self.assertEqual(git.get_revision(), revision)
        self.assertEqual(git.run("rev-list --all --count"), "1")
    def test_clone_existing_folder_git(self):
        path, commit = create_local_git_repo({"myfile": "contents"}, branch="my_release")
        tmp = temp_folder()
        save(os.path.join(tmp, "file"), "dummy contents")
        git = Git(tmp)
        git.clone(path, branch="my_release")
        self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
        # Checkout a commit
        git.checkout(commit)
        self.assertEqual(git.get_revision(), commit)
    def test_clone_existing_folder_without_branch(self):
        # Cloning into a non-empty folder requires an explicit branch.
        tmp = temp_folder()
        save(os.path.join(tmp, "file"), "dummy contents")
        git = Git(tmp)
        with six.assertRaisesRegex(self, ConanException, "specify a branch to checkout"):
            git.clone("https://github.com/conan-io/hooks.git")
    def test_credentials(self):
        tmp = temp_folder()
        git = Git(tmp, username="peter", password="otool")
        url_credentials = git.get_url_with_credentials("https://some.url.com")
        self.assertEqual(url_credentials, "https://peter:otool@some.url.com")
    def test_verify_ssl(self):
        # Capture the command line built by Git through a fake runner.
        class MyRunner(object):
            def __init__(self):
                self.calls = []
            def __call__(self, *args, **kwargs):
                self.calls.append(args[0])
                return ""
        runner = MyRunner()
        tmp = temp_folder()
        git = Git(tmp, username="peter", password="otool", verify_ssl=True, runner=runner,
                  force_english=True)
        git.clone(url="https://myrepo.git")
        self.assertIn("git -c http.sslVerify=true", runner.calls[0])
        runner = MyRunner()
        git = Git(tmp, username="peter", password="otool", verify_ssl=False, runner=runner,
                  force_english=False)
        git.clone(url="https://myrepo.git")
        self.assertIn("git -c http.sslVerify=false", runner.calls[0])
    def test_clone_submodule_git(self):
        # Nested repos: main -> submodule -> subsubmodule.
        subsubmodule, _ = create_local_git_repo({"subsubmodule": "contents"})
        submodule, _ = create_local_git_repo({"submodule": "contents"}, submodules=[subsubmodule])
        path, commit = create_local_git_repo({"myfile": "contents"}, submodules=[submodule])
        def _create_paths():
            tmp = temp_folder()
            submodule_path = os.path.join(
                tmp,
                os.path.basename(os.path.normpath(submodule)))
            subsubmodule_path = os.path.join(
                submodule_path,
                os.path.basename(os.path.normpath(subsubmodule)))
            return tmp, submodule_path, subsubmodule_path
        # Check old (default) behaviour
        tmp, submodule_path, _ = _create_paths()
        git = Git(tmp)
        git.clone(path)
        self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
        self.assertFalse(os.path.exists(os.path.join(submodule_path, "submodule")))
        # Check invalid value
        tmp, submodule_path, _ = _create_paths()
        git = Git(tmp)
        git.clone(path)
        with six.assertRaisesRegex(self, ConanException,
                                   "Invalid 'submodule' attribute value in the 'scm'."):
            git.checkout(commit, submodule="invalid")
        # Check shallow
        tmp, submodule_path, subsubmodule_path = _create_paths()
        git = Git(tmp)
        git.clone(path)
        git.checkout(commit, submodule="shallow")
        self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
        self.assertTrue(os.path.exists(os.path.join(submodule_path, "submodule")))
        self.assertFalse(os.path.exists(os.path.join(subsubmodule_path, "subsubmodule")))
        # Check recursive
        tmp, submodule_path, subsubmodule_path = _create_paths()
        git = Git(tmp)
        git.clone(path)
        git.checkout(commit, submodule="recursive")
        self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
        self.assertTrue(os.path.exists(os.path.join(submodule_path, "submodule")))
        self.assertTrue(os.path.exists(os.path.join(subsubmodule_path, "subsubmodule")))
    def test_git_to_capture_branch(self):
        conanfile = """
import re
from conans import ConanFile, tools
def get_version():
    git = tools.Git()
    try:
        branch = git.get_branch()
        branch = re.sub('[^0-9a-zA-Z]+', '_', branch)
        return "%s_%s" % (branch, git.get_revision())
    except:
        return None
class HelloConan(ConanFile):
    name = "Hello"
    version = get_version()
    def build(self):
        assert("r3le_ase__" in self.version)
        assert(len(self.version) == 50)
"""
        path, _ = create_local_git_repo({"conanfile.py": conanfile}, branch="r3le-ase-")
        client = TestClient()
        client.current_folder = path
        client.run("create . user/channel")
    def test_git_helper_in_recipe(self):
        client = TestClient()
        git_repo = temp_folder()
        save(os.path.join(git_repo, "file.h"), "contents")
        with client.chdir(git_repo):
            client.run_command("git init .")
            client.run_command('git config user.email "you@example.com"')
            client.run_command('git config user.name "Your Name"')
            client.run_command("git checkout -b dev")
            client.run_command("git add .")
            client.run_command('git commit -m "comm"')
        conanfile = """
import os
from conans import ConanFile, tools
class HelloConan(ConanFile):
    name = "Hello"
    version = "0.1"
    exports_sources = "other"
    def source(self):
        git = tools.Git()
        git.clone("%s", "dev")
    def build(self):
        assert(os.path.exists("file.h"))
""" % git_repo.replace("\\", "/")
        client.save({"conanfile.py": conanfile, "other": "hello"})
        client.run("create . user/channel")
        # Now clone in a subfolder with later checkout
        conanfile = """
import os
from conans import ConanFile, tools
class HelloConan(ConanFile):
    name = "Hello"
    version = "0.1"
    exports_sources = "other"
    def source(self):
        tools.mkdir("src")
        git = tools.Git("./src")
        git.clone("%s")
        git.checkout("dev")
    def build(self):
        assert(os.path.exists(os.path.join("src", "file.h")))
""" % git_repo.replace("\\", "/")
        client.save({"conanfile.py": conanfile, "other": "hello"})
        client.run("create . user/channel")
        # Base dir, with exports without subfolder and not specifying checkout fails
        conanfile = """
import os
from conans import ConanFile, tools
class HelloConan(ConanFile):
    name = "Hello"
    version = "0.1"
    exports_sources = "other"
    def source(self):
        git = tools.Git()
        git.clone("%s")
    def build(self):
        assert(os.path.exists("file.h"))
""" % git_repo.replace("\\", "/")
        client.save({"conanfile.py": conanfile, "other": "hello"})
        client.run("create . user/channel", assert_error=True)
        self.assertIn("specify a branch to checkout", client.out)
    def test_git_commit_message(self):
        client = TestClient()
        git_repo = temp_folder()
        with client.chdir(git_repo):
            client.run_command("git init .")
            client.run_command('git config user.email "you@example.com"')
            client.run_command('git config user.name "Your Name"')
            client.run_command("git checkout -b dev")
        git = Git(git_repo)
        # No commit yet: no message to report.
        self.assertIsNone(git.get_commit_message())
        save(os.path.join(git_repo, "test"), "contents")
        with client.chdir(git_repo):
            client.run_command("git add test")
            client.run_command('git commit -m "first commit"')
        self.assertEqual("dev", git.get_branch())
        self.assertEqual("first commit", git.get_commit_message())
@pytest.mark.tool_git
class GitToolsTests(unittest.TestCase):
    """Tests for Git.get_tag() and Git.excluded_files()."""
    def setUp(self):
        # Fresh single-commit repository for every test.
        self.folder, self.rev = create_local_git_repo({'myfile.txt': "contents"})
    def test_no_tag(self):
        """
        No tag has been created in the repo.
        """
        git = Git(folder=self.folder)
        tag = git.get_tag()
        self.assertIsNone(tag)
    def test_in_tag(self):
        """
        Current checkout is on a tag.
        """
        git = Git(folder=self.folder)
        git.run("tag 0.0.0")
        tag = git.get_tag()
        self.assertEqual("0.0.0", tag)
    def test_in_branch_with_tag(self):
        """
        Tag is defined but current commit is ahead of it.
        """
        git = Git(folder=self.folder)
        git.run("tag 0.0.0")
        save(os.path.join(self.folder, "file.txt"), "")
        git.run("add .")
        git.run("commit -m \"new file\"")
        tag = git.get_tag()
        self.assertIsNone(tag)
    def test_get_tag_no_git_repo(self):
        # Try to get tag out of a git repo
        tmp_folder = temp_folder()
        git = Git(folder=tmp_folder)
        pattern = "'{0}' is not a valid 'git' repository or 'git' not found".format(
            re.escape(tmp_folder))
        with six.assertRaisesRegex(self, ConanException, pattern):
            git.get_tag()
    def test_excluded_files(self):
        # With git unavailable (empty PATH) the exclusion list is empty.
        folder = temp_folder()
        save(os.path.join(folder, "file"), "some contents")
        git = Git(folder)
        with tools.environment_append({"PATH": ""}):
            excluded = git.excluded_files()
        self.assertEqual(excluded, [])
| mit |
zaccoz/odoo | addons/account/wizard/account_statement_from_invoice.py | 224 | 4128 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_statement_from_invoice_lines(osv.osv_memory):
    """
    Generate Entries by Statement from Invoices
    """
    _name = "account.statement.from.invoice.lines"
    _description = "Entries by Statement from Invoices"
    _columns = {
        'line_ids': fields.many2many('account.move.line', 'account_move_line_relation', 'move_id', 'line_id', 'Invoices'),
    }
    def populate_statement(self, cr, uid, ids, context=None):
        """Create one bank statement line per selected invoice move line.

        The target statement is taken from context['statement_id'].  Each
        line's amount is signed (debit positive, credit negative) and
        converted to the statement currency using the statement date as
        the conversion date.
        """
        context = dict(context or {})
        statement_id = context.get('statement_id', False)
        if not statement_id:
            return {'type': 'ir.actions.act_window_close'}
        data = self.read(cr, uid, ids, context=context)[0]
        line_ids = data['line_ids']
        if not line_ids:
            return {'type': 'ir.actions.act_window_close'}
        line_obj = self.pool.get('account.move.line')
        statement_obj = self.pool.get('account.bank.statement')
        statement_line_obj = self.pool.get('account.bank.statement.line')
        currency_obj = self.pool.get('res.currency')
        statement = statement_obj.browse(cr, uid, statement_id, context=context)
        line_date = statement.date
        # for each selected move lines
        for line in line_obj.browse(cr, uid, line_ids, context=context):
            ctx = context.copy()
            # take the date for computation of currency => use payment date
            ctx['date'] = line_date
            # Signed amount: debit positive, credit negative.
            amount = 0.0
            if line.debit > 0:
                amount = line.debit
            elif line.credit > 0:
                amount = -line.credit
            if line.amount_currency:
                if line.company_id.currency_id.id != statement.currency.id:
                    # In the specific case where the company currency and the statement currency are the same
                    # the debit/credit field already contains the amount in the right currency.
                    # We therefore avoid to re-convert the amount in the currency, to prevent Gain/loss exchanges
                    amount = currency_obj.compute(cr, uid, line.currency_id.id,
                        statement.currency.id, line.amount_currency, context=ctx)
            elif (line.invoice and line.invoice.currency_id.id != statement.currency.id):
                amount = currency_obj.compute(cr, uid, line.invoice.currency_id.id,
                    statement.currency.id, amount, context=ctx)
            context.update({'move_line_ids': [line.id],
                            'invoice_id': line.invoice.id})
            statement_line_obj.create(cr, uid, {
                'name': line.name or '?',
                'amount': amount,
                'partner_id': line.partner_id.id,
                'statement_id': statement_id,
                'ref': line.ref,
                'date': statement.date,
                'amount_currency': line.amount_currency,
                'currency_id': line.currency_id.id,
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/emscripten/1.30.0/third_party/ply/test/testlex.py | 62 | 23233 | # testlex.py
import unittest
try:
import StringIO
except ImportError:
import io as StringIO
import sys
import os
import imp
import warnings
sys.path.insert(0,"..")
sys.tracebacklimit = 0
import ply.lex
def make_pymodule_path(filename):
    """Return the byte-compiled output path for source file *filename*.

    On Python >= 3.2 compiled files live in a __pycache__ subdirectory
    with the interpreter tag embedded in the name (PEP 3147); on older
    versions the source path itself is returned unchanged.
    """
    dirname = os.path.dirname(filename)
    basename = os.path.basename(filename)
    modname, ext = os.path.splitext(basename)
    if sys.hexversion < 0x3020000:
        return filename
    tagged_name = "%s.%s%s" % (modname, imp.get_tag(), ext)
    return os.path.join(dirname, '__pycache__', tagged_name)
def pymodule_out_exists(filename):
    # True if the byte-compiled output for *filename* exists on disk.
    return os.path.exists(make_pymodule_path(filename))
def pymodule_out_remove(filename):
    # Delete the byte-compiled output for *filename* (raises if missing).
    os.remove(make_pymodule_path(filename))
def check_expected(result, expected):
    """Return True if *result* matches *expected* line by line.

    Both values are normalized to bytes on Python 3.  The line counts
    must agree, and each expected line must be a suffix of the
    corresponding result line (so message prefixes such as file paths
    are ignored).
    """
    if sys.version_info[0] >= 3:
        if isinstance(result, str):
            result = result.encode('ascii')
        if isinstance(expected, str):
            expected = expected.encode('ascii')
    got_lines = result.splitlines()
    want_lines = expected.splitlines()
    if len(got_lines) != len(want_lines):
        return False
    return all(got.endswith(want)
               for got, want in zip(got_lines, want_lines))
def run_import(module):
    """Import *module* by name for its side effects, then forget it.

    The module is removed from sys.modules afterwards so a later
    run_import() call executes the module body again.
    """
    exec("import %s" % module)
    del sys.modules[module]
# Tests related to errors and warnings when building lexers
class LexErrorWarningTests(unittest.TestCase):
    # Each test imports a deliberately malformed lex_* fixture module and
    # checks the diagnostic text ply.lex writes to stderr.  Matching is done
    # with check_expected(), which ignores any path prefix on each line.

    def setUp(self):
        # Capture stdout/stderr so emitted diagnostics can be inspected.
        sys.stderr = StringIO.StringIO()
        sys.stdout = StringIO.StringIO()
        if sys.hexversion >= 0x3020000:
            # Fixture modules may leave files open; silence ResourceWarning
            # on Python 3.2+ so it does not pollute captured output.
            warnings.filterwarnings('ignore',category=ResourceWarning)

    def tearDown(self):
        sys.stderr = sys.__stderr__
        sys.stdout = sys.__stdout__

    def test_lex_doc1(self):
        self.assertRaises(SyntaxError,run_import,"lex_doc1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "lex_doc1.py:18: No regular expression defined for rule 't_NUMBER'\n"))

    def test_lex_dup1(self):
        self.assertRaises(SyntaxError,run_import,"lex_dup1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "lex_dup1.py:20: Rule t_NUMBER redefined. Previously defined on line 18\n" ))

    def test_lex_dup2(self):
        self.assertRaises(SyntaxError,run_import,"lex_dup2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "lex_dup2.py:22: Rule t_NUMBER redefined. Previously defined on line 18\n" ))

    def test_lex_dup3(self):
        self.assertRaises(SyntaxError,run_import,"lex_dup3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "lex_dup3.py:20: Rule t_NUMBER redefined. Previously defined on line 18\n" ))

    def test_lex_empty(self):
        self.assertRaises(SyntaxError,run_import,"lex_empty")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "No rules of the form t_rulename are defined\n"
                    "No rules defined for state 'INITIAL'\n"))

    def test_lex_error1(self):
        # Missing t_error is only a warning, not a SyntaxError.
        run_import("lex_error1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "No t_error rule is defined\n"))

    def test_lex_error2(self):
        self.assertRaises(SyntaxError,run_import,"lex_error2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Rule 't_error' must be defined as a function\n")
                     )

    def test_lex_error3(self):
        self.assertRaises(SyntaxError,run_import,"lex_error3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "lex_error3.py:20: Rule 't_error' requires an argument\n"))

    def test_lex_error4(self):
        self.assertRaises(SyntaxError,run_import,"lex_error4")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "lex_error4.py:20: Rule 't_error' has too many arguments\n"))

    def test_lex_ignore(self):
        self.assertRaises(SyntaxError,run_import,"lex_ignore")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "lex_ignore.py:20: Rule 't_ignore' must be defined as a string\n"))

    def test_lex_ignore2(self):
        # Literal backslash in t_ignore is a warning only.
        run_import("lex_ignore2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "t_ignore contains a literal backslash '\\'\n"))

    def test_lex_re1(self):
        self.assertRaises(SyntaxError,run_import,"lex_re1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Invalid regular expression for rule 't_NUMBER'. unbalanced parenthesis\n"))

    def test_lex_re2(self):
        self.assertRaises(SyntaxError,run_import,"lex_re2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Regular expression for rule 't_PLUS' matches empty string\n"))

    def test_lex_re3(self):
        self.assertRaises(SyntaxError,run_import,"lex_re3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Invalid regular expression for rule 't_POUND'. unbalanced parenthesis\n"
                    "Make sure '#' in rule 't_POUND' is escaped with '\\#'\n"))

    def test_lex_rule1(self):
        self.assertRaises(SyntaxError,run_import,"lex_rule1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "t_NUMBER not defined as a function or string\n"))

    def test_lex_rule2(self):
        self.assertRaises(SyntaxError,run_import,"lex_rule2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "lex_rule2.py:18: Rule 't_NUMBER' requires an argument\n"))

    def test_lex_rule3(self):
        self.assertRaises(SyntaxError,run_import,"lex_rule3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "lex_rule3.py:18: Rule 't_NUMBER' has too many arguments\n"))

    def test_lex_state1(self):
        self.assertRaises(SyntaxError,run_import,"lex_state1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "states must be defined as a tuple or list\n"))

    def test_lex_state2(self):
        self.assertRaises(SyntaxError,run_import,"lex_state2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Invalid state specifier 'comment'. Must be a tuple (statename,'exclusive|inclusive')\n"
                    "Invalid state specifier 'example'. Must be a tuple (statename,'exclusive|inclusive')\n"))

    def test_lex_state3(self):
        self.assertRaises(SyntaxError,run_import,"lex_state3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "State name 1 must be a string\n"
                    "No rules defined for state 'example'\n"))

    def test_lex_state4(self):
        self.assertRaises(SyntaxError,run_import,"lex_state4")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "State type for state comment must be 'inclusive' or 'exclusive'\n"))

    def test_lex_state5(self):
        self.assertRaises(SyntaxError,run_import,"lex_state5")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "State 'comment' already defined\n"))

    def test_lex_state_noerror(self):
        # Missing error rule for an exclusive state is a warning only.
        run_import("lex_state_noerror")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "No error rule is defined for exclusive state 'comment'\n"))

    def test_lex_state_norule(self):
        self.assertRaises(SyntaxError,run_import,"lex_state_norule")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "No rules defined for state 'example'\n"))

    def test_lex_token1(self):
        self.assertRaises(SyntaxError,run_import,"lex_token1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "No token list is defined\n"
                    "Rule 't_NUMBER' defined for an unspecified token NUMBER\n"
                    "Rule 't_PLUS' defined for an unspecified token PLUS\n"
                    "Rule 't_MINUS' defined for an unspecified token MINUS\n"
        ))

    def test_lex_token2(self):
        self.assertRaises(SyntaxError,run_import,"lex_token2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "tokens must be a list or tuple\n"
                    "Rule 't_NUMBER' defined for an unspecified token NUMBER\n"
                    "Rule 't_PLUS' defined for an unspecified token PLUS\n"
                    "Rule 't_MINUS' defined for an unspecified token MINUS\n"
        ))

    def test_lex_token3(self):
        self.assertRaises(SyntaxError,run_import,"lex_token3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Rule 't_MINUS' defined for an unspecified token MINUS\n"))

    def test_lex_token4(self):
        self.assertRaises(SyntaxError,run_import,"lex_token4")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Bad token name '-'\n"))

    def test_lex_token5(self):
        # This one only fails at tokenizing time, with a LexError.
        try:
            run_import("lex_token5")
        except ply.lex.LexError:
            e = sys.exc_info()[1]
            self.assert_(check_expected(str(e),"lex_token5.py:19: Rule 't_NUMBER' returned an unknown token type 'NUM'"))

    def test_lex_token_dup(self):
        # Duplicate token names are a warning only.
        run_import("lex_token_dup")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Token 'MINUS' multiply defined\n"))

    def test_lex_literal1(self):
        self.assertRaises(SyntaxError,run_import,"lex_literal1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Invalid literal '**'. Must be a single character\n"))

    def test_lex_literal2(self):
        self.assertRaises(SyntaxError,run_import,"lex_literal2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                    "Invalid literals specification. literals must be a sequence of characters\n"))
import os
import subprocess
import shutil
# Tests related to various build options associated with lexers
class LexBuildOptionTests(unittest.TestCase):
    # Exercises module/object/closure lexer construction plus the
    # optimize/lextab table-generation options.  Several tests re-run the
    # fixture under 'python -O' / '-OO' via subprocess and then look for
    # the generated .pyo table file (via pymodule_out_exists, which knows
    # about __pycache__ relocation on Python 3.2+).

    def setUp(self):
        # Capture output so the token dumps can be inspected.
        sys.stderr = StringIO.StringIO()
        sys.stdout = StringIO.StringIO()

    def tearDown(self):
        sys.stderr = sys.__stderr__
        sys.stdout = sys.__stdout__
        try:
            # Best-effort cleanup of the package dir built by test_lex_optimize3.
            shutil.rmtree("lexdir")
        except OSError:
            pass

    def test_lex_module(self):
        run_import("lex_module")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))

    def test_lex_object(self):
        run_import("lex_object")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))

    def test_lex_closure(self):
        run_import("lex_closure")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))

    def test_lex_optimize(self):
        # Start from a clean slate: no pre-existing lextab files.
        try:
            os.remove("lextab.py")
        except OSError:
            pass
        try:
            os.remove("lextab.pyc")
        except OSError:
            pass
        try:
            os.remove("lextab.pyo")
        except OSError:
            pass
        run_import("lex_optimize")

        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        # optimize=1 must have written the table module.
        self.assert_(os.path.exists("lextab.py"))

        # Re-run under -O: the cached table should be loaded and compiled.
        p = subprocess.Popen([sys.executable,'-O','lex_optimize.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()

        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("lextab.pyo"))

        pymodule_out_remove("lextab.pyo")
        # And again under -OO.
        p = subprocess.Popen([sys.executable,'-OO','lex_optimize.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("lextab.pyo"))

        # Cleanup.
        try:
            os.remove("lextab.py")
        except OSError:
            pass
        try:
            pymodule_out_remove("lextab.pyc")
        except OSError:
            pass
        try:
            pymodule_out_remove("lextab.pyo")
        except OSError:
            pass

    def test_lex_optimize2(self):
        # Same as test_lex_optimize but with a custom lextab name (opt2tab).
        try:
            os.remove("opt2tab.py")
        except OSError:
            pass
        try:
            os.remove("opt2tab.pyc")
        except OSError:
            pass
        try:
            os.remove("opt2tab.pyo")
        except OSError:
            pass
        run_import("lex_optimize2")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(os.path.exists("opt2tab.py"))

        p = subprocess.Popen([sys.executable,'-O','lex_optimize2.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("opt2tab.pyo"))
        pymodule_out_remove("opt2tab.pyo")

        p = subprocess.Popen([sys.executable,'-OO','lex_optimize2.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("opt2tab.pyo"))
        try:
            os.remove("opt2tab.py")
        except OSError:
            pass
        try:
            pymodule_out_remove("opt2tab.pyc")
        except OSError:
            pass
        try:
            pymodule_out_remove("opt2tab.pyo")
        except OSError:
            pass

    def test_lex_optimize3(self):
        # lextab placed inside a package (outputdir) instead of the cwd.
        try:
            shutil.rmtree("lexdir")
        except OSError:
            pass

        os.mkdir("lexdir")
        os.mkdir("lexdir/sub")
        open("lexdir/__init__.py","w").write("")
        open("lexdir/sub/__init__.py","w").write("")
        run_import("lex_optimize3")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(os.path.exists("lexdir/sub/calctab.py"))

        p = subprocess.Popen([sys.executable,'-O','lex_optimize3.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("lexdir/sub/calctab.pyo"))
        pymodule_out_remove("lexdir/sub/calctab.pyo")

        p = subprocess.Popen([sys.executable,'-OO','lex_optimize3.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("lexdir/sub/calctab.pyo"))
        try:
            shutil.rmtree("lexdir")
        except OSError:
            pass

    def test_lex_opt_alias(self):
        # Optimized lexer using aliased rule functions; '+' is a literal token.
        try:
            os.remove("aliastab.py")
        except OSError:
            pass
        try:
            os.remove("aliastab.pyc")
        except OSError:
            pass
        try:
            os.remove("aliastab.pyo")
        except OSError:
            pass
        run_import("lex_opt_alias")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(+,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(os.path.exists("aliastab.py"))

        p = subprocess.Popen([sys.executable,'-O','lex_opt_alias.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(+,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("aliastab.pyo"))
        pymodule_out_remove("aliastab.pyo")

        p = subprocess.Popen([sys.executable,'-OO','lex_opt_alias.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                    "(NUMBER,3,1,0)\n"
                    "(+,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("aliastab.pyo"))

        try:
            os.remove("aliastab.py")
        except OSError:
            pass
        try:
            pymodule_out_remove("aliastab.pyc")
        except OSError:
            pass
        try:
            pymodule_out_remove("aliastab.pyo")
        except OSError:
            pass

    def test_lex_many_tokens(self):
        # Stress test: a lexer with hundreds of token rules.
        try:
            os.remove("manytab.py")
        except OSError:
            pass
        try:
            os.remove("manytab.pyc")
        except OSError:
            pass
        try:
            os.remove("manytab.pyo")
        except OSError:
            pass
        run_import("lex_many_tokens")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(TOK34,'TOK34:',1,0)\n"
                    "(TOK143,'TOK143:',1,7)\n"
                    "(TOK269,'TOK269:',1,15)\n"
                    "(TOK372,'TOK372:',1,23)\n"
                    "(TOK452,'TOK452:',1,31)\n"
                    "(TOK561,'TOK561:',1,39)\n"
                    "(TOK999,'TOK999:',1,47)\n"
                    ))

        self.assert_(os.path.exists("manytab.py"))

        p = subprocess.Popen([sys.executable,'-O','lex_many_tokens.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                    "(TOK34,'TOK34:',1,0)\n"
                    "(TOK143,'TOK143:',1,7)\n"
                    "(TOK269,'TOK269:',1,15)\n"
                    "(TOK372,'TOK372:',1,23)\n"
                    "(TOK452,'TOK452:',1,31)\n"
                    "(TOK561,'TOK561:',1,39)\n"
                    "(TOK999,'TOK999:',1,47)\n"
                    ))

        self.assert_(pymodule_out_exists("manytab.pyo"))
        pymodule_out_remove("manytab.pyo")
        try:
            os.remove("manytab.py")
        except OSError:
            pass
        try:
            os.remove("manytab.pyc")
        except OSError:
            pass
        try:
            os.remove("manytab.pyo")
        except OSError:
            pass
# Tests related to run-time behavior of lexers
class LexRunTests(unittest.TestCase):
    # Exercises lexers that manipulate lexer state at run time (lexpos
    # rewinding for Hollerith constants, begin()/push_state() etc.).

    def setUp(self):
        # Capture output so the token dumps can be inspected.
        sys.stderr = StringIO.StringIO()
        sys.stdout = StringIO.StringIO()

    def tearDown(self):
        sys.stderr = sys.__stderr__
        sys.stdout = sys.__stdout__

    def test_lex_hedit(self):
        run_import("lex_hedit")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(H_EDIT_DESCRIPTOR,'abc',1,0)\n"
                    "(H_EDIT_DESCRIPTOR,'abcdefghij',1,6)\n"
                    "(H_EDIT_DESCRIPTOR,'xy',1,20)\n"))

    def test_lex_state_try(self):
        run_import("lex_state_try")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                    "(NUMBER,'3',1,0)\n"
                    "(PLUS,'+',1,2)\n"
                    "(NUMBER,'4',1,4)\n"
                    "Entering comment state\n"
                    "comment body LexToken(body_part,'This is a comment */',1,9)\n"
                    "(PLUS,'+',1,30)\n"
                    "(NUMBER,'10',1,32)\n"
                    ))
unittest.main()  # Discover and run all TestCase classes defined above.
| bsd-2-clause |
melund/python-prompt-toolkit | prompt_toolkit/history.py | 23 | 2853 | from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from six import with_metaclass
import datetime
import os
__all__ = (
'FileHistory',
'History',
'InMemoryHistory',
)
class History(with_metaclass(ABCMeta, object)):
    """
    Base ``History`` interface.
    """
    @abstractmethod
    def append(self, string):
        " Append string to history. "

    @abstractmethod
    def __getitem__(self, key):
        " Return one item of the history. It should be accessible like a `list`. "

    @abstractmethod
    def __iter__(self):
        " Iterate through all the items of the history. Chronologically. "

    @abstractmethod
    def __len__(self):
        " Return the length of the history. "

    def __bool__(self):
        """
        Never evaluate to False, even when the history is empty.
        (Python calls __len__ if __bool__ is not implemented.)
        This is mainly to allow lazy evaluation::

            x = history or InMemoryHistory()
        """
        return True

    __nonzero__ = __bool__  # For Python 2.
class InMemoryHistory(History):
    """
    :class:`.History` implementation that keeps every entry in a plain
    in-memory list.  Nothing is persisted between sessions.
    """
    def __init__(self):
        self.strings = []

    def append(self, string):
        self.strings.append(string)

    def __getitem__(self, key):
        return self.strings[key]

    def __iter__(self):
        # Yield entries in insertion (chronological) order.
        for string in self.strings:
            yield string

    def __len__(self):
        return len(self.strings)
class FileHistory(History):
    """
    :class:`.History` class that stores all strings in a file.

    On-disk format (see append()): each entry is preceded by a
    '# <timestamp>' separator line, and every line of the entry itself is
    prefixed with '+'.
    """
    def __init__(self, filename):
        self.strings = []
        self.filename = filename

        self._load()

    def _load(self):
        # Parse the history file: consecutive '+' lines form one entry;
        # any other line (e.g. the '# <timestamp>' separators) ends it.
        lines = []

        def add():
            if lines:
                # Join and drop trailing newline.
                string = ''.join(lines)[:-1]
                self.strings.append(string)

        if os.path.exists(self.filename):
            with open(self.filename, 'rb') as f:
                for line in f:
                    line = line.decode('utf-8')

                    if line.startswith('+'):
                        lines.append(line[1:])
                    else:
                        add()
                        lines = []

                # Flush the final entry (file may not end with a separator).
                add()

    def append(self, string):
        self.strings.append(string)

        # Save to file.
        with open(self.filename, 'ab') as f:
            def write(t):
                f.write(t.encode('utf-8'))

            write('\n# %s\n' % datetime.datetime.now())
            for line in string.split('\n'):
                write('+%s\n' % line)

    def __getitem__(self, key):
        return self.strings[key]

    def __iter__(self):
        return iter(self.strings)

    def __len__(self):
        return len(self.strings)
| bsd-3-clause |
FuzzyHobbit/letsencrypt | letsencrypt-apache/letsencrypt_apache/tests/util.py | 8 | 5326 | """Common utilities for letsencrypt_apache."""
import os
import sys
import unittest
import augeas
import mock
import zope.component
from acme import jose
from letsencrypt.display import util as display_util
from letsencrypt.plugins import common
from letsencrypt.tests import test_util
from letsencrypt_apache import configurator
from letsencrypt_apache import constants
from letsencrypt_apache import obj
class ApacheTest(unittest.TestCase):  # pylint: disable=too-few-public-methods
    # Shared fixture base: unpacks a sample Apache configuration tree into
    # temp/config/work directories and loads a static RSA test key.

    def setUp(self, test_dir="debian_apache_2_4/two_vhost_80",
              config_root="debian_apache_2_4/two_vhost_80/apache2"):
        # pylint: disable=arguments-differ
        super(ApacheTest, self).setUp()

        self.temp_dir, self.config_dir, self.work_dir = common.dir_setup(
            test_dir=test_dir,
            pkg="letsencrypt_apache.tests")

        self.ssl_options = common.setup_ssl_options(
            self.config_dir, constants.MOD_SSL_CONF_SRC,
            constants.MOD_SSL_CONF_DEST)

        self.config_path = os.path.join(self.temp_dir, config_root)

        # 512-bit key keeps test crypto fast; loaded from a bundled vector.
        self.rsa512jwk = jose.JWKRSA.load(test_util.load_vector(
            "rsa512_key.pem"))
class ParserTest(ApacheTest):  # pylint: disable=too-few-public-methods
    # Extends ApacheTest with a ready-to-use Augeas-backed ApacheParser.

    def setUp(self, test_dir="debian_apache_2_4/two_vhost_80",
              config_root="debian_apache_2_4/two_vhost_80/apache2"):
        super(ParserTest, self).setUp(test_dir, config_root)

        zope.component.provideUtility(display_util.FileDisplay(sys.stdout))

        from letsencrypt_apache.parser import ApacheParser
        self.aug = augeas.Augeas(
            flags=augeas.Augeas.NONE | augeas.Augeas.NO_MODL_AUTOLOAD)
        # update_runtime_variables shells out to apachectl; stub it out so
        # the parser can be built without a real Apache installation.
        with mock.patch("letsencrypt_apache.parser.ApacheParser."
                        "update_runtime_variables"):
            self.parser = ApacheParser(
                self.aug, self.config_path, "dummy_ctl_path")
def get_apache_configurator(
        config_path, config_dir, work_dir, version=(2, 4, 7), conf=None):
    """Create an Apache Configurator with the specified options.

    :param str config_path: server root of the unpacked test configuration
    :param str config_dir: letsencrypt config directory
    :param str work_dir: directory for backups/checkpoints
    :param tuple version: Apache version to emulate
    :param conf: Function that returns binary paths. self.conf in Configurator
    """
    backups = os.path.join(work_dir, "backups")

    mock_le_config = mock.MagicMock(
        apache_server_root=config_path,
        apache_le_vhost_ext=constants.CLI_DEFAULTS["le_vhost_ext"],
        backup_dir=backups,
        config_dir=config_dir,
        temp_checkpoint_dir=os.path.join(work_dir, "temp_checkpoints"),
        in_progress_dir=os.path.join(backups, "IN_PROGRESS"),
        work_dir=work_dir)

    # Patch external-command helpers so prepare() succeeds without a real
    # Apache installation on the test machine.
    with mock.patch("letsencrypt_apache.configurator.le_util.run_script"):
        with mock.patch("letsencrypt_apache.configurator.le_util."
                        "exe_exists") as mock_exe_exists:
            mock_exe_exists.return_value = True
            with mock.patch("letsencrypt_apache.parser.ApacheParser."
                            "update_runtime_variables"):
                config = configurator.ApacheConfigurator(
                    config=mock_le_config,
                    name="apache",
                    version=version)

                # This allows testing scripts to set it a bit more quickly
                if conf is not None:
                    config.conf = conf  # pragma: no cover

                config.prepare()

    return config
def get_vh_truth(temp_dir, config_name):
    """Return the ground truth for the specified directory.

    Builds the list of obj.VirtualHost objects a correct parse of the named
    test configuration should produce; returns None for unknown configs.
    """
    if config_name == "debian_apache_2_4/two_vhost_80":
        prefix = os.path.join(
            temp_dir, config_name, "apache2/sites-available")
        aug_pre = "/files" + prefix
        # VirtualHost(filepath, augeas_path, addrs, ssl, enabled, [name], ...)
        vh_truth = [
            obj.VirtualHost(
                os.path.join(prefix, "encryption-example.conf"),
                os.path.join(aug_pre, "encryption-example.conf/VirtualHost"),
                set([obj.Addr.fromstring("*:80")]),
                False, True, "encryption-example.demo"),
            obj.VirtualHost(
                os.path.join(prefix, "default-ssl.conf"),
                os.path.join(aug_pre, "default-ssl.conf/IfModule/VirtualHost"),
                set([obj.Addr.fromstring("_default_:443")]), True, False),
            obj.VirtualHost(
                os.path.join(prefix, "000-default.conf"),
                os.path.join(aug_pre, "000-default.conf/VirtualHost"),
                set([obj.Addr.fromstring("*:80")]), False, True,
                "ip-172-30-0-17"),
            obj.VirtualHost(
                os.path.join(prefix, "letsencrypt.conf"),
                os.path.join(aug_pre, "letsencrypt.conf/VirtualHost"),
                set([obj.Addr.fromstring("*:80")]), False, True,
                "letsencrypt.demo"),
            obj.VirtualHost(
                os.path.join(prefix, "mod_macro-example.conf"),
                os.path.join(aug_pre,
                             "mod_macro-example.conf/Macro/VirtualHost"),
                set([obj.Addr.fromstring("*:80")]), False, True, modmacro=True),
            obj.VirtualHost(
                os.path.join(prefix, "default-ssl-port-only.conf"),
                os.path.join(aug_pre, "default-ssl-port-only.conf/IfModule/VirtualHost"),
                set([obj.Addr.fromstring("_default_:443")]), True, False),
        ]
        return vh_truth
    return None  # pragma: no cover
| apache-2.0 |
iceout/python_koans_practice | python2/koans/about_new_style_classes.py | 1 | 2171 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutNewStyleClasses(Koan):
    # Koan exercise: the '____'/'__' placeholders below are intentional and
    # must be filled in by the student -- do not "fix" them.

    class OldStyleClass:
        "An old style class"
        # Original class style have been phased out in Python 3.

    class NewStyleClass(object):
        "A new style class"
        # Introduced in Python 2.2
        #
        # Aside from this set of tests, Python Koans sticks exclusively to this
        # kind of class
        pass

    def test_new_style_classes_inherit_from_object_base_class(self):
        self.assertEqual(____, issubclass(self.NewStyleClass, object))
        self.assertEqual(____, issubclass(self.OldStyleClass, object))

    def test_new_style_classes_have_more_attributes(self):
        self.assertEqual(__, len(dir(self.OldStyleClass)))
        self.assertEqual(__, self.OldStyleClass.__doc__)
        self.assertEqual(__, self.OldStyleClass.__module__)

        self.assertEqual(__, len(dir(self.NewStyleClass)))
        # To examine the available attributes, run
        # 'dir(<Class name goes here>)'
        # from a python console

    # ------------------------------------------------------------------

    def test_old_style_classes_have_type_but_no_class_attribute(self):
        self.assertEqual(__, self.OldStyleClass.__class__)
        try:
            cls = self.OldStyleClass.__class__
        except Exception as ex:
            pass

        self.assertMatch(__, ex[0])

    def test_new_style_classes_have_same_class_as_type(self):
        new_style = self.NewStyleClass()
        self.assertEqual(__, self.NewStyleClass.__class__)
        self.assertEqual(
            __,
            type(self.NewStyleClass) == self.NewStyleClass.__class__)

    # ------------------------------------------------------------------

    def test_in_old_style_instances_class_is_different_to_type(self):
        old_style = self.OldStyleClass()
        self.assertEqual(__, old_style.__class__)

    def test_new_style_instances_have_same_class_as_type(self):
        new_style = self.NewStyleClass()
        self.assertEqual(__, new_style.__class__)
        self.assertEqual(__, type(new_style) == new_style.__class__)
| mit |
prakritish/ansible | test/integration/cleanup_gce.py | 66 | 2621 | '''
Find and delete GCE resources matching the provided --match string. Unless
--yes|-y is provided, the prompt for confirmation prior to deleting resources.
Please use caution, you can easily delete your *ENTIRE* GCE infrastructure.
'''
import os
import re
import sys
import optparse
import yaml
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support (0.13.3+) required for this module'")
sys.exit(1)
import gce_credentials
def delete_gce_resources(get_func, attr, opts):
    """Prompt to delete every resource returned by *get_func* whose
    *attr* value matches opts.match_re (case-insensitive)."""
    for resource in get_func():
        name = getattr(resource, attr)
        if not re.search(opts.match_re, name, re.IGNORECASE):
            continue
        prompt_and_delete(resource,
                          "Delete matching %s? [y/n]: " % (resource,),
                          opts.assumeyes)
def prompt_and_delete(item, prompt, assumeyes):
    """Destroy *item* after an optional interactive confirmation.

    :param item: resource object exposing a ``destroy()`` method
    :param prompt: question shown when confirmation is required
    :param assumeyes: when True, skip the prompt and delete unconditionally
    :raises TypeError: if *item* has no ``destroy`` attribute
    """
    # Validate *before* prompting so the user is never asked to confirm an
    # impossible deletion.  The original used `assert`, which is silently
    # stripped under `python -O`; raise explicitly instead.
    if not hasattr(item, 'destroy'):
        raise TypeError("Class <%s> has no destroy attribute" % item.__class__)
    if not assumeyes:
        assumeyes = raw_input(prompt).lower() == 'y'
    if assumeyes:
        item.destroy()
        print("Deleted %s" % item)
def parse_args():
    """Parse command-line options and validate required GCE credentials.

    Returns the (opts, args) pair from optparse.
    """
    parser = optparse.OptionParser(usage="%s [options]" % (sys.argv[0],),
                                   description=__doc__)
    # Credential-related flags are shared with the other cleanup scripts.
    gce_credentials.add_credentials_options(parser)
    parser.add_option("--yes", "-y",
                      action="store_true", dest="assumeyes",
                      default=False,
                      help="Don't prompt for confirmation")
    parser.add_option("--match",
                      action="store", dest="match_re",
                      default="^ansible-testing-",
                      help="Regular expression used to find GCE resources (default: %default)")

    (opts, args) = parser.parse_args()
    # Exits with a parser error if required credential options are missing.
    gce_credentials.check_required(opts, parser)
    return (opts, args)
if __name__ == '__main__':
    (opts, args) = parse_args()

    # Connect to GCE
    gce = gce_credentials.get_gce_driver(opts)
    try:
        # Delete matching instances
        delete_gce_resources(gce.list_nodes, 'name', opts)

        # Delete matching snapshots
        def get_snapshots():
            # Snapshots hang off volumes, so walk every volume's snapshots.
            for volume in gce.list_volumes():
                for snapshot in gce.list_volume_snapshots(volume):
                    yield snapshot
        delete_gce_resources(get_snapshots, 'name', opts)
        # Delete matching disks (after snapshots, which depend on them)
        delete_gce_resources(gce.list_volumes, 'name', opts)
    except KeyboardInterrupt as e:
        print("\nExiting on user command.")
praveenkumar/ansible | lib/ansible/plugins/strategies/linear.py | 1 | 14325 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
    # Linear strategy: every host runs the same task before any host moves
    # on to the next one; hosts that are "behind" receive noop meta tasks.

    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        display.debug("building list of next tasks for hosts")
        for host in hosts:
            # peek=True: look at the next task without advancing the host's
            # iterator state; advancing happens in _advance_selected_hosts.
            host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
        display.debug("done building task lists")

        num_setups = 0
        num_tasks = 0
        num_rescue = 0
        num_always = 0

        lowest_cur_block = len(iterator._blocks)

        display.debug("counting tasks in each state of execution")
        for (k, v) in iteritems(host_tasks):
            if v is None:
                continue

            (s, t) = v
            if t is None:
                continue

            # Track the earliest block any host is still in, so we only run
            # tasks from that block this round.
            if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                lowest_cur_block = s.cur_block

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1
        display.debug("done counting tasks in each state of execution")

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            display.debug("starting to advance hosts")
            for host in hosts:
                host_state_task = host_tasks[host.name]
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if t is None:
                    continue
                if s.run_state == cur_state and s.cur_block == cur_block:
                    # Actually advance this host's iterator state.
                    new_t = iterator.get_next_task_for_host(host)
                    rvals.append((host, t))
                else:
                    # Host is in a different state/block: keep it in step
                    # with a noop task.
                    rvals.append((host, noop_task))
            display.debug("done advancing hosts to next task")
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            display.debug("advancing hosts in ITERATING_SETUP")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            display.debug("advancing hosts in ITERATING_TASKS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            display.debug("advancing hosts in ITERATING_RESCUE")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            display.debug("advancing hosts in ITERATING_ALWAYS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        display.debug("all hosts are done, so returning None's for all hosts")
        return [(host, None) for host in hosts]

    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iteratate over each task, while there is one left to run
        result = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                self._display.debug("getting the remaining hosts for this loop")
                hosts_left = self._inventory.get_hosts(iterator._play.hosts)
                self._display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest = False
                choose_step = True

                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.
                    try:
                        action = action_loader.get(task.action, class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            self._display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator)
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        self._display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        self._display.debug("done getting variables")

                        if not callback_sent:
                            display.debug("sending task start callback, copying the task so we can template it temporarily")
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = unicode(templar.template(task.name, fail_on_undefined=False))
                                display.debug("done templating")
                            except:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)

                    # Drain any results that are already available so workers
                    # are not starved while we queue the remaining hosts.
                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                # go to next host/task group
                if skip_rest:
                    continue

                self._display.debug("done queuing things up, now waiting for results queue to drain")
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    self._display.debug("out of hosts to run on")
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
                except AnsibleError as e:
                    return False

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_blocks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)
                        except AnsibleError as e:
                            for host in included_file._hosts:
                                iterator.mark_host_failed(host)
                            self._display.warning(str(e))
                            continue

                        for new_block in new_blocks:
                            noop_block = Block(parent_block=task._block)
                            noop_block.block = [noop_task for t in new_block.block]
                            noop_block.always = [noop_task for t in new_block.always]
                            noop_block.rescue = [noop_task for t in new_block.rescue]
                            for host in hosts_left:
                                if host in included_file._hosts:
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                    final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                self._display.debug("results queue empty")
            except (IOError, EOFError) as e:
                self._display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return False

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered
        return super(StrategyModule, self).run(iterator, play_context, result)
| gpl-3.0 |
bilgili/Voreen | modules/python/ext/python27/modules/tempfile.py | 35 | 18061 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
template - the default prefix for all temporary names.
You may change this to control the default prefix.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import os as _os
import errno as _errno
from random import Random as _Random
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# fcntl is POSIX-only; without it there is no FD_CLOEXEC flag to set,
# so _set_cloexec degrades to a harmless no-op.
try:
    import fcntl as _fcntl
except ImportError:
    def _set_cloexec(fd):
        # No fcntl available (e.g. Windows): nothing to do.
        pass
else:
    def _set_cloexec(fd):
        # Mark *fd* close-on-exec so children spawned via exec() do not
        # inherit the temporary file's descriptor.  Best effort only.
        try:
            flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
        except IOError:
            pass
        else:
            # flags read successfully, modify
            flags |= _fcntl.FD_CLOEXEC
            _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import thread as _thread
except ImportError:
import dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
_text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
# Pick the cheapest available existence probe: lstat (does not follow
# symlinks), then stat, then an open()-based fallback.
if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback. All we need is something that raises os.error if the
    # file doesn't exist.
    def _stat(fn):
        try:
            f = open(fn)
        except IOError:
            # Normalize to os.error so _exists() has one exception to catch.
            raise _os.error
        f.close()
def _exists(fn):
    """Return True if the path *fn* currently exists on the filesystem.

    Relies on _stat raising os.error for a missing entry.
    """
    try:
        _stat(fn)
        return True
    except _os.error:
        return False
class _RandomNameSequence:
    """An instance of _RandomNameSequence generates an endless
    sequence of unpredictable strings which can safely be incorporated
    into file names. Each string is six characters long. Multiple
    threads can safely use the same instance at the same time.
    _RandomNameSequence is an iterator."""
    # Alphabet for the six random characters: letters, digits, underscore.
    characters = ("abcdefghijklmnopqrstuvwxyz" +
                  "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
                  "0123456789_")
    def __init__(self):
        # mutex serializes access to the shared Random instance in next().
        self.mutex = _allocate_lock()
        self.normcase = _os.path.normcase
    @property
    def rng(self):
        # Re-seed after a fork: if the PID changed since the last access,
        # build a fresh Random so parent and child do not generate the
        # same name sequence.
        cur_pid = _os.getpid()
        if cur_pid != getattr(self, '_rng_pid', None):
            self._rng = _Random()
            self._rng_pid = cur_pid
        return self._rng
    def __iter__(self):
        return self
    def next(self):
        # Pick six random characters under the lock so concurrent callers
        # never interleave draws from the shared Random object.
        m = self.mutex
        c = self.characters
        choose = self.rng.choice
        m.acquire()
        try:
            letters = [choose(c) for dummy in "123456"]
        finally:
            m.release()
        return self.normcase(''.join(letters))
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'riscos':
dirname = _os.getenv('Wimp$ScrapDir')
if dirname: dirlist.append(dirname)
elif _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, _os.error):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.
    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory. If this
    is successful, the test file is deleted. To prevent denial of
    service, the name of the test file must be randomized."""
    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    flags = _text_openflags
    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.normcase(_os.path.abspath(dir))
        # Try only a few names per directory.
        for seq in xrange(100):
            name = namer.next()
            filename = _os.path.join(dir, name)
            try:
                # A full create/write/delete round trip proves the
                # directory is actually writable (0600: owner-only).
                fd = _os.open(filename, flags, 0600)
                fp = _os.fdopen(fd, 'w')
                fp.write('blat')
                fp.close()
                _os.unlink(filename)
                del fp, fd
                return dir
            except (OSError, IOError), e:
                # e[0] is the errno (Python 2 exception indexing).
                # Only a name collision (EEXIST) is worth retrying;
                # any other error disqualifies this directory.
                if e[0] != _errno.EEXIST:
                    break # no point trying more names in this directory
                pass
    raise IOError, (_errno.ENOENT,
                    ("No usable temporary directory found in %s" % dirlist))
_name_sequence = None
def _get_candidate_names():
    """Return the module-wide _RandomNameSequence, creating it on first use.

    Double-checked locking keeps the common (already-initialized) path
    lock-free while guaranteeing only one sequence is ever created.
    """
    global _name_sequence
    if _name_sequence is None:
        with _once_lock:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.

    Tries up to TMP_MAX random names; O_EXCL in *flags* makes creation
    atomic, so an EEXIST error simply means the name was taken and we
    can safely retry with the next candidate.
    """
    names = _get_candidate_names()
    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, pre + name + suf)
        try:
            # 0600: readable/writable by the owner only.
            fd = _os.open(file, flags, 0600)
            _set_cloexec(fd)
            return (fd, _os.path.abspath(file))
        except OSError, e:
            if e.errno == _errno.EEXIST:
                continue # try again
            raise
    raise IOError, (_errno.EEXIST, "No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
    """Accessor for tempfile.template, the default name prefix."""
    return template
tempdir = None
def gettempdir():
    """Accessor for tempfile.tempdir; computes the default on first use."""
    global tempdir
    if tempdir is None:
        # Double-checked locking: probe the filesystem for a default
        # exactly once, even if several threads race here.
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """User-callable function to create and return a unique temporary
    file.  The return value is a pair (fd, name): 'fd' is the file
    descriptor returned by os.open, 'name' is the absolute filename.

    'suffix' and 'prefix' bracket the random portion of the name;
    'dir' selects the directory (default: gettempdir()).  When 'text'
    is true the file is opened in text mode, otherwise binary mode
    (on some operating systems this makes no difference).

    The file is readable and writable only by the creating user ID,
    is executable by no one, and its descriptor is not inherited by
    children of this process.  The caller is responsible for deleting
    the file when done with it.
    """
    chosen_dir = dir if dir is not None else gettempdir()
    open_flags = _text_openflags if text else _bin_openflags
    return _mkstemp_inner(chosen_dir, prefix, suffix, open_flags)
def mkdtemp(suffix="", prefix=template, dir=None):
    """User-callable function to create and return a unique temporary
    directory. The return value is the pathname of the directory.
    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.
    The directory is readable, writable, and searchable only by the
    creating user.
    Caller is responsible for deleting the directory when done with it.
    """
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    # mkdir is atomic, so an EEXIST failure just means the random name
    # collided with an existing entry; retry with the next candidate.
    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, prefix + name + suffix)
        try:
            # 0700: owner-only access, per the docstring's guarantee.
            _os.mkdir(file, 0700)
            return file
        except OSError, e:
            if e.errno == _errno.EEXIST:
                continue # try again
            raise
    raise IOError, (_errno.EEXIST, "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
    """User-callable function to return a unique temporary file name. The
    file is not created.
    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.
    This function is unsafe and should not be used. The file name
    refers to a file that did not exist at some point, but by the time
    you get around to creating it, someone else may have beaten you to
    the punch.
    """
##    from warnings import warn as _warn
##    _warn("mktemp is a potential security risk to your program",
##          RuntimeWarning, stacklevel=2)
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    # Inherently race-prone: the name is only tested, never reserved.
    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, prefix + name + suffix)
        if not _exists(file):
            return file
    raise IOError, (_errno.EEXIST, "No usable temporary filename found")
class _TemporaryFileWrapper:
    """Temporary file wrapper
    This class provides a wrapper around files opened for
    temporary use. In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """
    def __init__(self, file, name, delete=True):
        # file: the underlying open file object being wrapped.
        # name: its path, kept so close() can unlink it.
        # delete: whether close() removes the file from disk.
        self.file = file
        self.name = name
        self.close_called = False
        self.delete = delete
    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if not issubclass(type(a), type(0)):
            # Caching via setattr means subsequent lookups bypass
            # __getattr__ entirely.
            setattr(self, name, a)
        return a
    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self
    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special. We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out. Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        unlink = _os.unlink
        def close(self):
            # Idempotent: only the first call closes and (optionally)
            # unlinks the file.
            if not self.close_called:
                self.close_called = True
                self.file.close()
                if self.delete:
                    self.unlink(self.name)
        def __del__(self):
            self.close()
        # Need to trap __exit__ as well to ensure the file gets
        # deleted when used in a with statement
        def __exit__(self, exc, value, tb):
            result = self.file.__exit__(exc, value, tb)
            self.close()
            return result
    else:
        def __exit__(self, exc, value, tb):
            # On NT the OS deletes the file on close (O_TEMPORARY).
            self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="",
                       prefix=template, dir=None, delete=True):
    """Create and return a named temporary file.

    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to os.fdopen (default "w+b").
    'bufsize' -- the buffer size argument to os.fdopen (default -1).
    'delete' -- whether the file is deleted on close (default True).

    The file is created exactly as mkstemp() would create it.  The
    returned object has a file-like interface; the file's name is
    accessible as its .name attribute.  Unless 'delete' is false, the
    file is removed automatically when it is closed.
    """
    if dir is None:
        dir = gettempdir()

    flags = _bin_openflags if 'b' in mode else _text_openflags
    # Windows can delete the file for us at close time via O_TEMPORARY.
    if delete and _os.name == 'nt':
        flags |= _os.O_TEMPORARY

    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    handle = _os.fdopen(fd, mode, bufsize)
    return _TemporaryFileWrapper(handle, name, delete)
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile
else:
    def TemporaryFile(mode='w+b', bufsize=-1, suffix="",
                      prefix=template, dir=None):
        """Create and return a temporary file.
        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to os.fdopen (default "w+b").
        'bufsize' -- the buffer size argument to os.fdopen (default -1).
        The file is created as mkstemp() would do it.
        Returns an object with a file-like interface. The file has no
        name, and will cease to exist when it is closed.
        """
        if dir is None:
            dir = gettempdir()
        if 'b' in mode:
            flags = _bin_openflags
        else:
            flags = _text_openflags
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            # Unlink immediately: POSIX keeps the inode alive while the
            # descriptor is open, so the file is anonymous from birth.
            _os.unlink(name)
            return _os.fdopen(fd, mode, bufsize)
        except:
            # fdopen/unlink failed: don't leak the descriptor.
            _os.close(fd)
            raise
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from
    StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.
    """
    _rolled = False
    def __init__(self, max_size=0, mode='w+b', bufsize=-1,
                 suffix="", prefix=template, dir=None):
        # Data is spooled in memory until it exceeds max_size bytes
        # (0 means "never roll over on size"; a fileno() request still
        # forces the rollover).  The TemporaryFile arguments are saved
        # until rollover actually happens.
        self._file = _StringIO()
        self._max_size = max_size
        self._rolled = False
        self._TemporaryFileArgs = (mode, bufsize, suffix, prefix, dir)
    def _check(self, file):
        # Roll over to a real file once the spool grows past max_size.
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()
    def rollover(self):
        # Copy the in-memory contents into a real TemporaryFile and
        # continue from the same stream position.  Idempotent.
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(*self._TemporaryFileArgs)
        del self._TemporaryFileArgs
        newfile.write(file.getvalue())
        newfile.seek(file.tell(), 0)
        self._rolled = True
    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from a
    # _StringIO instance to a real file. So we list
    # all the methods directly.
    # Context management protocol
    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self
    def __exit__(self, exc, value, tb):
        self._file.close()
    # file protocol
    def __iter__(self):
        return self._file.__iter__()
    def close(self):
        self._file.close()
    @property
    def closed(self):
        return self._file.closed
    @property
    def encoding(self):
        return self._file.encoding
    def fileno(self):
        # A real OS-level descriptor is required, so force the rollover.
        self.rollover()
        return self._file.fileno()
    def flush(self):
        self._file.flush()
    def isatty(self):
        return self._file.isatty()
    @property
    def mode(self):
        return self._file.mode
    @property
    def name(self):
        return self._file.name
    @property
    def newlines(self):
        return self._file.newlines
    def next(self):
        # NOTE(review): this returns the bound .next method instead of
        # calling it, unlike every other delegator in this class -- it
        # looks like a bug; confirm before relying on next() iteration.
        return self._file.next
    def read(self, *args):
        return self._file.read(*args)
    def readline(self, *args):
        return self._file.readline(*args)
    def readlines(self, *args):
        return self._file.readlines(*args)
    def seek(self, *args):
        self._file.seek(*args)
    @property
    def softspace(self):
        return self._file.softspace
    def tell(self):
        return self._file.tell()
    def truncate(self):
        # Note: unlike file.truncate(), no size argument is accepted.
        self._file.truncate()
    def write(self, s):
        file = self._file
        rv = file.write(s)
        # Every write may push the spool past max_size.
        self._check(file)
        return rv
    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv
    def xreadlines(self, *args):
        return self._file.xreadlines(*args)
| gpl-2.0 |
o5k/openerp-oemedical-v0.1 | openerp/addons/stock/wizard/stock_inventory_line_split.py | 64 | 4976 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class stock_inventory_line_split(osv.osv_memory):
    """Wizard that splits one stock inventory line into several lines,
    one per production lot / serial number (new or existing)."""
    _inherit = "stock.move.split"
    _name = "stock.inventory.line.split"
    _description = "Split inventory lines"
    _columns = {
        # Lots to create: one wizard row per new serial number.
        'line_ids': fields.one2many('stock.inventory.line.split.lines', 'wizard_id', 'Serial Numbers'),
        # Existing lots to assign quantities to.
        'line_exist_ids': fields.one2many('stock.inventory.line.split.lines', 'wizard_exist_id', 'Serial Numbers'),
    }
    def default_get(self, cr, uid, fields, context=None):
        # Pre-fill the wizard from the inventory line it was opened on
        # (context['active_id']): product, UoM and quantity.
        if context is None:
            context = {}
        record_id = context and context.get('active_id',False)
        res = {}
        line = self.pool.get('stock.inventory.line').browse(cr, uid, record_id, context=context)
        if 'product_id' in fields:
            res.update({'product_id':line.product_id.id})
        if 'product_uom' in fields:
            res.update({'product_uom': line.product_uom.id})
        if 'qty' in fields:
            res.update({'qty': line.product_qty})
        return res
    def split(self, cr, uid, ids, line_ids, context=None):
        """ To split stock inventory lines according to serial numbers.
        :param line_ids: the ID or list of IDs of inventory lines we want to split
        :return: list of IDs of the newly created (copied) inventory lines
        """
        if context is None:
            context = {}
        assert context.get('active_model') == 'stock.inventory.line',\
             'Incorrect use of the inventory line split wizard.'
        prodlot_obj = self.pool.get('stock.production.lot')
        # NOTE(review): ir_sequence_obj is fetched but never used.
        ir_sequence_obj = self.pool.get('ir.sequence')
        line_obj = self.pool.get('stock.inventory.line')
        new_line = []
        for data in self.browse(cr, uid, ids, context=context):
            for inv_line in line_obj.browse(cr, uid, line_ids, context=context):
                line_qty = inv_line.product_qty
                quantity_rest = inv_line.product_qty
                new_line = []
                # Take either the "existing lots" grid or the "new lots" grid.
                if data.use_exist:
                    lines = [l for l in data.line_exist_ids if l]
                else:
                    lines = [l for l in data.line_ids if l]
                for line in lines:
                    quantity = line.quantity
                    if quantity <= 0 or line_qty == 0:
                        continue
                    quantity_rest -= quantity
                    # Asked for more than remains: stop splitting here.
                    if quantity_rest < 0:
                        quantity_rest = quantity
                        break
                    default_val = {
                        'product_qty': quantity,
                    }
                    # While quantity remains, copy the line for this lot;
                    # the last lot re-uses the original line itself.
                    if quantity_rest > 0:
                        current_line = line_obj.copy(cr, uid, inv_line.id, default_val)
                        new_line.append(current_line)
                    if quantity_rest == 0:
                        current_line = inv_line.id
                    prodlot_id = False
                    if data.use_exist:
                        prodlot_id = line.prodlot_id.id
                    if not prodlot_id:
                        # No existing lot selected: create one on the fly.
                        prodlot_id = prodlot_obj.create(cr, uid, {
                            'name': line.name,
                            'product_id': inv_line.product_id.id},
                            context=context)
                    line_obj.write(cr, uid, [current_line], {'prod_lot_id': prodlot_id})
                    # NOTE(review): 'prodlot' is browsed but never used.
                    prodlot = prodlot_obj.browse(cr, uid, prodlot_id)
                # Whatever quantity was not covered by lots stays on the
                # original line.
                update_val = {}
                if quantity_rest > 0:
                    update_val['product_qty'] = quantity_rest
                    line_obj.write(cr, uid, [inv_line.id], update_val)
        return new_line
class stock_inventory_split_lines(osv.osv_memory):
    """One lot/serial row of the inventory-line split wizard."""
    _inherit = "stock.move.split.lines"
    _name = "stock.inventory.line.split.lines"
    _description = "Inventory Split lines"
    _columns = {
        # Back-links to the wizard: one field per grid
        # (new lots vs. existing lots).
        'wizard_id': fields.many2one('stock.inventory.line.split', 'Parent Wizard'),
        'wizard_exist_id': fields.many2one('stock.inventory.line.split', 'Parent Wizard'),
    }
| agpl-3.0 |
athompso/ansible-modules-core | cloud/openstack/os_ironic_node.py | 131 | 12309 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015, Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_ironic_node
short_description: Activate/Deactivate Bare Metal Resources from OpenStack
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Deploy to nodes controlled by Ironic.
options:
state:
description:
- Indicates desired state of the resource
choices: ['present', 'absent']
default: present
deploy:
description:
- Indicates if the resource should be deployed. Allows for deployment
logic to be disengaged and control of the node power or maintenance
state to be changed.
choices: ['true', 'false']
default: true
uuid:
description:
- globally unique identifier (UUID) to be given to the resource.
required: false
default: None
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the
endpoint URL for the Ironic API. Use with "auth" and "auth_type"
settings set to None.
required: false
default: None
config_drive:
description:
- A configdrive file or HTTP(S) URL that will be passed along to the
node.
required: false
default: None
instance_info:
description:
- Definition of the instance information which is used to deploy
the node. This information is only required when an instance is
set to present.
suboptions:
image_source:
description:
- An HTTP(S) URL where the image can be retrieved from.
image_checksum:
description:
- The checksum of image_source.
image_disk_format:
description:
- The type of image that has been requested to be deployed.
power:
description:
- A setting to allow power state to be asserted allowing nodes
that are not yet deployed to be powered on, and nodes that
are deployed to be powered off.
choices: ['present', 'absent']
default: present
maintenance:
description:
- A setting to allow the direct control if a node is in
maintenance mode.
required: false
default: false
maintenance_reason:
description:
- A string expression regarding the reason a node is in a
maintenance mode.
required: false
default: None
'''
EXAMPLES = '''
# Activate a node by booting an image with a configdrive attached
os_ironic_node:
cloud: "openstack"
uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
state: present
power: present
deploy: True
maintenance: False
config_drive: "http://192.168.1.1/host-configdrive.iso"
instance_info:
image_source: "http://192.168.1.1/deploy_image.img"
image_checksum: "356a6b55ecc511a20c33c946c4e678af"
image_disk_format: "qcow"
delegate_to: localhost
'''
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
# TODO(TheJulia): Change this over to use the machine patch method
# in shade once it is available.
def _prepare_instance_info_patch(instance_info):
patch = []
patch.append({
'op': 'replace',
'path': '/instance_info',
'value': instance_info
})
return patch
def _is_true(value):
true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on']
if value in true_values:
return True
return False
def _is_false(value):
false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off']
if value in false_values:
return True
return False
def _check_set_maintenance(module, cloud, node):
    """Reconcile the node's maintenance flag with the requested one.

    May terminate the module early via exit_json when a change was made
    that precludes further action.  Returns True only when maintenance
    mode was just switched off, False otherwise.
    """
    if _is_true(module.params['maintenance']):
        if _is_false(node['maintenance']):
            cloud.set_machine_maintenance_state(
                node['uuid'],
                True,
                reason=module.params['maintenance_reason'])
            module.exit_json(changed=True, msg="Node has been set into "
                                               "maintenance mode")
        else:
            # User has requested maintenance state, node is already in the
            # desired state, checking to see if the reason has changed.
            # NOTE(review): 'not in' is a substring test, not inequality --
            # a stored reason that contains the requested reason is treated
            # as unchanged.  '!=' was probably intended; confirm upstream.
            if (str(node['maintenance_reason']) not in
                    str(module.params['maintenance_reason'])):
                cloud.set_machine_maintenance_state(
                    node['uuid'],
                    True,
                    reason=module.params['maintenance_reason'])
                module.exit_json(changed=True, msg="Node maintenance reason "
                                                   "updated, cannot take any "
                                                   "additional action.")
    elif _is_false(module.params['maintenance']):
        if node['maintenance'] is True:
            cloud.remove_machine_from_maintenance(node['uuid'])
            return True
    else:
        module.fail_json(msg="maintenance parameter was set but a valid "
                             "the value was not recognized.")
    return False
def _check_set_power_state(module, cloud, node):
    """Reconcile the node's power state with the requested 'power' param.

    Exits the module directly when powering a running node off; returns
    True when power was changed here, False when nothing was done.
    """
    if 'power on' in str(node['power_state']):
        if _is_false(module.params['power']):
            # User has requested the node be powered off.
            cloud.set_machine_power_off(node['uuid'])
            module.exit_json(changed=True, msg="Power requested off")
    if 'power off' in str(node['power_state']):
        if (_is_false(module.params['power']) and
                _is_false(module.params['state'])):
            return False
        # NOTE(review): the condition below is identical to the one above,
        # so this exit_json branch is unreachable dead code; the message
        # also contains an unfilled '%s'.  The intent was probably
        # "power off requested but state present" -- confirm upstream.
        if (_is_false(module.params['power']) and
                _is_false(module.params['state'])):
            module.exit_json(
                changed=False,
                msg="Power for node is %s, node must be reactivated "
                    "OR set to state absent"
            )
        # In the event the power has been toggled on and
        # deployment has been requested, we need to skip this
        # step.
        if (_is_true(module.params['power']) and
                _is_false(module.params['deploy'])):
            # Node is powered down when it is not awaiting to be provisioned
            cloud.set_machine_power_on(node['uuid'])
            return True
    # Default False if no action has been taken.
    return False
def main():
    """Ansible entry point.

    Reconciles an Ironic bare-metal node with the requested state:
    maintenance flag, power state, and (de)provisioning via shade.
    Every path terminates through module.exit_json/fail_json.
    """
    argument_spec = openstack_full_argument_spec(
        uuid=dict(required=False),
        name=dict(required=False),
        instance_info=dict(type='dict', required=False),
        config_drive=dict(required=False),
        ironic_url=dict(required=False),
        state=dict(required=False, default='present'),
        maintenance=dict(required=False),
        maintenance_reason=dict(required=False),
        power=dict(required=False, default='present'),
        deploy=dict(required=False, default=True),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    # In noauth mode an explicit Ironic endpoint must be supplied.
    if (module.params['auth_type'] in [None, 'None'] and
            module.params['ironic_url'] is None):
        module.fail_json(msg="Authentication appears disabled, Please "
                             "define an ironic_url parameter")
    if (module.params['ironic_url'] and
            module.params['auth_type'] in [None, 'None']):
        module.params['auth'] = dict(
            endpoint=module.params['ironic_url']
        )
    node_id = _choose_id_value(module)
    if not node_id:
        module.fail_json(msg="A uuid or name value must be defined "
                             "to use this module.")
    try:
        cloud = shade.operator_cloud(**module.params)
        node = cloud.get_machine(node_id)
        if node is None:
            module.fail_json(msg="node not found")
        uuid = node['uuid']
        instance_info = module.params['instance_info']
        changed = False
        # User has requested desired state to be the maintenance state.
        # BUG FIX: this previously used "is 'maintenance'" -- an identity
        # comparison that is effectively never true for a user-supplied
        # string -- so the maintenance state was never recognized here.
        if module.params['state'] == 'maintenance':
            module.params['maintenance'] = True
        if node['provision_state'] in [
                'cleaning',
                'deleting',
                'wait call-back']:
            module.fail_json(msg="Node is in %s state, cannot act upon the "
                                 "request as the node is in a transition "
                                 "state" % node['provision_state'])
        # TODO(TheJulia) This is in-development code, that requires
        # code in the shade library that is still in development.
        if _check_set_maintenance(module, cloud, node):
            # NOTE(review): "in 'active'" is a substring test (it also
            # matches e.g. ''); equality was probably intended.
            if node['provision_state'] in 'active':
                module.exit_json(changed=True,
                                 result="Maintenance state changed")
            changed = True
            node = cloud.get_machine(node_id)
        if _check_set_power_state(module, cloud, node):
            changed = True
            node = cloud.get_machine(node_id)
        if _is_true(module.params['state']):
            if _is_false(module.params['deploy']):
                module.exit_json(
                    changed=changed,
                    result="User request has explicitly disabled "
                           "deployment logic"
                )
            if 'active' in node['provision_state']:
                module.exit_json(
                    changed=changed,
                    result="Node already in an active state."
                )
            if instance_info is None:
                module.fail_json(
                    changed=changed,
                    msg="When setting an instance to present, "
                        "instance_info is a required variable.")
            # TODO(TheJulia): Update instance info, however info is
            # deployment specific. Perhaps consider adding rebuild
            # support, although there is a known desire to remove
            # rebuild support from Ironic at some point in the future.
            patch = _prepare_instance_info_patch(instance_info)
            cloud.set_node_instance_info(uuid, patch)
            cloud.validate_node(uuid)
            cloud.activate_node(uuid, module.params['config_drive'])
            # TODO(TheJulia): Add more error checking and a wait option.
            # We will need to loop, or just add the logic to shade,
            # although this could be a very long running process as
            # baremetal deployments are not a "quick" task.
            module.exit_json(changed=changed, result="node activated")
        elif _is_false(module.params['state']):
            # NOTE(review): 'not in "deleted"' is a substring test as well.
            if node['provision_state'] not in "deleted":
                cloud.purge_node_instance_info(uuid)
                cloud.deactivate_node(uuid)
                module.exit_json(changed=True, result="deleted")
            else:
                module.exit_json(changed=False, result="node not found")
        else:
            module.fail_json(msg="State must be present, absent, "
                                 "maintenance, off")
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
| gpl-3.0 |
raildo/nova | nova/virt/vmwareapi/error_util.py | 59 | 1118 | # Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception classes specific for the VMware driver.
"""
from nova import exception
from nova.i18n import _
class NoRootDiskDefined(exception.NovaException):
    """Raised when an instance has no root disk defined."""
    msg_fmt = _("No root disk defined.")
class PbmDefaultPolicyUnspecified(exception.Invalid):
    """Raised when PBM is enabled but no default PBM policy is configured."""
    msg_fmt = _("Default PBM policy is required if PBM is enabled.")
class PbmDefaultPolicyDoesNotExist(exception.NovaException):
    """Raised when the configured default PBM policy is missing on the backend."""
    msg_fmt = _("The default PBM policy doesn't exist on the backend.")
| apache-2.0 |
The-Compiler/qutebrowser | tests/helpers/messagemock.py | 1 | 2576 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""pytest helper to monkeypatch the message module."""
import logging
import attr
import pytest
from qutebrowser.utils import usertypes, message
@attr.s
class Message:
    """Information about a shown message."""
    level = attr.ib()  # the usertypes.MessageLevel the message was shown with
    text = attr.ib()  # the message text
class MessageMock:
    """Helper object for message_mock.
    Attributes:
        Message: A object representing a message.
        messages: A list of Message objects.
    """
    def __init__(self):
        self.messages = []
    def _record_message(self, level, text):
        # Mirror each message into a dedicated logger (so it shows up in
        # captured log output) and remember it for getmsg().
        log_levels = {
            usertypes.MessageLevel.error: logging.ERROR,
            usertypes.MessageLevel.info: logging.INFO,
            usertypes.MessageLevel.warning: logging.WARNING,
        }
        log_level = log_levels[level]
        logging.getLogger('messagemock').log(log_level, text)
        self.messages.append(Message(level, text))
    def getmsg(self, level=None):
        """Get the only message in self.messages.
        Raises ValueError if there are multiple or no messages.
        Args:
            level: The message level to check against, or None.
        """
        # NOTE(review): the docstring promises ValueError, but these
        # asserts raise AssertionError -- confirm the intended contract.
        assert len(self.messages) == 1
        msg = self.messages[0]
        if level is not None:
            assert msg.level == level
        return msg
    def patch(self):
        """Start recording messages."""
        message.global_bridge.show_message.connect(self._record_message)
        # Mark the bridge as connected -- presumably so the message module
        # doesn't treat it as having no consumers; verify against message.py.
        message.global_bridge._connected = True
    def unpatch(self):
        """Stop recording messages."""
        message.global_bridge.show_message.disconnect(self._record_message)
@pytest.fixture
def message_mock():
    """Fixture to get a MessageMock.

    Connects the mock to the global message bridge for the duration of
    the test and always disconnects it afterwards so one test's mock
    cannot leak into the next.
    """
    mmock = MessageMock()
    mmock.patch()
    yield mmock
    mmock.unpatch()
| gpl-3.0 |
yxl/emscripten-calligra-mobile | 3rdparty/google-breakpad/src/tools/gyp/pylib/gyp/generator/android.py | 25 | 43345 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
# Values gyp substitutes for its built-in variables; these are the
# Android-specific expansions (mostly deferred to make-time $(...) vars).
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
}

# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Appended once at the end of the top-level GypAndroid.mk.
SHARED_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""

# Written at the top of every generated .mk file.
header = """\
# This file is generated by gyp; do not edit.
"""

# Include paths the Android build system adds on its own; such paths are
# filtered out of LOCAL_C_INCLUDES (see NormalizeIncludePaths).
android_standard_include_paths = set([
    # JNI_H_INCLUDE in build/core/binary.mk
    'dalvik/libnativehelper/include/nativehelper',
    # from SRC_HEADERS in build/core/config.mk
    'system/core/include',
    'hardware/libhardware/include',
    'hardware/libhardware_legacy/include',
    'hardware/ril/include',
    'dalvik/libnativehelper/include',
    'frameworks/native/include',
    'frameworks/native/opengl/include',
    'frameworks/base/include',
    'frameworks/base/opengl/include',
    'frameworks/base/native/include',
    'external/skia/include',
    # TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
    'bionic/libc/arch-arm/include',
    'bionic/libc/include',
    'bionic/libstdc++/include',
    'bionic/libc/kernel/common',
    'bionic/libc/kernel/arch-arm',
    'bionic/libm/include',
    'bionic/libm/include/arm',
    'bionic/libthread_db/include',
    ])

# Map gyp target types to Android module classes.
MODULE_CLASSES = {
    'static_library': 'STATIC_LIBRARIES',
    'shared_library': 'SHARED_LIBRARIES',
    'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
  """Return True when *ext* is a file extension the make backend compiles
  as C++ ('cxx')."""
  kind = make.COMPILABLE_EXTENSIONS.get(ext)
  return kind == 'cxx'
def Sourceify(path):
  """Convert a path to its source directory form.

  The Android backend does not support options.generator_output, so this
  conversion is the identity: the input path is returned unchanged.
  """
  return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
# Populated by AndroidMkWriter.Write() as each target is generated.
target_outputs = {}

# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
  """AndroidMkWriter packages up the writing of one target-specific Android.mk.

  Its only real entry point is Write(), and is mostly used for namespacing.
  """

  def __init__(self, android_top_dir):
    # Root of the Android source tree; used to relativize absolute include
    # paths and to filter ldflags that point into the tree.
    self.android_top_dir = android_top_dir
  def Write(self, qualified_target, base_path, output_filename, spec, configs,
            part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      base_path: path relative to source root we're building in, used to
                 resolve target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'

    Returns:
      The Android module name chosen for this target.

    Side effects: writes output_filename and records this target in the
    module-level target_outputs / target_link_deps maps.
    """
    make.ensure_directory_exists(output_filename)

    self.fp = open(output_filename, 'w')

    self.fp.write(header)

    self.qualified_target = qualified_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []

    self.android_class = MODULE_CLASSES.get(self.type, 'NONE')
    self.android_module = self.ComputeAndroidModule(spec)
    (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
    self.output = self.output_binary = self.ComputeOutput(spec)

    # Standard header.
    self.WriteLn('include $(CLEAR_VARS)\n')

    # Module class and name.
    self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
    self.WriteLn('LOCAL_MODULE := ' + self.android_module)
    # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
    # The library module classes fail if the stem is set. ComputeOutputParts
    # makes sure that stem == modulename in these cases.
    if self.android_stem != self.android_module:
      self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
    self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
    self.WriteLn('LOCAL_MODULE_TAGS := optional')
    if self.toolset == 'host':
      self.WriteLn('LOCAL_IS_HOST_MODULE := true')

    # Grab output directories; needed for Actions and Rules.
    self.WriteLn('gyp_intermediate_dir := $(call local-intermediates-dir)')
    self.WriteLn('gyp_shared_intermediate_dir := '
                 '$(call intermediates-dir-for,GYP,shared)')
    self.WriteLn()

    # List files this target depends on so that actions/rules/copies/sources
    # can depend on the list.
    # TODO: doesn't pull in things through transitive link deps; needed?
    target_dependencies = [x[1] for x in deps if x[0] == 'path']
    self.WriteLn('# Make sure our deps are built first.')
    self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                   local_pathify=True)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs)

    # GYP generated outputs.
    self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)

    # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
    # on both our dependency targets and our generated files.
    self.WriteLn('# Make sure our deps and generated files are built first.')
    self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
                 '$(GYP_GENERATED_OUTPUTS)')
    self.WriteLn()

    # Sources.
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)

    self.WriteTarget(spec, configs, deps, link_deps, part_of_all)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)

    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)

    self.fp.close()
    return self.android_module
  def WriteActions(self, actions, extra_sources, extra_outputs):
    """Write Makefile code for any 'actions' from the gyp input.

    Args:
      actions: the 'actions' list from the gyp spec.
      extra_sources: a list that will be filled in with newly generated source
                     files, if any
      extra_outputs: a list that will be filled in with any outputs of these
                     actions (used to make other pieces dependent on these
                     actions)
    """
    for action in actions:
      name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                      action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Action for target "%s" writes output to local path '
                 '"%s".' % (self.target, out))
        dir = os.path.split(out)[0]  # NOTE(review): shadows the 'dir' builtin.
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs

      # Prepare the actual command.
      command = gyp.common.EncodePOSIXShellList(action['action'])
      if 'message' in action:
        quiet_cmd = 'Gyp action: %s ($@)' % action['message']
      else:
        quiet_cmd = 'Gyp action: %s ($@)' % name
      if len(dirs) > 0:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command

      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir. This replaces the gyp_*
      # variables for the action rule with an absolute version so that the
      # output goes in the right place.
      # Only write the gyp_* rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)' %
                   main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)' %
                   main_output)

      # Make cannot express prerequisites containing spaces, so reject them.
      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)

      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, ' '.join(map(self.LocalPathify, inputs))))
      self.WriteLn('\t@echo "%s"' % quiet_cmd)
      self.WriteLn('\t$(hide)%s\n' % command)
      for output in outputs[1:]:
        # Make each output depend on the main output, with an empty command
        # to force make to notice that the mtime has changed.
        self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))

      extra_outputs += outputs
      self.WriteLn()

    self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs):
    """Write Makefile code for any 'rules' from the gyp input.

    Args:
      rules: the 'rules' list from the gyp spec.
      extra_sources: a list that will be filled in with newly generated source
                     files, if any
      extra_outputs: a list that will be filled in with any outputs of these
                     rules (used to make other pieces dependent on these rules)
    """
    if len(rules) == 0:
      return
    # A phony aggregate target; appended to extra_sources so that merely
    # building the module forces every rule to run.
    rule_trigger = '%s_rule_trigger' % self.android_module

    did_write_rule = False
    for rule in rules:
      if len(rule.get('rule_sources', [])) == 0:
        continue
      did_write_rule = True
      name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                      rule['rule_name']))
      self.WriteLn('\n### Generated for rule "%s":' % name)
      self.WriteLn('# "%s":' % rule)
      inputs = rule.get('inputs')
      for rule_source in rule.get('rule_sources', []):
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        dirs = set()
        for out in outputs:
          if not out.startswith('$'):
            print ('WARNING: Rule for target %s writes output to local path %s'
                   % (self.target, out))
          dir = os.path.dirname(out)  # NOTE(review): shadows the builtin.
          if dir:
            dirs.add(dir)
        extra_outputs += outputs
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources.extend(outputs)

        # Substitute per-source expansions into each word of the action.
        components = []
        for component in rule['action']:
          component = self.ExpandInputRoot(component, rule_source_root,
                                           rule_source_dirname)
          if '$(RULE_SOURCES)' in component:
            component = component.replace('$(RULE_SOURCES)',
                                          rule_source)
          components.append(component)

        command = gyp.common.EncodePOSIXShellList(components)
        cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
        command = cd_action + command
        if dirs:
          command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

        # We set up a rule to build the first output, and then set up
        # a rule for each additional output to depend on the first.
        outputs = map(self.LocalPathify, outputs)
        main_output = outputs[0]
        self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
        self.WriteLn('%s: gyp_intermediate_dir := '
                     '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)'
                     % main_output)
        self.WriteLn('%s: gyp_shared_intermediate_dir := '
                     '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)'
                     % main_output)

        main_output_deps = self.LocalPathify(rule_source)
        if inputs:
          main_output_deps += ' '
          main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])

        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                     (main_output, main_output_deps))
        self.WriteLn('\t%s\n' % command)
        for output in outputs[1:]:
          self.WriteLn('%s: %s' % (output, main_output))
        self.WriteLn('.PHONY: %s' % (rule_trigger))
        self.WriteLn('%s: %s' % (rule_trigger, main_output))
        self.WriteLn('')
    if did_write_rule:
      extra_sources.append(rule_trigger)  # Force all rules to run.
      self.WriteLn('### Finished generating for all rules')
      self.WriteLn('')
  def WriteCopies(self, copies, extra_outputs):
    """Write Makefile code for any 'copies' from the gyp input.

    Args:
      copies: the 'copies' list from the gyp spec.
      extra_outputs: a list that will be filled in with any outputs of this
                     action (used to make other pieces dependent on this
                     action)
    """
    self.WriteLn('### Generated for copy rule.')

    variable = make.StringToMakefileVariable(self.qualified_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # The Android build system does not allow generation of files into the
        # source tree. The destination should start with a variable, which will
        # typically be $(gyp_intermediate_dir) or
        # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
        # because some of the gyp tests depend on this.
        if not copy['destination'].startswith('$'):
          print ('WARNING: Copy rule for target %s writes output to '
                 'local path %s' % (self.target, copy['destination']))

        # LocalPathify() calls normpath, stripping trailing slashes.
        path = Sourceify(self.LocalPathify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                          filename)))

        # $(ACP) is Android's acp copy tool; order-only prerequisite so a
        # rebuilt acp doesn't force re-copying.
        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                     (output, path))
        self.WriteLn('\t@echo Copying: $@')
        self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
        self.WriteLn('\t$(hide) $(ACP) -r $< $@')
        self.WriteLn()
        outputs.append(output)
    self.WriteLn('%s = %s' % (variable,
                              ' '.join(map(make.QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()
  def WriteSourceFlags(self, spec, configs):
    """Write out the flags and include paths used to compile source files for
    the current target.

    Only the default configuration's flags are used; Android.mk has no
    notion of gyp configurations.

    Args:
      spec, configs: input from gyp.
    """
    config = configs[spec['default_configuration']]
    extracted_includes = []

    self.WriteLn('\n# Flags passed to both C and C++ files.')
    # -I flags are pulled out of cflags and merged into LOCAL_C_INCLUDES.
    cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
        config.get('cflags'))
    extracted_includes.extend(includes_from_cflags)
    self.WriteList(cflags, 'MY_CFLAGS')

    cflags_c, includes_from_cflags_c = self.ExtractIncludesFromCFlags(
        config.get('cflags_c'))
    extracted_includes.extend(includes_from_cflags_c)
    self.WriteList(cflags_c, 'MY_CFLAGS_C')

    self.WriteList(config.get('defines'), 'MY_DEFS', prefix='-D',
                   quoter=make.EscapeCppDefine)
    self.WriteLn('LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)')

    # Undefine ANDROID for host modules
    # TODO: the source code should not use macro ANDROID to tell if it's host
    # or target module.
    if self.toolset == 'host':
      self.WriteLn('# Undefine ANDROID for host modules')
      self.WriteLn('LOCAL_CFLAGS += -UANDROID')

    self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
    includes = list(config.get('include_dirs', []))
    includes.extend(extracted_includes)
    includes = map(Sourceify, map(self.LocalPathify, includes))
    includes = self.NormalizeIncludePaths(includes)
    self.WriteList(includes, 'LOCAL_C_INCLUDES')
    self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
                 '$(LOCAL_C_INCLUDES)')

    self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
    self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS')
  def WriteSources(self, spec, configs, extra_sources):
    """Write Makefile code for any 'sources' from the gyp input.

    These are source files necessary to build the current target.
    We need to handle shared_intermediate directory source files as
    a special case by copying them to the intermediate directory and
    treating them as generated sources. Otherwise the Android build
    rules won't pick them up.

    Args:
      spec, configs: input from gyp.
      extra_sources: Sources generated from Actions or Rules.
    """
    sources = filter(make.Compilable, spec.get('sources', []))
    generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
    extra_sources = filter(make.Compilable, extra_sources)

    # Determine and output the C++ extension used by these sources.
    # We simply find the first C++ file and use that extension.
    all_sources = sources + extra_sources
    local_cpp_extension = '.cpp'
    for source in all_sources:
      (root, ext) = os.path.splitext(source)
      if IsCPPExtension(ext):
        local_cpp_extension = ext
        break
    if local_cpp_extension != '.cpp':
      self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

    # We need to move any non-generated sources that are coming from the
    # shared intermediate directory out of LOCAL_SRC_FILES and put them
    # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
    # that don't match our local_cpp_extension, since Android will only
    # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
    local_files = []
    for source in sources:
      (root, ext) = os.path.splitext(source)
      if '$(gyp_shared_intermediate_dir)' in source:
        extra_sources.append(source)
      elif '$(gyp_intermediate_dir)' in source:
        extra_sources.append(source)
      elif IsCPPExtension(ext) and ext != local_cpp_extension:
        extra_sources.append(source)
      else:
        local_files.append(os.path.normpath(os.path.join(self.path, source)))

    # For any generated source, if it is coming from the shared intermediate
    # directory then we add a Make rule to copy them to the local intermediate
    # directory first. This is because the Android LOCAL_GENERATED_SOURCES
    # must be in the local module intermediate directory for the compile rules
    # to work properly. If the file has the wrong C++ extension, then we add
    # a rule to copy that to intermediates and use the new version.
    final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
    # directory as header search path, for GCC searches headers in the
    # directory that contains the source file by default.
    origin_src_dirs = []
    for source in extra_sources:
      local_file = source
      if not '$(gyp_intermediate_dir)/' in local_file:
        basename = os.path.basename(local_file)
        local_file = '$(gyp_intermediate_dir)/' + basename
      (root, ext) = os.path.splitext(local_file)
      if IsCPPExtension(ext) and ext != local_cpp_extension:
        local_file = root + local_cpp_extension
      if local_file != source:
        self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
        self.WriteLn('\tmkdir -p $(@D); cp $< $@')
        origin_src_dirs.append(os.path.dirname(source))
      final_generated_sources.append(local_file)

    # We add back in all of the non-compilable stuff to make sure that the
    # make rules have dependencies on them.
    final_generated_sources.extend(generated_not_sources)
    self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

    origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
    origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
    self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

    self.WriteList(local_files, 'LOCAL_SRC_FILES')

    # Write out the flags used to compile the source; this must be done last
    # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
    self.WriteSourceFlags(spec, configs)
  def ComputeAndroidModule(self, spec):
    """Return the Android module name used for a gyp spec.

    We use the complete qualified target name to avoid collisions between
    duplicate targets in different directories. We also add a suffix to
    distinguish gyp-generated module names.
    """

    if self.type == 'shared_library':
      # For reasons of convention, the Android build system requires that all
      # shared library modules are named 'libfoo' when generating -l flags.
      prefix = 'lib_'
    else:
      prefix = ''

    if spec['toolset'] == 'host':
      suffix = '_host_gyp'
    else:
      suffix = '_gyp'

    if self.path:
      name = '%s%s_%s%s' % (prefix, self.path, self.target, suffix)
    else:
      name = '%s%s%s' % (prefix, self.target, suffix)

    # StringToMakefileVariable sanitizes the name into a legal make identifier.
    return make.StringToMakefileVariable(name)
  def ComputeOutputParts(self, spec):
    """Return the 'output basename' of a gyp spec, split into filename + ext.

    Android libraries must be named the same thing as their module name,
    otherwise the linker can't find them, so product_name and so on must be
    ignored if we are building a library, and the "lib" prepending is
    not done for Android.
    """
    assert self.type != 'loadable_module' # TODO: not supported?

    target = spec['target_name']
    target_prefix = ''
    target_ext = ''
    if self.type == 'static_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.a'
    elif self.type == 'shared_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.so'
    elif self.type == 'none':
      # 'none' targets still produce a stamp file (see WriteTarget).
      target_ext = '.stamp'
    elif self.type != 'executable':
      print ("ERROR: What output file should be generated?",
             "type", self.type, "target", target)

    # product_* customizations only apply to non-library targets (see
    # docstring); libraries must keep stem == module name.
    if self.type != 'static_library' and self.type != 'shared_library':
      target_prefix = spec.get('product_prefix', target_prefix)
      target = spec.get('product_name', target)
      product_ext = spec.get('product_extension')
      if product_ext:
        target_ext = '.' + product_ext

    target_stem = target_prefix + target
    return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
  def ComputeOutput(self, spec):
    """Return the 'output' (full output path) of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
      '$(obj)/baz/libfoobar.so'
    """
    if self.type == 'executable' and self.toolset == 'host':
      # We install host executables into shared_intermediate_dir so they can be
      # run by gyp rules that refer to PRODUCT_DIR.
      path = '$(gyp_shared_intermediate_dir)'
    elif self.type == 'shared_library':
      if self.toolset == 'host':
        path = '$(HOST_OUT_INTERMEDIATE_LIBRARIES)'
      else:
        path = '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)'
    else:
      # Other targets just get built into their intermediate dir.
      if self.toolset == 'host':
        # Trailing 'true' selects the host variant of the intermediates dir.
        path = '$(call intermediates-dir-for,%s,%s,true)' % (self.android_class,
                                                             self.android_module)
      else:
        path = '$(call intermediates-dir-for,%s,%s)' % (self.android_class,
                                                        self.android_module)

    assert spec.get('product_dir') is None # TODO: not supported?
    return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeLdFlags(self, ld_flags):
""" Clean up ldflags from gyp file.
Remove any ldflags that contain android_top_dir.
Args:
ld_flags: ldflags from gyp files.
Returns:
clean ldflags
"""
clean_ldflags = []
for flag in ld_flags:
if self.android_top_dir in flag:
continue
clean_ldflags.append(flag)
return clean_ldflags
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory;
filter out include paths that are already brought in by the Android build
system.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
# Filter out the Android standard search path.
if path not in android_standard_include_paths:
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
if cflags:
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
def ComputeAndroidLibraryModuleNames(self, libraries):
"""Compute the Android module names from libraries, ie spec.get('libraries')
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules)
"""
static_lib_modules = []
dynamic_lib_modules = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
# "-lstlport" -> libstlport
if lib.startswith('-l'):
if lib.endswith('_static'):
static_lib_modules.append('lib' + lib[2:])
else:
dynamic_lib_modules.append('lib' + lib[2:])
return (static_lib_modules, dynamic_lib_modules)
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
  def WriteTargetFlags(self, spec, configs, link_deps):
    """Write Makefile code to specify the link flags and library dependencies.

    spec, configs: input from gyp.
    link_deps: link dependency list; see ComputeDeps()
    """
    config = configs[spec['default_configuration']]

    # LDFLAGS
    ldflags = list(config.get('ldflags', []))
    # Library files named directly in ldflags are converted into Android
    # module references as well.
    static_flags, dynamic_flags = self.ComputeAndroidLibraryModuleNames(
        ldflags)
    self.WriteLn('')
    self.WriteList(self.NormalizeLdFlags(ldflags), 'LOCAL_LDFLAGS')

    # Libraries (i.e. -lfoo)
    libraries = gyp.common.uniquer(spec.get('libraries', []))
    static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
        libraries)

    # Link dependencies (i.e. libfoo.a, libfoo.so)
    static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
    shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
    self.WriteLn('')
    self.WriteList(static_flags + static_libs + static_link_deps,
                   'LOCAL_STATIC_LIBRARIES')
    self.WriteLn('# Enable grouping to fix circular references')
    self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
    self.WriteLn('')
    self.WriteList(dynamic_flags + dynamic_libs + shared_link_deps,
                   'LOCAL_SHARED_LIBRARIES')
  def WriteTarget(self, spec, configs, deps, link_deps, part_of_all):
    """Write Makefile code to produce the final target of the gyp spec.

    spec, configs: input from gyp.
    deps, link_deps: dependency lists; see ComputeDeps()
    part_of_all: flag indicating this target is part of 'all'
    """
    self.WriteLn('### Rules for final target.')

    if self.type != 'none':
      self.WriteTargetFlags(spec, configs, link_deps)

    # Add to the set of targets which represent the gyp 'all' target. We use the
    # name 'gyp_all_modules' as the Android build system doesn't allow the use
    # of the Make target 'all' and because 'all_modules' is the equivalent of
    # the Make target 'all' on Android.
    if part_of_all:
      self.WriteLn('# Add target alias to "gyp_all_modules" target.')
      self.WriteLn('.PHONY: gyp_all_modules')
      self.WriteLn('gyp_all_modules: %s' % self.android_module)
      self.WriteLn('')

    # Add an alias from the gyp target name to the Android module name. This
    # simplifies manual builds of the target, and is required by the test
    # framework.
    self.WriteLn('# Alias gyp target name.')
    self.WriteLn('.PHONY: %s' % self.target)
    self.WriteLn('%s: %s' % (self.target, self.android_module))
    self.WriteLn('')

    # Add the command to trigger build of the target type depending
    # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
    # NOTE: This has to come last!
    modifier = ''
    if self.toolset == 'host':
      modifier = 'HOST_'
    if self.type == 'static_library':
      self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
    elif self.type == 'shared_library':
      self.WriteLn('LOCAL_PRELINK_MODULE := false')
      self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
    elif self.type == 'executable':
      if self.toolset == 'host':
        self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
      else:
        # Don't install target executables for now, as it results in them being
        # included in ROM. This can be revisited if there's a reason to install
        # them later.
        self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
      self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
    else:
      # 'none' targets: emit a stamp-file rule so dependents have something
      # concrete to depend on.
      self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
      self.WriteLn()
      self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
      self.WriteLn()
      self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
      self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) touch $@')
  def WriteList(self, value_list, variable=None, prefix='',
                quoter=make.QuoteIfNecessary, local_pathify=False):
    """Write a variable definition that is a list of values.

    E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
         foo = blaha blahb
    but in a pretty-printed style.
    """
    values = ''
    if value_list:
      value_list = [quoter(prefix + l) for l in value_list]
      if local_pathify:
        value_list = [self.LocalPathify(l) for l in value_list]
      values = ' \\\n\t' + ' \\\n\t'.join(value_list)
    # An empty list still emits 'VAR :=' so the variable is always defined.
    self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
  def LocalPathify(self, path):
    """Convert a subdirectory-relative path into a normalized path which starts
    with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).

    Absolute paths, or paths that contain variables, are just normalized.
    """
    if '$(' in path or os.path.isabs(path):
      # path is not a file in the project tree in this case, but calling
      # normpath is still important for trimming trailing slashes.
      return os.path.normpath(path)
    local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
    local_path = os.path.normpath(local_path)
    # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
    # - i.e. that the resulting path is still inside the project tree. The
    # path may legitimately have ended up containing just $(LOCAL_PATH), though,
    # so we don't look for a slash.
    assert local_path.startswith('$(LOCAL_PATH)'), (
           'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
    return local_path
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the target to regenerate the Makefile.

  Args:
    params: generator params dict; provides 'options', 'build_files_arg'
            and 'gyp_binary'.
    root_makefile: open file object for the top-level makefile.
    makefile_name: basename of the root makefile being generated.
    build_files: the gyp files that the build was generated from.
  """
  options = params['options']
  # Sort to avoid non-functional changes to makefile.
  build_files = sorted([os.path.join('$(LOCAL_PATH)', f) for f in build_files])
  build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                      for filename in params['build_files_arg']]
  build_files_args = [os.path.join('$(PRIVATE_LOCAL_PATH)', f)
                      for f in build_files_args]
  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  makefile_path = os.path.join('$(LOCAL_PATH)', makefile_name)
  if not gyp_binary.startswith(os.sep):
    # Make sure gyp can be invoked as a relative command (./gyp).
    gyp_binary = os.path.join('.', gyp_binary)
  root_makefile.write('GYP_FILES := \\\n %s\n\n' %
                      '\\\n '.join(map(Sourceify, build_files)))
  root_makefile.write('%s: PRIVATE_LOCAL_PATH := $(LOCAL_PATH)\n' %
                      makefile_path)
  root_makefile.write('%s: $(GYP_FILES)\n' % makefile_path)
  root_makefile.write('\techo ACTION Regenerating $@\n\t%s\n\n' %
                      gyp.common.EncodePOSIXShellList([gyp_binary, '-fandroid'] +
                                                      gyp.RegenerateFlags(options) +
                                                      build_files_args))
def GenerateOutput(target_list, target_dicts, data, params):
  """Entry point of the Android generator: write GypAndroid.mk plus one
  per-target .mk file, returning nothing.  Aborts (prints and returns)
  on duplicate Android module names.
  """
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  # NOTE(review): builddir_name, toolsets and srcdir (below) are computed but
  # never read in this function -- presumably leftovers; confirm before removal.
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile.  Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file
  # TODO: search for the first non-'Default' target.  This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'
  srcdir = '.'
  makefile_name = 'GypAndroid.mk' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  make.ensure_directory_exists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(header)
  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')
  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)
  build_files = set()
  include_list = set()
  android_modules = {}
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)
    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')
    spec = target_dicts[qualified_target]
    configs = spec['configurations']
    # A target is "part of all" when some requested gyp file needs it and it
    # has not opted out via suppress_wildcard.
    part_of_all = (qualified_target in needed_targets and
                   not int(spec.get('suppress_wildcard', False)))
    if limit_to_target_all and not part_of_all:
      continue
    writer = AndroidMkWriter(android_top_dir)
    android_module = writer.Write(qualified_target, base_path, output_file,
                                  spec, configs, part_of_all=part_of_all)
    # Android module names must be globally unique; bail out on collision.
    if android_module in android_modules:
      print ('ERROR: Android module names must be unique. The following '
             'targets both generate Android module name %s.\n  %s\n  %s' %
             (android_module, android_modules[android_module],
              qualified_target))
      return
    android_modules[android_module] = qualified_target
    # Our root_makefile lives at the source root.  Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)
  # Some tools need to know the absolute path of the top directory.
  root_makefile.write('GYP_ABS_ANDROID_TOP_DIR := $(shell pwd)\n')
  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
  root_makefile.write('\n')
  if generator_flags.get('auto_regeneration', True):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
  root_makefile.write(SHARED_FOOTER)
  root_makefile.close()
| gpl-2.0 |
jackrzhang/zulip | zerver/lib/timestamp.py | 16 | 1534 | import datetime
import calendar
from django.utils.timezone import utc as timezone_utc
class TimezoneNotUTCException(Exception):
    """Raised by verify_UTC when a datetime is naive or its offset is not UTC."""
    pass
def verify_UTC(dt: datetime.datetime) -> None:
    """Raise TimezoneNotUTCException unless dt is aware and its offset is UTC."""
    tz = dt.tzinfo
    if tz is None or tz.utcoffset(dt) != timezone_utc.utcoffset(dt):
        raise TimezoneNotUTCException("Datetime %s does not have a UTC timezone." % (dt,))
def convert_to_UTC(dt: datetime.datetime) -> datetime.datetime:
    """Return dt expressed in UTC; a naive dt is assumed to already be UTC."""
    if dt.tzinfo is not None:
        return dt.astimezone(timezone_utc)
    return dt.replace(tzinfo=timezone_utc)
def floor_to_hour(dt: datetime.datetime) -> datetime.datetime:
    """Truncate a UTC datetime down to the start of its hour."""
    verify_UTC(dt)
    # timetuple()[:4] in the original is exactly (year, month, day, hour).
    return datetime.datetime(dt.year, dt.month, dt.day, dt.hour,
                             tzinfo=timezone_utc)
def floor_to_day(dt: datetime.datetime) -> datetime.datetime:
    """Truncate a UTC datetime down to midnight of its day."""
    verify_UTC(dt)
    return datetime.datetime(dt.year, dt.month, dt.day, tzinfo=timezone_utc)
def ceiling_to_hour(dt: datetime.datetime) -> datetime.datetime:
    """Round a UTC datetime up to the next hour boundary (identity if already on one)."""
    rounded = floor_to_hour(dt)
    if rounded != dt:
        rounded += datetime.timedelta(hours=1)
    return rounded
def ceiling_to_day(dt: datetime.datetime) -> datetime.datetime:
    """Round a UTC datetime up to the next midnight (identity if already at midnight)."""
    rounded = floor_to_day(dt)
    if rounded != dt:
        rounded += datetime.timedelta(days=1)
    return rounded
def timestamp_to_datetime(timestamp: float) -> datetime.datetime:
    """Convert a POSIX timestamp (int/float/str-coercible) to an aware UTC datetime."""
    seconds = float(timestamp)
    return datetime.datetime.fromtimestamp(seconds, tz=timezone_utc)
def datetime_to_timestamp(dt: datetime.datetime) -> int:
    """Convert an aware UTC datetime to an integer POSIX timestamp.

    Raises TimezoneNotUTCException (via verify_UTC) for naive/non-UTC input.
    Sub-second precision is discarded, matching timegm's semantics.
    """
    verify_UTC(dt)
    utc_struct = dt.timetuple()
    return calendar.timegm(utc_struct)
| apache-2.0 |
openstack-hyper-v-python/numpy | numpy/core/tests/test_unicode.py | 69 | 12594 | from __future__ import division, absolute_import, print_function
import sys
from numpy.testing import *
from numpy.core import *
from numpy.compat import asbytes, sixu
# Guess the UCS length for this python interpreter
# and define a matching buffer_length(arr) helper that reports how many
# bytes back a unicode scalar or ndarray.
if sys.version_info[:2] >= (3, 3):
    # Python 3.3 uses a flexible string representation
    ucs4 = False
    def buffer_length(arr):
        # NOTE(review): `unicode` and `prod` are not defined in this module's
        # visible imports; presumably supplied by the star-imports above or a
        # py2 builtin -- confirm this branch actually runs without NameError.
        if isinstance(arr, unicode):
            arr = str(arr)
            # Size of one character = sizeof(s + "a") - sizeof(s).
            return (sys.getsizeof(arr+"a") - sys.getsizeof(arr)) * len(arr)
        v = memoryview(arr)
        if v.shape is None:
            return len(v) * v.itemsize
        else:
            return prod(v.shape) * v.itemsize
elif sys.version_info[0] >= 3:
    import array as _array
    # On narrow builds array('u') items are 2 bytes, on wide builds 4.
    ucs4 = (_array.array('u').itemsize == 4)
    def buffer_length(arr):
        if isinstance(arr, unicode):
            return _array.array('u').itemsize * len(arr)
        v = memoryview(arr)
        if v.shape is None:
            return len(v) * v.itemsize
        else:
            return prod(v.shape) * v.itemsize
else:
    # Python 2: probe the buffer size of a single unicode character.
    if len(buffer(sixu('u'))) == 4:
        ucs4 = True
    else:
        ucs4 = False
    def buffer_length(arr):
        if isinstance(arr, ndarray):
            return len(arr.data)
        return len(buffer(arr))
# In both cases below we need to make sure that the byte swapped value (as
# UCS4) is still a valid unicode:
# Value that can be represented in UCS2 interpreters
ucs2_value = sixu('\u0900')
# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
ucs4_value = sixu('\U00100900')
############################################################
# Creation tests
############################################################
class create_zeros(object):
    """Check the creation of zero-valued arrays.

    Mixin: concrete subclasses supply `ulen` (the unicode itemsize) and mix
    in TestCase for the assert methods.
    """
    def content_check(self, ua, ua_scalar, nbytes):
        # Check the length of the unicode base type
        self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
        # Check the length of the data buffer
        self.assertTrue(buffer_length(ua) == nbytes)
        # Small check that data in array element is ok
        self.assertTrue(ua_scalar == sixu(''))
        # Encode to ascii and double check
        self.assertTrue(ua_scalar.encode('ascii') == asbytes(''))
        # Check buffer lengths for scalars
        # NOTE(review): both branches assert 0 -- an empty scalar has no
        # payload regardless of UCS width, so the if/else is redundant.
        if ucs4:
            self.assertTrue(buffer_length(ua_scalar) == 0)
        else:
            self.assertTrue(buffer_length(ua_scalar) == 0)
    def test_zeros0D(self):
        """Check creation of 0-dimensional objects"""
        ua = zeros((), dtype='U%s' % self.ulen)
        self.content_check(ua, ua[()], 4*self.ulen)
    def test_zerosSD(self):
        """Check creation of single-dimensional objects"""
        ua = zeros((2,), dtype='U%s' % self.ulen)
        self.content_check(ua, ua[0], 4*self.ulen*2)
        self.content_check(ua, ua[1], 4*self.ulen*2)
    def test_zerosMD(self):
        """Check creation of multi-dimensional objects"""
        ua = zeros((2, 3, 4), dtype='U%s' % self.ulen)
        self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
        self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
# Concrete parameterizations of create_zeros for several itemsizes.
class test_create_zeros_1(create_zeros, TestCase):
    """Check the creation of zero-valued arrays (size 1)"""
    ulen = 1
class test_create_zeros_2(create_zeros, TestCase):
    """Check the creation of zero-valued arrays (size 2)"""
    ulen = 2
class test_create_zeros_1009(create_zeros, TestCase):
    """Check the creation of zero-valued arrays (size 1009)"""
    ulen = 1009
class create_values(object):
    """Check the creation of unicode arrays with values.

    Mixin: subclasses supply `ulen` (itemsize) and `ucs_value` (the repeated
    test character) and mix in TestCase.
    """
    def content_check(self, ua, ua_scalar, nbytes):
        # Check the length of the unicode base type
        self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
        # Check the length of the data buffer
        self.assertTrue(buffer_length(ua) == nbytes)
        # Small check that data in array element is ok
        self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
        # Encode to UTF-8 and double check
        self.assertTrue(ua_scalar.encode('utf-8') == \
                        (self.ucs_value*self.ulen).encode('utf-8'))
        # Check buffer lengths for scalars
        if ucs4:
            self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
        else:
            if self.ucs_value == ucs4_value:
                # In UCS2, the \U0010FFFF will be represented using a
                # surrogate *pair*
                self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
            else:
                # In UCS2, the \uFFFF will be represented using a
                # regular 2-byte word
                self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
    def test_values0D(self):
        """Check creation of 0-dimensional objects with values"""
        ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
        self.content_check(ua, ua[()], 4*self.ulen)
    def test_valuesSD(self):
        """Check creation of single-dimensional objects with values"""
        ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
        self.content_check(ua, ua[0], 4*self.ulen*2)
        self.content_check(ua, ua[1], 4*self.ulen*2)
    def test_valuesMD(self):
        """Check creation of multi-dimensional objects with values"""
        ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen)
        self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
        self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
# Concrete parameterizations of create_values over itemsize x UCS width.
class test_create_values_1_ucs2(create_values, TestCase):
    """Check the creation of valued arrays (size 1, UCS2 values)"""
    ulen = 1
    ucs_value = ucs2_value
class test_create_values_1_ucs4(create_values, TestCase):
    """Check the creation of valued arrays (size 1, UCS4 values)"""
    ulen = 1
    ucs_value = ucs4_value
class test_create_values_2_ucs2(create_values, TestCase):
    """Check the creation of valued arrays (size 2, UCS2 values)"""
    ulen = 2
    ucs_value = ucs2_value
class test_create_values_2_ucs4(create_values, TestCase):
    """Check the creation of valued arrays (size 2, UCS4 values)"""
    ulen = 2
    ucs_value = ucs4_value
class test_create_values_1009_ucs2(create_values, TestCase):
    """Check the creation of valued arrays (size 1009, UCS2 values)"""
    ulen = 1009
    ucs_value = ucs2_value
class test_create_values_1009_ucs4(create_values, TestCase):
    """Check the creation of valued arrays (size 1009, UCS4 values)"""
    ulen = 1009
    ucs_value = ucs4_value
############################################################
# Assignment tests
############################################################
class assign_values(object):
    """Check the assignment of unicode arrays with values.

    Same contract as create_values, but arrays start zeroed and the test
    value is assigned element-by-element afterwards.
    """
    def content_check(self, ua, ua_scalar, nbytes):
        # Check the length of the unicode base type
        self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
        # Check the length of the data buffer
        self.assertTrue(buffer_length(ua) == nbytes)
        # Small check that data in array element is ok
        self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
        # Encode to UTF-8 and double check
        self.assertTrue(ua_scalar.encode('utf-8') == \
                        (self.ucs_value*self.ulen).encode('utf-8'))
        # Check buffer lengths for scalars
        if ucs4:
            self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
        else:
            if self.ucs_value == ucs4_value:
                # In UCS2, the \U0010FFFF will be represented using a
                # surrogate *pair*
                self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
            else:
                # In UCS2, the \uFFFF will be represented using a
                # regular 2-byte word
                self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
    def test_values0D(self):
        """Check assignment of 0-dimensional objects with values"""
        ua = zeros((), dtype='U%s' % self.ulen)
        ua[()] = self.ucs_value*self.ulen
        self.content_check(ua, ua[()], 4*self.ulen)
    def test_valuesSD(self):
        """Check assignment of single-dimensional objects with values"""
        ua = zeros((2,), dtype='U%s' % self.ulen)
        ua[0] = self.ucs_value*self.ulen
        self.content_check(ua, ua[0], 4*self.ulen*2)
        ua[1] = self.ucs_value*self.ulen
        self.content_check(ua, ua[1], 4*self.ulen*2)
    def test_valuesMD(self):
        """Check assignment of multi-dimensional objects with values"""
        ua = zeros((2, 3, 4), dtype='U%s' % self.ulen)
        ua[0, 0, 0] = self.ucs_value*self.ulen
        self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
        ua[-1, -1, -1] = self.ucs_value*self.ulen
        self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
# Concrete parameterizations of assign_values over itemsize x UCS width.
class test_assign_values_1_ucs2(assign_values, TestCase):
    """Check the assignment of valued arrays (size 1, UCS2 values)"""
    ulen = 1
    ucs_value = ucs2_value
class test_assign_values_1_ucs4(assign_values, TestCase):
    """Check the assignment of valued arrays (size 1, UCS4 values)"""
    ulen = 1
    ucs_value = ucs4_value
class test_assign_values_2_ucs2(assign_values, TestCase):
    """Check the assignment of valued arrays (size 2, UCS2 values)"""
    ulen = 2
    ucs_value = ucs2_value
class test_assign_values_2_ucs4(assign_values, TestCase):
    """Check the assignment of valued arrays (size 2, UCS4 values)"""
    ulen = 2
    ucs_value = ucs4_value
class test_assign_values_1009_ucs2(assign_values, TestCase):
    """Check the assignment of valued arrays (size 1009, UCS2 values)"""
    ulen = 1009
    ucs_value = ucs2_value
class test_assign_values_1009_ucs4(assign_values, TestCase):
    """Check the assignment of valued arrays (size 1009, UCS4 values)"""
    ulen = 1009
    ucs_value = ucs4_value
############################################################
# Byteorder tests
############################################################
class byteorder_values:
    """Check the byteorder of unicode arrays in round-trip conversions.

    newbyteorder() reinterprets the same bytes under the opposite
    endianness, so a single swap must change the scalars and a double
    swap must restore the original array exactly.
    """
    def test_values0D(self):
        """Check byteorder of 0-dimensional objects"""
        ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
        ua2 = ua.newbyteorder()
        # This changes the interpretation of the data region (but not the
        #  actual data), therefore the returned scalars are not
        #  the same (they are byte-swapped versions of each other).
        self.assertTrue(ua[()] != ua2[()])
        ua3 = ua2.newbyteorder()
        # Arrays must be equal after the round-trip
        assert_equal(ua, ua3)
    def test_valuesSD(self):
        """Check byteorder of single-dimensional objects"""
        ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
        ua2 = ua.newbyteorder()
        self.assertTrue(ua[0] != ua2[0])
        self.assertTrue(ua[-1] != ua2[-1])
        ua3 = ua2.newbyteorder()
        # Arrays must be equal after the round-trip
        assert_equal(ua, ua3)
    def test_valuesMD(self):
        """Check byteorder of multi-dimensional objects"""
        ua = array([[[self.ucs_value*self.ulen]*2]*3]*4,
                   dtype='U%s' % self.ulen)
        ua2 = ua.newbyteorder()
        self.assertTrue(ua[0, 0, 0] != ua2[0, 0, 0])
        self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1])
        ua3 = ua2.newbyteorder()
        # Arrays must be equal after the round-trip
        assert_equal(ua, ua3)
# Concrete parameterizations of byteorder_values over itemsize x UCS width.
class test_byteorder_1_ucs2(byteorder_values, TestCase):
    """Check the byteorder in unicode (size 1, UCS2 values)"""
    ulen = 1
    ucs_value = ucs2_value
class test_byteorder_1_ucs4(byteorder_values, TestCase):
    """Check the byteorder in unicode (size 1, UCS4 values)"""
    ulen = 1
    ucs_value = ucs4_value
class test_byteorder_2_ucs2(byteorder_values, TestCase):
    """Check the byteorder in unicode (size 2, UCS2 values)"""
    ulen = 2
    ucs_value = ucs2_value
class test_byteorder_2_ucs4(byteorder_values, TestCase):
    """Check the byteorder in unicode (size 2, UCS4 values)"""
    ulen = 2
    ucs_value = ucs4_value
class test_byteorder_1009_ucs2(byteorder_values, TestCase):
    """Check the byteorder in unicode (size 1009, UCS2 values)"""
    ulen = 1009
    ucs_value = ucs2_value
class test_byteorder_1009_ucs4(byteorder_values, TestCase):
    """Check the byteorder in unicode (size 1009, UCS4 values)"""
    ulen = 1009
    ucs_value = ucs4_value
# Standard numpy test-file entry point.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
Odingod/mne-python | mne/realtime/tests/test_fieldtrip_client.py | 5 | 2801 | # Author: Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import time
import os
import threading
import subprocess
import warnings
import os.path as op
from nose.tools import assert_true
import mne
from mne.utils import requires_neuromag2ft, run_tests_if_main
from mne.realtime import FieldTripClient
from mne.externals.six.moves import queue
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.realpath(op.join(base_dir, 'test_raw.fif'))
warnings.simplefilter('always') # enable b/c these tests throw warnings
def _run_buffer(kill_signal, neuromag2ft_fname):
    """Run a neuromag2ft FieldTrip buffer subprocess until signalled.

    Blocks on ``kill_signal`` (a Queue) for up to 10 s, then terminates
    the subprocess.  Intended to be run in a daemon thread.
    """
    # Works with neuromag2ft-3.0.2
    cmd = (neuromag2ft_fname, '--file', raw_fname, '--speed', '4.0')
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    # Let measurement continue for the entire duration
    kill_signal.get(timeout=10.0)
    process.terminate()
@requires_neuromag2ft
def test_fieldtrip_client():
    """Test fieldtrip_client

    Spins up a real neuromag2ft buffer in a background thread, connects a
    FieldTripClient twice, and checks that epochs can be pulled from it.
    """
    neuromag2ft_fname = op.realpath(op.join(os.environ['NEUROMAG2FT_ROOT'],
                                            'neuromag2ft'))
    kill_signal = queue.Queue()
    thread = threading.Thread(target=_run_buffer, args=(kill_signal,
                                                        neuromag2ft_fname))
    thread.daemon = True
    thread.start()
    time.sleep(0.25)
    try:
        # Start the FieldTrip buffer
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            with FieldTripClient(host='localhost', port=1972,
                                 tmax=5, wait_max=1) as rt_client:
                tmin_samp1 = rt_client.tmin_samp
        time.sleep(1)  # Pause measurement
        assert_true(len(w) >= 1)
        # Start the FieldTrip buffer again
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            with FieldTripClient(host='localhost', port=1972,
                                 tmax=5, wait_max=1) as rt_client:
                raw_info = rt_client.get_measurement_info()
                # The buffer kept streaming while we slept, so the second
                # connection must start at a later sample.
                tmin_samp2 = rt_client.tmin_samp
                picks = mne.pick_types(raw_info, meg='grad', eeg=False,
                                       stim=False, eog=False)
                epoch = rt_client.get_data_as_epoch(n_samples=5, picks=picks)
                _, n_channels, n_samples = epoch.get_data().shape
        assert_true(tmin_samp2 > tmin_samp1)
        assert_true(len(w) >= 1)
        assert_true(n_samples == 5)
        assert_true(n_channels == len(picks))
        kill_signal.put(False)  # stop the buffer
    except:
        # Bare except is deliberate: always release the buffer thread (even
        # on KeyboardInterrupt), then re-raise the original error.
        kill_signal.put(False)  # stop the buffer even if tests fail
        raise
| bsd-3-clause |
consulo/consulo-python | plugin/src/main/dist/helpers/pydev/pydev_ipython/inputhookqt4.py | 104 | 7242 | # -*- coding: utf-8 -*-
"""
Qt4's inputhook support function
Author: Christian Boos
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import signal
import threading
from pydev_ipython.qt_for_kernel import QtCore, QtGui
from pydev_ipython.inputhook import allow_CTRL_C, ignore_CTRL_C, stdin_ready
# To minimise future merging complexity, rather than edit the entire code base below
# we fake InteractiveShell here
class InteractiveShell:
    """Minimal stand-in for IPython's InteractiveShell singleton.

    Only the two members used by this module are provided, so the code
    below can be kept close to upstream IPython without depending on it.
    """

    _instance = None

    @classmethod
    def instance(cls):
        """Return the process-wide singleton, creating it on first use."""
        shell = cls._instance
        if shell is None:
            shell = cls()
            cls._instance = shell
        return shell

    def set_hook(self, *args, **kwargs):
        """No-op: we don't consider the pre_prompt_hook because there are
        no KeyboardInterrupts to handle while running under PyDev."""
        pass
#-----------------------------------------------------------------------------
# Module Globals
#-----------------------------------------------------------------------------
got_kbdint = False  # set when the input hook intercepted a KeyboardInterrupt
sigint_timer = None  # pending threading.Timer used to re-deliver SIGINT
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def create_inputhook_qt4(mgr, app=None):
    """Create an input hook for running the Qt4 application event loop.

    Parameters
    ----------
    mgr : an InputHookManager

    app : Qt Application, optional.
        Running application to use.  If not given, we probe Qt for an
        existing application object, and create a new one if none is found.

    Returns
    -------
    A pair consisting of a Qt Application (either the one given or the
    one found or created) and a inputhook.

    Notes
    -----
    We use a custom input hook instead of PyQt4's default one, as it
    interacts better with the readline packages (issue #481).

    The inputhook function works in tandem with a 'pre_prompt_hook'
    which automatically restores the hook as an inputhook in case the
    latter has been temporarily disabled after having intercepted a
    KeyboardInterrupt.
    """
    if app is None:
        app = QtCore.QCoreApplication.instance()
        if app is None:
            # A non-empty arg list is required on some Qt builds.
            app = QtGui.QApplication([" "])

    # Re-use previously created inputhook if any
    ip = InteractiveShell.instance()
    if hasattr(ip, '_inputhook_qt4'):
        return app, ip._inputhook_qt4

    # Otherwise create the inputhook_qt4/preprompthook_qt4 pair of
    # hooks (they both share the got_kbdint flag)

    def inputhook_qt4():
        """PyOS_InputHook python hook for Qt4.

        Process pending Qt events and if there's no pending keyboard
        input, spend a short slice of time (50ms) running the Qt event
        loop.

        As a Python ctypes callback can't raise an exception, we catch
        the KeyboardInterrupt and temporarily deactivate the hook,
        which will let a *second* CTRL+C be processed normally and go
        back to a clean prompt line.
        """
        try:
            allow_CTRL_C()
            app = QtCore.QCoreApplication.instance()
            if not app: # shouldn't happen, but safer if it happens anyway...
                return 0
            app.processEvents(QtCore.QEventLoop.AllEvents, 300)
            if not stdin_ready():
                # Generally a program would run QCoreApplication::exec()
                # from main() to enter and process the Qt event loop until
                # quit() or exit() is called and the program terminates.
                #
                # For our input hook integration, we need to repeatedly
                # enter and process the Qt event loop for only a short
                # amount of time (say 50ms) to ensure that Python stays
                # responsive to other user inputs.
                #
                # A naive approach would be to repeatedly call
                # QCoreApplication::exec(), using a timer to quit after a
                # short amount of time. Unfortunately, QCoreApplication
                # emits an aboutToQuit signal before stopping, which has
                # the undesirable effect of closing all modal windows.
                #
                # To work around this problem, we instead create a
                # QEventLoop and call QEventLoop::exec(). Other than
                # setting some state variables which do not seem to be
                # used anywhere, the only thing QCoreApplication adds is
                # the aboutToQuit signal which is precisely what we are
                # trying to avoid.
                timer = QtCore.QTimer()
                event_loop = QtCore.QEventLoop()
                timer.timeout.connect(event_loop.quit)
                while not stdin_ready():
                    timer.start(50)
                    event_loop.exec_()
                    timer.stop()
        except KeyboardInterrupt:
            global got_kbdint, sigint_timer

            ignore_CTRL_C()
            got_kbdint = True
            mgr.clear_inputhook()

            # This generates a second SIGINT so the user doesn't have to
            # press CTRL+C twice to get a clean prompt.
            #
            # Since we can't catch the resulting KeyboardInterrupt here
            # (because this is a ctypes callback), we use a timer to
            # generate the SIGINT after we leave this callback.
            #
            # Unfortunately this doesn't work on Windows (SIGINT kills
            # Python and CTRL_C_EVENT doesn't work).
            if(os.name == 'posix'):
                pid = os.getpid()
                if(not sigint_timer):
                    sigint_timer = threading.Timer(.01, os.kill,
                                                 args=[pid, signal.SIGINT] )
                    sigint_timer.start()
            else:
                print("\nKeyboardInterrupt - Ctrl-C again for new prompt")


        except: # NO exceptions are allowed to escape from a ctypes callback
            ignore_CTRL_C()
            from traceback import print_exc
            print_exc()
            print("Got exception from inputhook_qt4, unregistering.")
            mgr.clear_inputhook()
        finally:
            allow_CTRL_C()
        return 0

    def preprompthook_qt4(ishell):
        """'pre_prompt_hook' used to restore the Qt4 input hook

        (in case the latter was temporarily deactivated after a
        CTRL+C)
        """
        global got_kbdint, sigint_timer

        # Cancel any still-pending synthetic SIGINT before the new prompt.
        if(sigint_timer):
            sigint_timer.cancel()
            sigint_timer = None

        if got_kbdint:
            mgr.set_inputhook(inputhook_qt4)
        got_kbdint = False

    # Cache the hook on the shell so repeated calls return the same pair.
    ip._inputhook_qt4 = inputhook_qt4
    ip.set_hook('pre_prompt_hook', preprompthook_qt4)

    return app, inputhook_qt4
| apache-2.0 |
soumyajitpaul/Soumyajit-Github-Byte-3 | lib/flask/sessions.py | 348 | 12882 | # -*- coding: utf-8 -*-
"""
flask.sessions
~~~~~~~~~~~~~~
Implements cookie based sessions based on itsdangerous.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import uuid
import hashlib
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import Markup, json
from ._compat import iteritems, text_type
from itsdangerous import URLSafeTimedSerializer, BadSignature
def total_seconds(td):
    """Return the whole seconds in a timedelta as an int.

    Microseconds are intentionally ignored (py2.6-compatible substitute
    for ``timedelta.total_seconds``).
    """
    return td.seconds + td.days * 24 * 3600
class SessionMixin(object):
    """Mixin that augments a dict-like session object with the attributes
    Flask extensions and users expect on a session.
    """

    @property
    def permanent(self):
        # Reflects the ``'_permanent'`` key in the dict; missing key
        # means a non-permanent (browser-lifetime) session.
        return self.get('_permanent', False)

    @permanent.setter
    def permanent(self, value):
        self['_permanent'] = bool(value)

    #: some session backends can tell you if a session is new, but that is
    #: not necessarily guaranteed.  Use with caution.  The default mixin
    #: implementation just hardcodes `False` in.
    new = False

    #: for some backends this will always be `True`, but some backends will
    #: default this to false and detect changes in the dictionary for as
    #: long as changes do not happen on mutable structures in the session.
    #: The default mixin implementation just hardcodes `True` in.
    modified = True
class TaggedJSONSerializer(object):
    """A customized JSON serializer that supports a few extra types that
    we take for granted when serializing (tuples, markup objects, datetime).

    Non-JSON types are encoded as single-key dicts whose key starts with a
    space (' t', ' u', ' m', ' d') -- ordinary string keys can't collide
    with these because of the leading space.
    """
    def dumps(self, value):
        def _tag(value):
            if isinstance(value, tuple):
                return {' t': [_tag(x) for x in value]}
            elif isinstance(value, uuid.UUID):
                return {' u': value.hex}
            elif callable(getattr(value, '__html__', None)):
                return {' m': text_type(value.__html__())}
            elif isinstance(value, list):
                return [_tag(x) for x in value]
            elif isinstance(value, datetime):
                return {' d': http_date(value)}
            elif isinstance(value, dict):
                return dict((k, _tag(v)) for k, v in iteritems(value))
            elif isinstance(value, str):
                try:
                    return text_type(value)
                except UnicodeError:
                    # NOTE(review): UnexpectedUnicodeError is not imported in
                    # this module's visible imports -- confirm it is in scope,
                    # otherwise this path raises NameError instead.
                    raise UnexpectedUnicodeError(u'A byte string with '
                        u'non-ASCII data was passed to the session system '
                        u'which can only store unicode strings.  Consider '
                        u'base64 encoding your string (String was %r)' % value)
            return value
        return json.dumps(_tag(value), separators=(',', ':'))

    def loads(self, value):
        def object_hook(obj):
            # Only single-key dicts can be tagged values; anything else is
            # passed through untouched.
            if len(obj) != 1:
                return obj
            the_key, the_value = next(iteritems(obj))
            if the_key == ' t':
                return tuple(the_value)
            elif the_key == ' u':
                return uuid.UUID(the_value)
            elif the_key == ' m':
                return Markup(the_value)
            elif the_key == ' d':
                return parse_date(the_value)
            return obj
        return json.loads(value, object_hook=object_hook)


# Shared serializer instance used by the cookie session interface.
session_json_serializer = TaggedJSONSerializer()
class SecureCookieSession(CallbackDict, SessionMixin):
    """Baseclass for sessions based on signed cookies.

    Mutations are tracked through the CallbackDict update hook so the
    session interface knows whether the cookie has to be rewritten.
    """

    def __init__(self, initial=None):
        def _mark_modified(instance):
            instance.modified = True
        CallbackDict.__init__(self, initial, _mark_modified)
        self.modified = False
class NullSession(SecureCookieSession):
    """Class used to generate nicer error messages if sessions are not
    available.  Will still allow read-only access to the empty session
    but fail on setting.
    """

    def _fail(self, *args, **kwargs):
        raise RuntimeError('the session is unavailable because no secret '
                           'key was set.  Set the secret_key on the '
                           'application to something unique and secret.')
    # Every mutating dict method is replaced by the failing stub, so reads
    # succeed (empty session) while any write raises RuntimeError.
    __setitem__ = __delitem__ = clear = pop = popitem = \
        update = setdefault = _fail
    del _fail
class SessionInterface(object):
"""The basic interface you have to implement in order to replace the
default session interface which uses werkzeug's securecookie
implementation. The only methods you have to implement are
:meth:`open_session` and :meth:`save_session`, the others have
useful defaults which you don't need to change.
The session object returned by the :meth:`open_session` method has to
provide a dictionary like interface plus the properties and methods
from the :class:`SessionMixin`. We recommend just subclassing a dict
and adding that mixin::
class Session(dict, SessionMixin):
pass
If :meth:`open_session` returns `None` Flask will call into
:meth:`make_null_session` to create a session that acts as replacement
if the session support cannot work because some requirement is not
fulfilled. The default :class:`NullSession` class that is created
will complain that the secret key was not set.
To replace the session interface on an application all you have to do
is to assign :attr:`flask.Flask.session_interface`::
app = Flask(__name__)
app.session_interface = MySessionInterface()
.. versionadded:: 0.8
"""
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
#: :meth:`is_null_session` method will perform a typecheck against
#: this type.
null_session_class = NullSession
#: A flag that indicates if the session interface is pickle based.
#: This can be used by flask extensions to make a decision in regards
#: to how to deal with the session object.
#:
#: .. versionadded:: 0.10
pickle_based = False
def make_null_session(self, app):
"""Creates a null session which acts as a replacement object if the
real session support could not be loaded due to a configuration
error. This mainly aids the user experience because the job of the
null session is to still support lookup without complaining but
modifications are answered with a helpful error message of what
failed.
This creates an instance of :attr:`null_session_class` by default.
"""
return self.null_session_class()
def is_null_session(self, obj):
"""Checks if a given object is a null session. Null sessions are
not asked to be saved.
This checks if the object is an instance of :attr:`null_session_class`
by default.
"""
return isinstance(obj, self.null_session_class)
def get_cookie_domain(self, app):
"""Helpful helper method that returns the cookie domain that should
be used for the session cookie if session cookies are used.
"""
if app.config['SESSION_COOKIE_DOMAIN'] is not None:
return app.config['SESSION_COOKIE_DOMAIN']
if app.config['SERVER_NAME'] is not None:
# chop of the port which is usually not supported by browsers
rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
# Google chrome does not like cookies set to .localhost, so
# we just go with no domain then. Flask documents anyways that
# cross domain cookies need a fully qualified domain name
if rv == '.localhost':
rv = None
# If we infer the cookie domain from the server name we need
# to check if we are in a subpath. In that case we can't
# set a cross domain cookie.
if rv is not None:
path = self.get_cookie_path(app)
if path != '/':
rv = rv.lstrip('.')
return rv
    def get_cookie_path(self, app):
        """Returns the path for which the cookie should be valid.  The
        default implementation uses the value from the
        ``SESSION_COOKIE_PATH`` config var if it's set, and falls back to
        ``APPLICATION_ROOT`` or uses ``/`` if it's `None`.
        """
        return app.config['SESSION_COOKIE_PATH'] or \
               app.config['APPLICATION_ROOT'] or '/'
def get_cookie_httponly(self, app):
"""Returns True if the session cookie should be httponly. This
currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
config var.
"""
return app.config['SESSION_COOKIE_HTTPONLY']
def get_cookie_secure(self, app):
"""Returns True if the cookie should be secure. This currently
just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
"""
return app.config['SESSION_COOKIE_SECURE']
def get_expiration_time(self, app, session):
"""A helper method that returns an expiration date for the session
or `None` if the session is linked to the browser session. The
default implementation returns now + the permanent session
lifetime configured on the application.
"""
if session.permanent:
return datetime.utcnow() + app.permanent_session_lifetime
    def open_session(self, app, request):
        """This method has to be implemented and must either return `None`
        in case the loading failed because of a configuration error or an
        instance of a session object which implements a dictionary like
        interface + the methods and attributes on :class:`SessionMixin`.

        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError()
    def save_session(self, app, session, response):
        """This is called for actual sessions returned by :meth:`open_session`
        at the end of the request.  This is still called during a request
        context so if you absolutely need access to the request you can do
        that.

        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError()
class SecureCookieSessionInterface(SessionInterface):
    """The default session interface that stores sessions in signed cookies
    through the :mod:`itsdangerous` module.
    """

    #: the salt that should be applied on top of the secret key for the
    #: signing of cookie based sessions.
    salt = 'cookie-session'
    #: the hash function to use for the signature.  The default is sha1
    digest_method = staticmethod(hashlib.sha1)
    #: the name of the itsdangerous supported key derivation.  The default
    #: is hmac.
    key_derivation = 'hmac'
    #: A python serializer for the payload.  The default is a compact
    #: JSON derived serializer with support for some extra Python types
    #: such as datetime objects or tuples.
    serializer = session_json_serializer
    session_class = SecureCookieSession

    def get_signing_serializer(self, app):
        """Return the serializer used to sign the cookie payload, or
        `None` if no secret key is configured."""
        if not app.secret_key:
            return None
        return URLSafeTimedSerializer(
            app.secret_key, salt=self.salt,
            serializer=self.serializer,
            signer_kwargs={'key_derivation': self.key_derivation,
                           'digest_method': self.digest_method})

    def open_session(self, app, request):
        """Load the session from the signed cookie; fall back to an empty
        session when the cookie is absent, expired or tampered with."""
        serializer = self.get_signing_serializer(app)
        if serializer is None:
            return None
        cookie_value = request.cookies.get(app.session_cookie_name)
        if not cookie_value:
            return self.session_class()
        max_age = total_seconds(app.permanent_session_lifetime)
        try:
            payload = serializer.loads(cookie_value, max_age=max_age)
        except BadSignature:
            return self.session_class()
        return self.session_class(payload)

    def save_session(self, app, session, response):
        """Write the session back as a signed cookie, deleting the cookie
        when the session was emptied during the request."""
        domain = self.get_cookie_domain(app)
        path = self.get_cookie_path(app)
        if not session:
            # Only clear the cookie if the session was actually modified;
            # otherwise there is nothing to delete on the client.
            if session.modified:
                response.delete_cookie(app.session_cookie_name,
                                       domain=domain, path=path)
            return
        httponly = self.get_cookie_httponly(app)
        secure = self.get_cookie_secure(app)
        expires = self.get_expiration_time(app, session)
        cookie_value = self.get_signing_serializer(app).dumps(dict(session))
        response.set_cookie(app.session_cookie_name, cookie_value,
                            expires=expires, httponly=httponly,
                            domain=domain, path=path, secure=secure)
from flask.debughelpers import UnexpectedUnicodeError
| apache-2.0 |
jaggu303619/asylum | openerp/netsvc.py | 8 | 12196 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#.apidoc title: Common Services: netsvc
#.apidoc module-mods: member-order: bysource
import errno
import logging
import logging.handlers
import os
import platform
import release
import socket
import sys
import threading
import time
import types
from pprint import pformat
try:
import psutil
except ImportError:
psutil = None
# TODO modules that import netsvc only for things from loglevels must be changed to use loglevels.
from loglevels import *
import tools
import openerp
_logger = logging.getLogger(__name__)
def close_socket(sock):
    """ Closes a socket instance cleanly

    Shuts down both directions before closing so the peer sees an
    orderly disconnect; platform quirks (see comments below) are
    tolerated.  NOTE: Python 2 ``except ..., e`` syntax.

    :param sock: the network socket to close
    :type sock: socket.socket
    """
    try:
        sock.shutdown(socket.SHUT_RDWR)
    except socket.error, e:
        if e.errno == errno.EBADF:
            # Werkzeug > 0.9.6 closes the socket itself (see commit
            # https://github.com/mitsuhiko/werkzeug/commit/4d8ca089)
            return
        # On OSX, socket shutdowns both sides if any side closes it
        # causing an error 57 'Socket is not connected' on shutdown
        # of the other side (or something), see
        # http://bugs.python.org/issue4397
        # note: stdlib fixed test, not behavior
        if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
            raise
    sock.close()
def abort_response(dummy_1, description, dummy_2, details):
    # Abort the current RPC call by raising an ORM-level error; the dummy
    # arguments only exist to keep the historic call signature intact.
    # TODO Replace except_{osv,orm} with these directly.
    raise openerp.osv.osv.except_osv(description, details)
class Service(object):
    """ Base class for Local services

    Functionality here is trusted, no authentication.
    Workflow engine and reports subclass this.
    """

    # Registry of all local services, keyed by name.
    _services = {}

    def __init__(self, name):
        self.__name = name
        Service._services[name] = self

    @classmethod
    def exists(cls, name):
        """Return True when a service is registered under *name*."""
        return name in cls._services

    @classmethod
    def remove(cls, name):
        """Unregister the service *name*; a no-op when it is unknown."""
        cls._services.pop(name, None)
def LocalService(name):
    """Return the trusted local service registered under *name*.

    :raises KeyError: when no service with that name is registered.
    """
    # Special case for addons support, will be removed in a few days when addons
    # are updated to directly use openerp.osv.osv.service.
    if name == 'object_proxy':
        return openerp.osv.osv.service
    return Service._services[name]
class ExportService(object):
    """ Proxy for exported services.

    Note that this class has no direct proxy, capable of calling
    eservice.method().  Rather, the proxy should call
    dispatch(method, params)
    """

    # Registry of all exported services, keyed by name.
    _services = {}

    def __init__(self, name):
        ExportService._services[name] = self
        self.__name = name
        # Lazy %-style arguments: the message is only formatted when the
        # DEBUG level is actually enabled for this logger.
        _logger.debug("Registered an exported service: %s", name)

    @classmethod
    def getService(cls, name):
        """Return the exported service registered under *name*.

        :raises KeyError: when no such service exists.
        """
        return cls._services[name]

    # Dispatch a RPC call w.r.t. the method name. The dispatching
    # w.r.t. the service (this class) is done by OpenERPDispatcher.
    def dispatch(self, method, params):
        raise Exception("stub dispatch at %s" % self.__name)
# ANSI color indexes (foreground = 30 + color, background = 40 + color).
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
#The background is set with 40 plus the number of the color, and the foreground with 30
#These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
# Template wrapping a message in foreground + background escapes, reset at the end.
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
# Map log level -> (foreground, background); used by ColoredFormatter below.
# logging.TEST is a custom level injected by the ``loglevels`` import above.
LEVEL_COLOR_MAPPING = {
    logging.DEBUG: (BLUE, DEFAULT),
    logging.INFO: (GREEN, DEFAULT),
    logging.TEST: (WHITE, BLUE),
    logging.WARNING: (YELLOW, DEFAULT),
    logging.ERROR: (RED, DEFAULT),
    logging.CRITICAL: (WHITE, RED),
}
class DBFormatter(logging.Formatter):
    """Formatter that injects the worker pid and the database name of the
    current thread (``'?'`` when the thread carries none) into each record."""

    def format(self, record):
        record.pid = os.getpid()
        record.dbname = getattr(threading.currentThread(), 'dbname', '?')
        return super(DBFormatter, self).format(record)
class ColoredFormatter(DBFormatter):
    """DBFormatter variant that wraps the level name in ANSI color codes
    according to LEVEL_COLOR_MAPPING (used when logging to a tty)."""

    def format(self, record):
        foreground, background = LEVEL_COLOR_MAPPING[record.levelno]
        record.levelname = COLOR_PATTERN % (
            30 + foreground, 40 + background, record.levelname)
        return DBFormatter.format(self, record)
# Guard so the logging setup below only ever runs once per process.
_logger_init = False

def init_logger():
    """Configure the process-wide logging setup from ``tools.config``.

    Chooses a handler (syslog / rotating or plain logfile / stdout),
    attaches a (colored, when on a tty) formatter, and applies the
    per-logger levels from the config's ``log_level``/``log_handler``.
    Idempotent: subsequent calls return immediately.
    """
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    from tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'
    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # Create the log directory on demand before opening the handler.
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if tools.config['logrotate'] is not False:
                handler = logging.handlers.TimedRotatingFileHandler(logf,'D',1,30)
            elif os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.handlers.FileHandler(logf)
        except Exception:
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
            handler = logging.StreamHandler(sys.stdout)
    else:
        # Normal Handler on standard output
        handler = logging.StreamHandler(sys.stdout)

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    if isinstance(handler, logging.StreamHandler) \
            and hasattr(handler.stream, 'fileno') \
            and os.isatty(handler.stream.fileno()):
        formatter = ColoredFormatter(format)
    else:
        formatter = DBFormatter(format)
    handler.setFormatter(formatter)

    logging.getLogger().addHandler(handler)

    # Configure handlers
    default_config = [
        'openerp.netsvc.rpc.request:INFO',
        'openerp.netsvc.rpc.response:INFO',
        'openerp.addons.web.http:INFO',
        'openerp.sql_db:INFO',
        ':INFO',
    ]

    # Translate the coarse log_level option into per-logger settings.
    if tools.config['log_level'] == 'info':
        pseudo_config = []
    elif tools.config['log_level'] == 'debug_rpc':
        pseudo_config = ['openerp:DEBUG','openerp.netsvc.rpc.request:DEBUG']
    elif tools.config['log_level'] == 'debug_rpc_answer':
        pseudo_config = ['openerp:DEBUG','openerp.netsvc.rpc.request:DEBUG', 'openerp.netsvc.rpc.response:DEBUG']
    elif tools.config['log_level'] == 'debug':
        pseudo_config = ['openerp:DEBUG']
    elif tools.config['log_level'] == 'test':
        pseudo_config = ['openerp:TEST']
    elif tools.config['log_level'] == 'warn':
        pseudo_config = ['openerp:WARNING']
    elif tools.config['log_level'] == 'error':
        pseudo_config = ['openerp:ERROR']
    elif tools.config['log_level'] == 'critical':
        pseudo_config = ['openerp:CRITICAL']
    elif tools.config['log_level'] == 'debug_sql':
        pseudo_config = ['openerp.sql_db:DEBUG']
    else:
        pseudo_config = []

    logconfig = tools.config['log_handler']

    # Later entries win: explicit log_handler overrides pseudo/default config.
    for logconfig_item in default_config + pseudo_config + logconfig:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in default_config + pseudo_config + logconfig:
        _logger.debug('logger level set: "%s"', logconfig_item)
# An alternative logging scheme for automated runs of the
# server intended to test it.
def init_alternative_logger():
    """Install a bare-bones handler on the 'openerp' logger that prints
    anything above INFO (level > 20) straight to stdout.  Python 2 only."""
    class H(logging.Handler):
        def emit(self, record):
            if record.levelno > 20:
                print record.levelno, record.pathname, record.msg
    handler = H()
    # Add the handler to the 'openerp' logger.
    logger = logging.getLogger('openerp')
    logger.addHandler(handler)
    logger.setLevel(logging.ERROR)
def replace_request_password(args):
    """Return *args* as a tuple with the password masked.

    The password is always the third positional argument in a request;
    masking it makes RPC logs safe to forward for diagnostics/debugging.
    """
    if len(args) <= 2:
        return tuple(args)
    masked = list(args)
    masked[2] = '*'
    return tuple(masked)
def log(logger, level, prefix, msg, depth=None):
    """Pretty-print *msg* to *logger* at *level*.

    The first line carries *prefix*; every continuation line is padded
    with spaces of the same width so multi-line values stay aligned.
    *depth* is forwarded to :func:`pprint.pformat`.
    """
    text = prefix + pformat(msg, depth=depth)
    continuation = ' ' * len(prefix)
    for lineno, line in enumerate(text.split('\n')):
        pad = '' if lineno == 0 else continuation
        logger.log(level, pad + line)
def dispatch_rpc(service_name, method, params):
    """ Handle a RPC call.

    This is pure Python code, the actual marshalling (from/to XML-RPC or
    NET-RPC) is done in a upper layer.

    Dispatches to the exported service, with optional DEBUG logging of the
    request/response including timing and (when psutil is available)
    memory deltas.  Known OpenERP exceptions are re-raised untouched;
    everything else is logged and post-mortem'd first.  Python 2 only
    (``except ..., e`` syntax).
    """
    try:
        rpc_request = logging.getLogger(__name__ + '.rpc.request')
        rpc_response = logging.getLogger(__name__ + '.rpc.response')
        rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
        rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
        if rpc_request_flag or rpc_response_flag:
            start_time = time.time()
            start_rss, start_vms = 0, 0
            if psutil:
                start_rss, start_vms = psutil.Process(os.getpid()).get_memory_info()
            # NOTE(review): `rpc_request` is a Logger and therefore always
            # truthy, so this condition reduces to `rpc_response_flag` —
            # the request is logged up-front only when responses are also
            # being logged (otherwise it is logged once, below, with the
            # timing line).  Presumably intentional; confirm before changing.
            if rpc_request and rpc_response_flag:
                log(rpc_request,logging.DEBUG,'%s.%s'%(service_name,method), replace_request_password(params))

        result = ExportService.getService(service_name).dispatch(method, params)

        if rpc_request_flag or rpc_response_flag:
            end_time = time.time()
            end_rss, end_vms = 0, 0
            if psutil:
                end_rss, end_vms = psutil.Process(os.getpid()).get_memory_info()
            logline = '%s.%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % (service_name, method, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
            if rpc_response_flag:
                log(rpc_response,logging.DEBUG, logline, result)
            else:
                log(rpc_request,logging.DEBUG, logline, replace_request_password(params), depth=1)

        return result
    except openerp.exceptions.AccessError:
        raise
    except openerp.exceptions.AccessDenied:
        raise
    except openerp.exceptions.Warning:
        raise
    except openerp.exceptions.DeferredException, e:
        _logger.exception(tools.exception_to_unicode(e))
        post_mortem(e.traceback)
        raise
    except Exception, e:
        _logger.exception(tools.exception_to_unicode(e))
        post_mortem(sys.exc_info())
        raise
def post_mortem(info):
    """Drop into a pdb post-mortem on *info* (a ``sys.exc_info()`` triple)
    when the server runs with ``debug_mode`` enabled."""
    if tools.config['debug_mode'] and isinstance(info[2], types.TracebackType):
        import pdb
        pdb.post_mortem(info[2])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
renzon/pswdless | backend/appengine/config/template_middleware.py | 35 | 3100 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.api.namespace_manager import get_namespace
from jinja2.exceptions import TemplateNotFound
from tekton import router
from tekton.gae.middleware.response import ResponseBase
from tekton.gae.middleware import Middleware
from config import template
class TemplateResponse(ResponseBase):
    def __init__(self, context=None, template_path=None):
        """
        Class to render template and send it through HTTP response

        context: the context dict for template rendering
        template_path: the path for the template. If None, the template is
            found by convention, according to the handler's routed path
        """
        super(TemplateResponse, self).__init__(context)
        self.template_path = template_path
class TemplateMiddleware(Middleware):
    # Exposes the namespace-aware renderer to handlers under the
    # "_render" dependency key.
    def set_up(self):
        self.dependencies["_render"] = render_by_namespace
# Error text raised by render_by_convention when neither candidate template
# exists; the two %s slots receive the paths that were tried.
_TMPL_NOT_FOUND_MSG = '''Template not found
Looked by convention in /routes/templates directory for:
1) %s
2) %s
Create one of the two template files or explicit indicate which one to use on TemplateResponse'''
def render_by_namespace(template_path, context=None):
    """Render *template_path*, preferring a namespace-specific override.

    When a GAE namespace is active, ``<namespace>/<template_path>`` is
    tried first, falling back to the plain path when that template does
    not exist.

    context: optional dict for template rendering.  The previous
        signature used a mutable default (``context={}``), which is a
        shared-object pitfall; ``None`` is converted to a fresh dict here
        so the behavior for callers is unchanged.
    """
    if context is None:
        context = {}
    ns = get_namespace()
    if not ns:
        return template.render(template_path, context)
    try:
        return template.render('/'.join([ns, template_path]), context)
    except TemplateNotFound:
        return template.render(template_path, context)
def render_by_convention(fcn, context):
    """Render the template associated with handler *fcn* by convention.

    The handler's routed path is tried first as ``<path>.html``, then as
    ``<path>/home.html`` (with ``/`` mapping directly to ``/home.html``).

    :raises TemplateNotFound: when neither candidate template exists;
        the message lists both paths that were tried.
    """
    template_path = router.to_path(fcn)

    def try_render(suffix):
        # Returns (candidate_path, rendered_text_or_None).
        if template_path == '/':
            return '/home.html', render_by_namespace('/home.html', context)
        try:
            template_file = template_path + suffix
            return template_file, render_by_namespace(template_file, context)
        except TemplateNotFound:
            return template_file, None

    template_1, tmpl_rendered = try_render('.html')
    if tmpl_rendered is None:
        template_2, tmpl_rendered = try_render('/home.html')
        if tmpl_rendered is None:
            raise TemplateNotFound(_TMPL_NOT_FOUND_MSG % (template_1, template_2))
    return tmpl_rendered
class TemplateWriteMiddleware(Middleware):
    def set_up(self):
        """Render the handler's TemplateResponse (if any) into the HTTP body.

        Enriches the template context with the auth-related dependencies
        and the CSRF code before rendering, either by explicit path or by
        convention.  Returns True to stop the middleware chain once the
        response has been written.
        """
        fcn_response = self.dependencies['_fcn_response']
        fcn = self.dependencies['_fcn']
        if isinstance(fcn_response, TemplateResponse):
            context = fcn_response.context or {}
            for key in ('_logged_user', '_login_path', '_logout_path'):
                context[key] = self.dependencies[key]
            if '_csrf_code' in self.dependencies:
                context['_csrf_code'] = self.dependencies['_csrf_code']
            template_path = fcn_response.template_path
            if template_path is None:
                tmpl_rendered = render_by_convention(fcn, context)
            else:
                tmpl_rendered = render_by_namespace(template_path, context)
            self.handler.response.write(tmpl_rendered)
            return True  # after the response is written, no more middlewares need to run
dcjohnston/geojs | dashboard/github_service/dashboard.py | 2 | 5762 | #!/usr/bin/env python
import os
import shutil
import socket
from datetime import datetime
import subprocess as sp
import json
from pymongo import MongoClient
# CTest driver-script template; the {source}/{build}/{site}/{name}
# placeholders are filled in by run_test() below via str.format.
_ctest = '''
set(CTEST_SOURCE_DIRECTORY "{source}")
set(CTEST_BINARY_DIRECTORY "{build}")
include(${{CTEST_SOURCE_DIRECTORY}}/CTestConfig.cmake)
set(CTEST_SITE "{site}")
set(CTEST_BUILD_NAME "{name}")
set(CTEST_CMAKE_GENERATOR "Unix Makefiles")
ctest_start("Experimental")
ctest_configure()
ctest_build()
ctest_test(PARALLEL_LEVEL 1 RETURN_VALUE res)
ctest_submit()
if(NOT res EQUAL 0)
message(FATAL_ERROR "Test failures occurred.")
endif()
'''
# Short host name, reported to CDash as the CTest "site".
_host = socket.gethostname().split('.')[0]
def config():
    """Return the dashboard runner's static configuration as a fresh dict.

    A new dict is built on every call so callers may mutate the result
    without affecting each other.
    """
    settings = {
        # MongoDB connection used for the test queue and results.
        'mongo-host': 'lusitania',
        'mongo-port': 27017,
        'mongo-database': 'geojs_dashboard',
        # Where test checkouts are created, and what to clone.
        'test-dir': '~/geojs-testing',
        'repo': 'https://github.com/OpenGeoscience/geojs.git',
        'kill-server': '/Users/jbeezley/bin/killtestserver',
        'add-path': '/usr/local/bin',
    }
    # Absolute paths to the build tools invoked by run_test().
    settings.update({
        'cmake': '/usr/local/bin/cmake',
        'ctest': '/usr/local/bin/ctest',
        'git': '/usr/local/bin/git',
    })
    return settings
def _communicate(cmd, **kw):
    """Run *cmd* through a shell and return ``(returncode, output)``.

    stderr is folded into stdout; the configured 'add-path' entry is
    prepended to PATH for the child process.  Extra keyword arguments
    are forwarded to subprocess.Popen.
    """
    search_path = os.environ.get('PATH', '')
    extra_path = config().get('add-path')
    if extra_path:
        search_path = extra_path + ':' + search_path
    kw.update(stderr=sp.STDOUT, stdout=sp.PIPE, shell=True)
    proc = sp.Popen('/usr/bin/env PATH=' + search_path + ' ' + cmd, **kw)
    out, _ = proc.communicate()
    return proc.returncode, out
def run_test(repo, commit, testdir, branch):
    """Clone *repo* at *commit* into *testdir*, configure, build and run ctest.

    Returns a ``(passed, reason, output)`` triple where *passed* is a bool,
    *reason* a short human-readable summary and *output* the captured
    command output of the failing (or final) step.

    Fixes over the previous revision: removed a leftover debug
    ``print cmake`` statement and closed the build-script file handle
    deterministically with a ``with`` block.
    """
    cfg = config()
    git = cfg.get('git', 'git')
    cmake = cfg.get('cmake', 'cmake')
    ctest = cfg.get('ctest', 'ctest')

    # ======================
    # git clone and checkout
    # ======================
    s, out = _communicate(' '.join([
        git, 'clone',
        '--recursive',
        repo, testdir
    ]))
    if s != 0:
        return (False, 'clone "%s" failed' % repo, out)

    s, out = _communicate(' '.join([
        git,
        '-C', testdir,
        'checkout',
        commit
    ]))
    if s != 0:
        return (False, 'checkout "%s" failed' % commit, out)

    s, out = _communicate(' '.join([
        git,
        '-C', testdir,
        'submodule', 'update'
    ]))
    if s != 0:
        return (False, 'submodule update failed', out)

    # =========
    # configure
    # =========
    builddir = os.path.join(testdir, '_build')
    os.makedirs(builddir)

    s, out = _communicate(
        ' '.join([
            cmake,
            '-D', 'SELENIUM_TESTS=ON',
            '-D', 'CHROME_TESTS=OFF',
            '-D', 'FIREFOX_TESTS=ON',
            '-D', 'COVERAGE_TESTS=OFF',
            '..'
        ]),
        cwd=builddir
    )
    if s != 0:
        return (False, 'cmake configure failed', out)

    # ==============
    # build and test
    # ==============
    build_script = os.path.join(builddir, 'build.cmake')
    kw = {
        'source': testdir,
        'build': builddir,
        'site': _host,
        'name': branch + '-' + commit[:6]
    }
    # Write the ctest driver script; close the handle promptly.
    with open(build_script, 'w') as f:
        f.write(_ctest.format(**kw))

    s, out = _communicate(
        ctest + ' -VV -S {}'.format(build_script),
        cwd=builddir
    )
    test_result = s
    test_output = out

    if test_result != 0:
        return (False, 'Test(s) failed', test_output)

    return (True, 'All tests passed!', test_output)
def start_test(item, oldTest=None):
    """Run the test for queue entry *item*, or reuse a previous result.

    When *oldTest* (an earlier result document for the same commit) is
    given, its pass/output are copied and the run is marked skipped.
    Otherwise a clean working directory is prepared under the configured
    test-dir and run_test() is executed.  Returns a status dict with the
    keys 'pass', 'reason', 'output' and 'skipped'.
    """
    if oldTest:
        return {
            'pass': oldTest['status']['pass'],
            'output': oldTest['status']['output'],
            'reason': 'Already tested in branch %s' % oldTest['branch'],
            'skipped': True
        }

    cfg = config()
    workdir = os.path.join(os.path.expanduser(cfg['test-dir']), item['commit'])
    # Start from a clean checkout directory for this commit.
    shutil.rmtree(workdir, ignore_errors=True)
    try:
        os.makedirs(workdir)
    except OSError:
        pass
    passed, reason, output = run_test(
        cfg['repo'], item['commit'], workdir, item['branch'])
    return {
        'pass': passed,
        'reason': reason,
        'output': output,
        'skipped': False
    }
def notify(item, status):
    '''
    Do something to notify people, not sure what.
    '''
    # TODO: implement a real notification hook (email, GitHub status, ...);
    # currently a deliberate no-op stub.
    pass
def nightly(queue, results):
    """Drain the *queue* collection, testing each commit once.

    Commits already present in *results* are skipped (their old status is
    reused); fresh runs are stored in *results*.  Every processed entry
    is removed from the queue and passed to notify().
    """
    for item in queue.find():
        oldTest = results.find_one({'commit': item['commit']})
        status = start_test(item, oldTest)
        if not oldTest:
            result = dict(item)
            # Strip Mongo's _id so the insert creates a new document.
            result.pop('_id')
            result['time'] = datetime.now()
            result['status'] = status
            results.insert(result)
        # NOTE(review): the queue entry is removed even when the run was
        # skipped or failed — intentional fire-and-forget semantics, presumably.
        queue.remove(item)
        notify(item, status)
def continuous(sha, branch, user, queue, results):
    """Run (or reuse) the test for a single commit and record the outcome.

    *queue* is accepted to keep the signature parallel with nightly()
    but is not consulted here.  Returns the status dict produced by
    start_test().
    """
    previous = results.find_one({'commit': sha})
    item = {
        'commit': sha,
        'user': user,
        'branch': branch,
        'time': datetime.now()
    }
    status = start_test(item, previous)
    if not previous:
        record = dict(item)
        record['time'] = datetime.now()
        record['status'] = status
        results.insert(record)
    notify(item, status)
    return status
def main(*args):
    """Entry point: connect to Mongo and run a nightly or continuous pass.

    With no arguments or 'nightly' as the first argument the whole queue
    is processed; otherwise the first three arguments are interpreted as
    (sha, branch, user) for a single continuous run, whose status dict is
    returned.
    """
    cfg = config()
    cl = MongoClient(
        host=cfg['mongo-host'],
        port=cfg['mongo-port'],
    )
    db = cl[cfg['mongo-database']]
    queue = db['queue']
    results = db['results']
    # Kill any lingering test web server before starting a new run.
    if cfg.get('kill-server'):
        sp.call(cfg['kill-server'], shell=True)
    if not len(args) or args[0] == 'nightly':
        nightly(queue, results)
    else:
        return continuous(*args[:3], queue=queue, results=results)
if __name__ == '__main__':
    import sys
    # Pretty-print the continuous-run status (None for nightly runs).
    print json.dumps(main(*sys.argv[1:]), indent=4)
| bsd-3-clause |
hujiajie/chromium-crosswalk | tools/perf_expectations/update_perf_expectations_unittest.py | 161 | 10350 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for update_perf_expectations."""
import copy
from StringIO import StringIO
import unittest
import make_expectations as perf_ex_lib
import update_perf_expectations as upe_mod
# A separate .json file contains the list of test cases we'll use.
# The tests used to be defined inline here, but are >80 characters in length.
# Now they are expected to be defined in file ./sample_test_cases.json.
# Create a dictionary of tests using the .json file; this is shared,
# read-only fixture data for every test below (tests deep-copy it before
# mutating).
all_tests = perf_ex_lib.ConvertJsonIntoDict(
    perf_ex_lib.ReadFile('sample_test_cases.json'))
# Get all keys.
all_tests_keys = all_tests.keys()
def VerifyPreparedTests(self, tests_to_update, reva, revb):
  """Asserts PrepareTestsForUpdate touched exactly the expected tests.

  Args:
    self: the unittest.TestCase instance supplying the assert* methods.
    tests_to_update: list of test keys expected to be modified.
    reva, revb: revision range handed to PrepareTestsForUpdate (in any
        order; it is normalized below the same way the module does).
  """
  # Work with a copy of the set of tests.
  all_tests_copy = copy.deepcopy(all_tests)
  upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests_copy, reva, revb)
  # Make sure reva < revb (idiomatic tuple swap instead of a temp var).
  if reva > revb:
    reva, revb = revb, reva
  # Run through all tests and make sure only those that were
  # specified to be modified had their 'sha1' value removed.
  for test_key in all_tests_keys:
    new_test_value = all_tests_copy[test_key]
    original_test_value = all_tests[test_key]
    if test_key in tests_to_update:
      # Make sure there is no "sha1".
      self.assertFalse('sha1' in new_test_value)
      # Make sure reva and revb values are correctly set.
      self.assertEqual(reva, new_test_value['reva'])
      self.assertEqual(revb, new_test_value['revb'])
    else:
      # Make sure there is an "sha1" value
      self.assertTrue('sha1' in new_test_value)
      # Make sure the sha1, reva and revb values have not changed.
      self.assertEqual(original_test_value['sha1'], new_test_value['sha1'])
      self.assertEqual(original_test_value['reva'], new_test_value['reva'])
      self.assertEqual(original_test_value['revb'], new_test_value['revb'])
class UpdatePerfExpectationsTest(unittest.TestCase):
  """Tests for the test-name matching and update-preparation helpers in
  update_perf_expectations, driven by the fixture data loaded above."""

  def testFilterMatch(self):
    """Verifies different regular expressions test filter."""
    self.maxDiff = None
    # Tests to update specified by a single literal string.
    tests_to_update = 'win-release/media_tests_av_perf/fps/tulip2.webm'
    expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
    self.assertEqual(expected_tests_list,
                     upe_mod.GetMatchingTests(tests_to_update,
                                              all_tests_keys))
    # Tests to update specified by a single reg-ex
    tests_to_update = 'win-release/media_tests_av_perf/fps.*'
    expected_tests_list = ['win-release/media_tests_av_perf/fps/crowd1080.webm',
                           'win-release/media_tests_av_perf/fps/crowd2160.webm',
                           'win-release/media_tests_av_perf/fps/crowd360.webm',
                           'win-release/media_tests_av_perf/fps/crowd480.webm',
                           'win-release/media_tests_av_perf/fps/crowd720.webm',
                           'win-release/media_tests_av_perf/fps/tulip2.m4a',
                           'win-release/media_tests_av_perf/fps/tulip2.mp3',
                           'win-release/media_tests_av_perf/fps/tulip2.mp4',
                           'win-release/media_tests_av_perf/fps/tulip2.ogg',
                           'win-release/media_tests_av_perf/fps/tulip2.ogv',
                           'win-release/media_tests_av_perf/fps/tulip2.wav',
                           'win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetMatchingTests(tests_to_update,
                                           all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)
    # Tests to update are specified by a single reg-ex, spanning multiple OSes.
    tests_to_update = '.*-release/media_tests_av_perf/fps.*'
    expected_tests_list = ['linux-release/media_tests_av_perf/fps/tulip2.m4a',
                           'linux-release/media_tests_av_perf/fps/tulip2.mp3',
                           'linux-release/media_tests_av_perf/fps/tulip2.mp4',
                           'linux-release/media_tests_av_perf/fps/tulip2.ogg',
                           'linux-release/media_tests_av_perf/fps/tulip2.ogv',
                           'linux-release/media_tests_av_perf/fps/tulip2.wav',
                           'win-release/media_tests_av_perf/fps/crowd1080.webm',
                           'win-release/media_tests_av_perf/fps/crowd2160.webm',
                           'win-release/media_tests_av_perf/fps/crowd360.webm',
                           'win-release/media_tests_av_perf/fps/crowd480.webm',
                           'win-release/media_tests_av_perf/fps/crowd720.webm',
                           'win-release/media_tests_av_perf/fps/tulip2.m4a',
                           'win-release/media_tests_av_perf/fps/tulip2.mp3',
                           'win-release/media_tests_av_perf/fps/tulip2.mp4',
                           'win-release/media_tests_av_perf/fps/tulip2.ogg',
                           'win-release/media_tests_av_perf/fps/tulip2.ogv',
                           'win-release/media_tests_av_perf/fps/tulip2.wav',
                           'win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetMatchingTests(tests_to_update,
                                           all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)

  def testLinesFromInputFile(self):
    """Verifies different string formats specified in input file."""
    # Tests to update have been specified by a single literal string in
    # an input file.
    # Use the StringIO class to mock a file object.
    lines_from_file = StringIO(
        'win-release/media_tests_av_perf/fps/tulip2.webm')
    contents = lines_from_file.read()
    expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)
    lines_from_file.close()
    # Tests to update specified by a single reg-ex in an input file.
    lines_from_file = StringIO('win-release/media_tests_av_perf/fps/tulip2.*\n')
    contents = lines_from_file.read()
    expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.m4a',
                           'win-release/media_tests_av_perf/fps/tulip2.mp3',
                           'win-release/media_tests_av_perf/fps/tulip2.mp4',
                           'win-release/media_tests_av_perf/fps/tulip2.ogg',
                           'win-release/media_tests_av_perf/fps/tulip2.ogv',
                           'win-release/media_tests_av_perf/fps/tulip2.wav',
                           'win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)
    lines_from_file.close()
    # Tests to update specified by multiple lines in an input file.
    lines_from_file = StringIO(
        '.*-release/media_tests_av_perf/fps/tulip2.*\n'
        'win-release/media_tests_av_perf/dropped_fps/tulip2.*\n'
        'linux-release/media_tests_av_perf/audio_latency/latency')
    contents = lines_from_file.read()
    expected_tests_list = [
        'linux-release/media_tests_av_perf/audio_latency/latency',
        'linux-release/media_tests_av_perf/fps/tulip2.m4a',
        'linux-release/media_tests_av_perf/fps/tulip2.mp3',
        'linux-release/media_tests_av_perf/fps/tulip2.mp4',
        'linux-release/media_tests_av_perf/fps/tulip2.ogg',
        'linux-release/media_tests_av_perf/fps/tulip2.ogv',
        'linux-release/media_tests_av_perf/fps/tulip2.wav',
        'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
        'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
        'win-release/media_tests_av_perf/fps/tulip2.m4a',
        'win-release/media_tests_av_perf/fps/tulip2.mp3',
        'win-release/media_tests_av_perf/fps/tulip2.mp4',
        'win-release/media_tests_av_perf/fps/tulip2.ogg',
        'win-release/media_tests_av_perf/fps/tulip2.ogv',
        'win-release/media_tests_av_perf/fps/tulip2.wav',
        'win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)
    lines_from_file.close()

  def testPreparingForUpdate(self):
    """Verifies that tests to be modified are changed as expected."""
    tests_to_update = [
        'linux-release/media_tests_av_perf/audio_latency/latency',
        'linux-release/media_tests_av_perf/fps/tulip2.m4a',
        'linux-release/media_tests_av_perf/fps/tulip2.mp3',
        'linux-release/media_tests_av_perf/fps/tulip2.mp4',
        'linux-release/media_tests_av_perf/fps/tulip2.ogg',
        'linux-release/media_tests_av_perf/fps/tulip2.ogv',
        'linux-release/media_tests_av_perf/fps/tulip2.wav',
        'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
        'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
        'win-release/media_tests_av_perf/fps/tulip2.mp3',
        'win-release/media_tests_av_perf/fps/tulip2.mp4',
        'win-release/media_tests_av_perf/fps/tulip2.ogg',
        'win-release/media_tests_av_perf/fps/tulip2.ogv',
        'win-release/media_tests_av_perf/fps/tulip2.wav',
        'win-release/media_tests_av_perf/fps/tulip2.webm']
    # Test regular positive integers.
    reva = 12345
    revb = 54321
    VerifyPreparedTests(self, tests_to_update, reva, revb)
    # Test negative values.
    reva = -54321
    revb = 12345
    with self.assertRaises(ValueError):
      upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
    # Test reva greater than revb.
    reva = 54321
    revb = 12345
    upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
    # Test non-integer values
    reva = 'sds'
    revb = 12345
    with self.assertRaises(ValueError):
      upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
mozilla/stoneridge | python/src/Lib/lib-tk/test/test_ttk/test_widgets.py | 21 | 39530 | import unittest
import Tkinter
import ttk
from test.test_support import requires, run_unittest
import sys
import support
from test_functions import MockTclObj, MockStateSpec
requires('gui')
class WidgetTest(unittest.TestCase):
    """Tests methods available in every ttk widget.

    Requires a live Tk display (guarded by requires('gui') above); a
    single Button widget is created per test in setUp.
    """

    def setUp(self):
        support.root_deiconify()
        self.widget = ttk.Button(width=0, text="Text")
        self.widget.pack()
        self.widget.wait_visibility()

    def tearDown(self):
        self.widget.destroy()
        support.root_withdraw()

    def test_identify(self):
        self.widget.update_idletasks()
        # The center of a Button maps to its "label" element.
        self.assertEqual(self.widget.identify(
            int(self.widget.winfo_width() / 2),
            int(self.widget.winfo_height() / 2)
            ), "label")
        self.assertEqual(self.widget.identify(-1, -1), "")
        # Non-integer coordinates must raise at the Tcl level.
        self.assertRaises(Tkinter.TclError, self.widget.identify, None, 5)
        self.assertRaises(Tkinter.TclError, self.widget.identify, 5, None)
        self.assertRaises(Tkinter.TclError, self.widget.identify, 5, '')

    def test_widget_state(self):
        # XXX not sure about the portability of all these tests
        self.assertEqual(self.widget.state(), ())
        self.assertEqual(self.widget.instate(['!disabled']), True)
        # changing from !disabled to disabled
        self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
        # no state change
        self.assertEqual(self.widget.state(['disabled']), ())
        # change back to !disable but also active
        self.assertEqual(self.widget.state(['!disabled', 'active']),
            ('!active', 'disabled'))
        # no state changes, again
        self.assertEqual(self.widget.state(['!disabled', 'active']), ())
        self.assertEqual(self.widget.state(['active', '!disabled']), ())
        # instate() forwards extra args/kwargs to the callback when the
        # state spec matches.
        def test_cb(arg1, **kw):
            return arg1, kw
        self.assertEqual(self.widget.instate(['!disabled'],
            test_cb, "hi", **{"msg": "there"}),
            ('hi', {'msg': 'there'}))
        # attempt to set invalid statespec
        currstate = self.widget.state()
        self.assertRaises(Tkinter.TclError, self.widget.instate,
            ['badstate'])
        self.assertRaises(Tkinter.TclError, self.widget.instate,
            ['disabled', 'badstate'])
        # verify that widget didn't change its state
        self.assertEqual(currstate, self.widget.state())
        # ensuring that passing None as state doesn't modify current state
        self.widget.state(['active', '!disabled'])
        self.assertEqual(self.widget.state(), ('active', ))
class ButtonTest(unittest.TestCase):
    """Tests for ttk.Button."""

    def test_invoke(self):
        # invoke() must run the command callback associated with the button.
        calls = []
        button = ttk.Button(command=lambda: calls.append(1))
        button.invoke()
        self.assertTrue(calls)
class CheckbuttonTest(unittest.TestCase):
    """Tests for ttk.Checkbutton."""

    def test_invoke(self):
        calls = []

        def record_call():
            calls.append(1)
            return "cb test called"

        cbtn = ttk.Checkbutton(command=record_call)
        # the variable automatically created by ttk.Checkbutton is actually
        # undefined till we invoke the Checkbutton
        self.assertEqual(cbtn.state(), ('alternate', ))
        self.assertRaises(Tkinter.TclError, cbtn.tk.globalgetvar,
            cbtn['variable'])

        # invoke() toggles the button on, runs the callback and returns its
        # result; the linked Tcl variable now holds the onvalue.
        self.assertEqual(cbtn.invoke(), "cb test called")
        self.assertEqual(cbtn['onvalue'],
            cbtn.tk.globalgetvar(cbtn['variable']))
        self.assertTrue(calls)

        # With the command removed, invoke() toggles the button off, returns
        # an empty string, and runs no callback.
        cbtn['command'] = ''
        self.assertEqual(cbtn.invoke(), '')
        self.assertFalse(len(calls) > 1)
        self.assertEqual(cbtn['offvalue'],
            cbtn.tk.globalgetvar(cbtn['variable']))
class ComboboxTest(unittest.TestCase):
    """Tests for ttk.Combobox."""

    def setUp(self):
        support.root_deiconify()
        self.combo = ttk.Combobox()

    def tearDown(self):
        self.combo.destroy()
        support.root_withdraw()

    def _show_drop_down_listbox(self):
        # Simulate a click on the drop-down arrow, which sits near the
        # right edge of the widget.
        width = self.combo.winfo_width()
        self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
        self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
        self.combo.update_idletasks()

    def test_virtual_event(self):
        # Selecting an item via the drop-down must fire <<ComboboxSelected>>.
        success = []
        self.combo['values'] = [1]
        self.combo.bind('<<ComboboxSelected>>',
            lambda evt: success.append(True))
        self.combo.pack()
        self.combo.wait_visibility()

        height = self.combo.winfo_height()
        self._show_drop_down_listbox()
        self.combo.update()
        self.combo.event_generate('<Return>')
        self.combo.update()

        self.assertTrue(success)

    def test_postcommand(self):
        # The postcommand callback runs each time the drop-down is shown.
        success = []
        self.combo['postcommand'] = lambda: success.append(True)
        self.combo.pack()
        self.combo.wait_visibility()

        self._show_drop_down_listbox()
        self.assertTrue(success)

        # testing postcommand removal
        self.combo['postcommand'] = ''
        self._show_drop_down_listbox()
        self.assertEqual(len(success), 1)

    def test_values(self):
        def check_get_current(getval, currval):
            self.assertEqual(self.combo.get(), getval)
            self.assertEqual(self.combo.current(), currval)

        # current() is -1 while the entry text is not one of the values.
        check_get_current('', -1)

        self.combo['values'] = ['a', 1, 'c']

        self.combo.set('c')
        check_get_current('c', 2)

        self.combo.current(0)
        check_get_current('a', 0)

        self.combo.set('d')
        check_get_current('d', -1)

        # testing values with empty string
        self.combo.set('')
        self.combo['values'] = (1, 2, '', 3)
        check_get_current('', 2)

        # testing values with empty string set through configure
        self.combo.configure(values=[1, '', 2])
        self.assertEqual(self.combo['values'], ('1', '', '2'))

        # out of range
        self.assertRaises(Tkinter.TclError, self.combo.current,
            len(self.combo['values']))
        # it expects an integer (or something that can be converted to int)
        self.assertRaises(Tkinter.TclError, self.combo.current, '')

        # testing creating combobox with empty string in values
        combo2 = ttk.Combobox(values=[1, 2, ''])
        self.assertEqual(combo2['values'], ('1', '2', ''))
        combo2.destroy()
class EntryTest(unittest.TestCase):
    """Tests for ttk.Entry, including the validation protocol."""

    def setUp(self):
        support.root_deiconify()
        self.entry = ttk.Entry()

    def tearDown(self):
        self.entry.destroy()
        support.root_withdraw()

    def test_bbox(self):
        # bbox(index) returns a 4-tuple of ints for a valid index.
        self.assertEqual(len(self.entry.bbox(0)), 4)
        for item in self.entry.bbox(0):
            self.assertTrue(isinstance(item, int))

        # Invalid indices must raise a TclError.
        self.assertRaises(Tkinter.TclError, self.entry.bbox, 'noindex')
        self.assertRaises(Tkinter.TclError, self.entry.bbox, None)

    def test_identify(self):
        self.entry.pack()
        self.entry.wait_visibility()
        self.entry.update_idletasks()

        # A point inside the entry is the text area; outside is "".
        self.assertEqual(self.entry.identify(5, 5), "textarea")
        self.assertEqual(self.entry.identify(-1, -1), "")

        # Non-integer coordinates must raise a TclError.
        self.assertRaises(Tkinter.TclError, self.entry.identify, None, 5)
        self.assertRaises(Tkinter.TclError, self.entry.identify, 5, None)
        self.assertRaises(Tkinter.TclError, self.entry.identify, 5, '')

    def test_validation_options(self):
        success = []
        test_invalid = lambda: success.append(True)

        self.entry['validate'] = 'none'
        self.entry['validatecommand'] = lambda: False

        # The invalidcommand runs when validatecommand returns False.
        self.entry['invalidcommand'] = test_invalid
        self.entry.validate()
        self.assertTrue(success)

        # Removing the invalidcommand stops the callback from running.
        self.entry['invalidcommand'] = ''
        self.entry.validate()
        self.assertEqual(len(success), 1)

        # A passing validatecommand never triggers the invalidcommand.
        self.entry['invalidcommand'] = test_invalid
        self.entry['validatecommand'] = lambda: True
        self.entry.validate()
        self.assertEqual(len(success), 1)

        # An empty validatecommand validates trivially.
        self.entry['validatecommand'] = ''
        self.entry.validate()
        self.assertEqual(len(success), 1)

        # A non-callable validatecommand raises when validation runs.
        self.entry['validatecommand'] = True
        self.assertRaises(Tkinter.TclError, self.entry.validate)

    def test_validation(self):
        validation = []
        def validate(to_insert):
            if not 'a' <= to_insert.lower() <= 'z':
                validation.append(False)
                return False
            validation.append(True)
            return True

        # Per-keystroke validation; '%S' substitutes the inserted text.
        self.entry['validate'] = 'key'
        self.entry['validatecommand'] = self.entry.register(validate), '%S'

        self.entry.insert('end', 1)
        self.entry.insert('end', 'a')
        self.assertEqual(validation, [False, True])
        # Only the accepted character made it into the entry.
        self.assertEqual(self.entry.get(), 'a')

    def test_revalidation(self):
        def validate(content):
            for letter in content:
                if not 'a' <= letter.lower() <= 'z':
                    return False
            return True

        # '%P' substitutes the prospective full content of the entry.
        self.entry['validatecommand'] = self.entry.register(validate), '%P'

        self.entry.insert('end', 'avocado')
        self.assertEqual(self.entry.validate(), True)
        self.assertEqual(self.entry.state(), ())

        self.entry.delete(0, 'end')
        self.assertEqual(self.entry.get(), '')

        # Failing validation sets the 'invalid' widget state.
        self.entry.insert('end', 'a1b')
        self.assertEqual(self.entry.validate(), False)
        self.assertEqual(self.entry.state(), ('invalid', ))

        # Removing the offending character clears the 'invalid' state.
        self.entry.delete(1)
        self.assertEqual(self.entry.validate(), True)
        self.assertEqual(self.entry.state(), ())
class PanedwindowTest(unittest.TestCase):
    """Tests for ttk.Panedwindow pane management."""

    def setUp(self):
        support.root_deiconify()
        self.paned = ttk.Panedwindow()

    def tearDown(self):
        self.paned.destroy()
        support.root_withdraw()

    def test_add(self):
        # attempt to add a child that is not a direct child of the paned window
        label = ttk.Label(self.paned)
        child = ttk.Label(label)
        self.assertRaises(Tkinter.TclError, self.paned.add, child)
        label.destroy()
        child.destroy()
        # another attempt
        label = ttk.Label()
        child = ttk.Label(label)
        self.assertRaises(Tkinter.TclError, self.paned.add, child)
        child.destroy()
        label.destroy()

        good_child = ttk.Label()
        self.paned.add(good_child)
        # re-adding a child is not accepted
        self.assertRaises(Tkinter.TclError, self.paned.add, good_child)

        other_child = ttk.Label(self.paned)
        self.paned.add(other_child)
        # Both panes carry identical (default) pane options.
        self.assertEqual(self.paned.pane(0), self.paned.pane(1))
        self.assertRaises(Tkinter.TclError, self.paned.pane, 2)
        good_child.destroy()
        other_child.destroy()
        # Destroying the children removes their panes too.
        self.assertRaises(Tkinter.TclError, self.paned.pane, 0)

    def test_forget(self):
        # forget() rejects invalid pane indices.
        self.assertRaises(Tkinter.TclError, self.paned.forget, None)
        self.assertRaises(Tkinter.TclError, self.paned.forget, 0)

        self.paned.add(ttk.Label())
        self.paned.forget(0)
        self.assertRaises(Tkinter.TclError, self.paned.forget, 0)

    def test_insert(self):
        # Invalid positions/children are rejected.
        self.assertRaises(Tkinter.TclError, self.paned.insert, None, 0)
        self.assertRaises(Tkinter.TclError, self.paned.insert, 0, None)
        self.assertRaises(Tkinter.TclError, self.paned.insert, 0, 0)

        child = ttk.Label()
        child2 = ttk.Label()
        child3 = ttk.Label()

        # Inserting at index 0 into an empty paned window fails;
        # 'end' always works.
        self.assertRaises(Tkinter.TclError, self.paned.insert, 0, child)

        self.paned.insert('end', child2)
        self.paned.insert(0, child)
        self.assertEqual(self.paned.panes(), (str(child), str(child2)))

        # Inserting an existing child moves it instead of duplicating it.
        self.paned.insert(0, child2)
        self.assertEqual(self.paned.panes(), (str(child2), str(child)))

        self.paned.insert('end', child3)
        self.assertEqual(self.paned.panes(),
            (str(child2), str(child), str(child3)))

        # reinserting a child should move it to its current position
        panes = self.paned.panes()
        self.paned.insert('end', child3)
        self.assertEqual(panes, self.paned.panes())

        # moving child3 to child2 position should result in child2 ending up
        # in previous child position and child ending up in previous child3
        # position
        self.paned.insert(child2, child3)
        self.assertEqual(self.paned.panes(),
            (str(child3), str(child2), str(child)))

    def test_pane(self):
        # pane() on an empty paned window is invalid.
        self.assertRaises(Tkinter.TclError, self.paned.pane, 0)

        child = ttk.Label()
        self.paned.add(child)
        # No option: a dict of all options; option=None: that option's value.
        self.assertTrue(isinstance(self.paned.pane(0), dict))
        self.assertEqual(self.paned.pane(0, weight=None), 0)
        # newer form for querying a single option
        self.assertEqual(self.paned.pane(0, 'weight'), 0)
        # Panes are addressable by index or by child widget pathname.
        self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))

        self.assertRaises(Tkinter.TclError, self.paned.pane, 0,
            badoption='somevalue')

    def test_sashpos(self):
        # Invalid sash indices are rejected.
        self.assertRaises(Tkinter.TclError, self.paned.sashpos, None)
        self.assertRaises(Tkinter.TclError, self.paned.sashpos, '')
        self.assertRaises(Tkinter.TclError, self.paned.sashpos, 0)

        child = ttk.Label(self.paned, text='a')
        self.paned.add(child, weight=1)
        # A sash only exists between two panes.
        self.assertRaises(Tkinter.TclError, self.paned.sashpos, 0)
        child2 = ttk.Label(self.paned, text='b')
        self.paned.add(child2)
        self.assertRaises(Tkinter.TclError, self.paned.sashpos, 1)

        self.paned.pack(expand=True, fill='both')
        self.paned.wait_visibility()

        curr_pos = self.paned.sashpos(0)
        self.paned.sashpos(0, 1000)
        self.assertTrue(curr_pos != self.paned.sashpos(0))
        self.assertTrue(isinstance(self.paned.sashpos(0), int))
class RadiobuttonTest(unittest.TestCase):
    """Tests for ttk.Radiobutton."""

    def test_invoke(self):
        # invoke() runs the command callback and selects the button's value.
        calls = []

        def record_call():
            calls.append(1)
            return "cb test called"

        shared_var = Tkinter.IntVar()
        first = ttk.Radiobutton(command=record_call, variable=shared_var,
                                value=0)
        second = ttk.Radiobutton(command=record_call, variable=shared_var,
                                 value=1)

        self.assertEqual(first.invoke(), "cb test called")
        self.assertEqual(first['value'], shared_var.get())
        self.assertEqual(shared_var.get(),
                         first.tk.globalgetvar(first['variable']))
        self.assertTrue(calls)

        # With an empty command, invoke() still selects the button but
        # returns an empty string and runs no callback.
        second['command'] = ''
        self.assertEqual(second.invoke(), '')
        self.assertFalse(len(calls) > 1)
        self.assertEqual(second['value'], shared_var.get())
        self.assertEqual(shared_var.get(),
                         first.tk.globalgetvar(first['variable']))

        # Both buttons share the same underlying Tcl variable.
        self.assertEqual(str(first['variable']), str(second['variable']))
class ScaleTest(unittest.TestCase):
    """Tests for ttk.Scale."""

    def setUp(self):
        support.root_deiconify()
        self.scale = ttk.Scale()
        self.scale.pack()
        self.scale.update()

    def tearDown(self):
        self.scale.destroy()
        support.root_withdraw()

    def test_custom_event(self):
        # Each change to from/to fires a <<RangeChanged>> virtual event.
        failure = [1, 1, 1] # will need to be empty

        funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())

        # 'from' and 'from_' both address the same Tcl -from option.
        self.scale['from'] = 10
        self.scale['from_'] = 10
        self.scale['to'] = 3

        self.assertFalse(failure)

        failure = [1, 1, 1]
        # configure() with two range options still fires one event per call.
        self.scale.configure(from_=2, to=5)
        self.scale.configure(from_=0, to=-2)
        self.scale.configure(to=10)

        self.assertFalse(failure)

    def test_get(self):
        # get(x, y) maps widget coordinates to a scale value.
        scale_width = self.scale.winfo_width()
        self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])

        self.assertEqual(self.scale.get(0, 0), self.scale['from'])
        self.assertEqual(self.scale.get(), self.scale['value'])
        self.scale['value'] = 30
        self.assertEqual(self.scale.get(), self.scale['value'])

        # Coordinates must be integers.
        self.assertRaises(Tkinter.TclError, self.scale.get, '', 0)
        self.assertRaises(Tkinter.TclError, self.scale.get, 0, '')

    def test_set(self):
        # set restricts the max/min values according to the current range
        max = self.scale['to']
        new_max = max + 10
        self.scale.set(new_max)
        self.assertEqual(self.scale.get(), max)
        min = self.scale['from']
        self.scale.set(min - 1)
        self.assertEqual(self.scale.get(), min)

        # changing directly the variable doesn't impose this limitation tho
        var = Tkinter.DoubleVar()
        self.scale['variable'] = var
        var.set(max + 5)
        self.assertEqual(self.scale.get(), var.get())
        self.assertEqual(self.scale.get(), max + 5)
        del var

        # the same happens with the value option
        self.scale['value'] = max + 10
        self.assertEqual(self.scale.get(), max + 10)
        self.assertEqual(self.scale.get(), self.scale['value'])

        # nevertheless, note that the max/min values we can get specifying
        # x, y coords are the ones according to the current range
        self.assertEqual(self.scale.get(0, 0), min)
        self.assertEqual(self.scale.get(self.scale.winfo_width(), 0), max)

        self.assertRaises(Tkinter.TclError, self.scale.set, None)
class NotebookTest(unittest.TestCase):
    """Tests for ttk.Notebook tab management and traversal."""

    def setUp(self):
        # Notebook with two labeled tabs 'a' (child1) and 'b' (child2).
        support.root_deiconify()
        self.nb = ttk.Notebook(padding=0)
        self.child1 = ttk.Label()
        self.child2 = ttk.Label()
        self.nb.add(self.child1, text='a')
        self.nb.add(self.child2, text='b')

    def tearDown(self):
        self.child1.destroy()
        self.child2.destroy()
        self.nb.destroy()
        support.root_withdraw()

    def test_tab_identifiers(self):
        # Tabs may be addressed by index, widget, 'current', or '@x,y'.
        self.nb.forget(0)
        self.nb.hide(self.child2)
        self.assertRaises(Tkinter.TclError, self.nb.tab, self.child1)
        self.assertEqual(self.nb.index('end'), 1)
        self.nb.add(self.child2)
        self.assertEqual(self.nb.index('end'), 1)
        self.nb.select(self.child2)

        self.assertTrue(self.nb.tab('current'))
        self.nb.add(self.child1, text='a')

        self.nb.pack()
        self.nb.wait_visibility()
        # The tab strip layout differs per platform (aqua offsets the tabs).
        if sys.platform == 'darwin':
            tb_idx = "@20,5"
        else:
            tb_idx = "@5,5"
        self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))

        # Scan along the tab strip until the '@x,y' form hits tab 'a'.
        for i in range(5, 100, 5):
            try:
                if self.nb.tab('@%d, 5' % i, text=None) == 'a':
                    break
            except Tkinter.TclError:
                pass

        else:
            self.fail("Tab with text 'a' not found")

    def test_add_and_hidden(self):
        # Invalid tab identifiers are rejected by hide() and add().
        self.assertRaises(Tkinter.TclError, self.nb.hide, -1)
        self.assertRaises(Tkinter.TclError, self.nb.hide, 'hi')
        self.assertRaises(Tkinter.TclError, self.nb.hide, None)
        self.assertRaises(Tkinter.TclError, self.nb.add, None)
        self.assertRaises(Tkinter.TclError, self.nb.add, ttk.Label(),
            unknown='option')

        tabs = self.nb.tabs()
        self.nb.hide(self.child1)
        self.nb.add(self.child1)
        self.assertEqual(self.nb.tabs(), tabs)

        child = ttk.Label()
        self.nb.add(child, text='c')
        tabs = self.nb.tabs()

        curr = self.nb.index('current')
        # verify that the tab gets readded at its previous position
        child2_index = self.nb.index(self.child2)
        self.nb.hide(self.child2)
        self.nb.add(self.child2)
        self.assertEqual(self.nb.tabs(), tabs)
        self.assertEqual(self.nb.index(self.child2), child2_index)
        self.assertTrue(str(self.child2) == self.nb.tabs()[child2_index])
        # but the tab next to it (not hidden) is the one selected now
        self.assertEqual(self.nb.index('current'), curr + 1)

    def test_forget(self):
        # Invalid tab identifiers are rejected by forget().
        self.assertRaises(Tkinter.TclError, self.nb.forget, -1)
        self.assertRaises(Tkinter.TclError, self.nb.forget, 'hi')
        self.assertRaises(Tkinter.TclError, self.nb.forget, None)

        tabs = self.nb.tabs()
        child1_index = self.nb.index(self.child1)
        self.nb.forget(self.child1)
        self.assertFalse(str(self.child1) in self.nb.tabs())
        self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))

        # Unlike hide/add, forget/add appends at the end, not the old slot.
        self.nb.add(self.child1)
        self.assertEqual(self.nb.index(self.child1), 1)
        self.assertFalse(child1_index == self.nb.index(self.child1))

    def test_index(self):
        self.assertRaises(Tkinter.TclError, self.nb.index, -1)
        self.assertRaises(Tkinter.TclError, self.nb.index, None)

        # index('end') yields the number of tabs.
        self.assertTrue(isinstance(self.nb.index('end'), int))
        self.assertEqual(self.nb.index(self.child1), 0)
        self.assertEqual(self.nb.index(self.child2), 1)
        self.assertEqual(self.nb.index('end'), 2)

    def test_insert(self):
        # moving tabs
        tabs = self.nb.tabs()

        self.nb.insert(1, tabs[0])
        self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
        self.nb.insert(self.child1, self.child2)
        self.assertEqual(self.nb.tabs(), tabs)
        self.nb.insert('end', self.child1)
        self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
        self.nb.insert('end', 0)
        self.assertEqual(self.nb.tabs(), tabs)
        # bad moves
        self.assertRaises(Tkinter.TclError, self.nb.insert, 2, tabs[0])
        self.assertRaises(Tkinter.TclError, self.nb.insert, -1, tabs[0])

        # new tab
        child3 = ttk.Label()
        self.nb.insert(1, child3)
        self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
        self.nb.forget(child3)
        self.assertEqual(self.nb.tabs(), tabs)
        self.nb.insert(self.child1, child3)
        self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
        self.nb.forget(child3)
        self.assertRaises(Tkinter.TclError, self.nb.insert, 2, child3)
        self.assertRaises(Tkinter.TclError, self.nb.insert, -1, child3)

        # bad inserts
        self.assertRaises(Tkinter.TclError, self.nb.insert, 'end', None)
        self.assertRaises(Tkinter.TclError, self.nb.insert, None, 0)
        self.assertRaises(Tkinter.TclError, self.nb.insert, None, None)

    def test_select(self):
        self.nb.pack()
        self.nb.wait_visibility()

        success = []
        tab_changed = []

        # Switching tabs unmaps the old tab's child and fires
        # <<NotebookTabChanged>>.
        self.child1.bind('<Unmap>', lambda evt: success.append(True))
        self.nb.bind('<<NotebookTabChanged>>',
            lambda evt: tab_changed.append(True))

        self.assertEqual(self.nb.select(), str(self.child1))
        self.nb.select(self.child2)
        self.assertTrue(success)
        self.assertEqual(self.nb.select(), str(self.child2))

        self.nb.update()
        self.assertTrue(tab_changed)

    def test_tab(self):
        self.assertRaises(Tkinter.TclError, self.nb.tab, -1)
        self.assertRaises(Tkinter.TclError, self.nb.tab, 'notab')
        self.assertRaises(Tkinter.TclError, self.nb.tab, None)

        # No option: dict of all options; option=None (or name): its value.
        self.assertTrue(isinstance(self.nb.tab(self.child1), dict))
        self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
        # newer form for querying a single option
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
        self.nb.tab(self.child1, text='abc')
        self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')

    def test_tabs(self):
        self.assertEqual(len(self.nb.tabs()), 2)

        self.nb.forget(self.child1)
        self.nb.forget(self.child2)

        self.assertEqual(self.nb.tabs(), ())

    def test_traversal(self):
        self.nb.pack()
        self.nb.wait_visibility()

        self.nb.select(0)

        # Control-Tab / Shift-Control-Tab cycle through the tabs.
        support.simulate_mouse_click(self.nb, 5, 5)
        self.nb.focus_force()
        self.nb.event_generate('<Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))
        self.nb.focus_force()
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child1))
        self.nb.focus_force()
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))

        # enable_traversal() adds mnemonic (Alt/Option + underlined letter)
        # tab selection.
        self.nb.tab(self.child1, text='a', underline=0)
        self.nb.enable_traversal()
        self.nb.focus_force()
        support.simulate_mouse_click(self.nb, 5, 5)
        if sys.platform == 'darwin':
            self.nb.event_generate('<Option-a>')
        else:
            self.nb.event_generate('<Alt-a>')
        self.assertEqual(self.nb.select(), str(self.child1))
class TreeviewTest(unittest.TestCase):
    """Tests for ttk.Treeview items, columns, headings, and tags."""

    def setUp(self):
        support.root_deiconify()
        self.tv = ttk.Treeview(padding=0)

    def tearDown(self):
        self.tv.destroy()
        support.root_withdraw()

    def test_bbox(self):
        self.tv.pack()
        # The root item has no bounding box.
        self.assertEqual(self.tv.bbox(''), '')
        self.tv.wait_visibility()
        self.tv.update()

        item_id = self.tv.insert('', 'end')
        children = self.tv.get_children()
        self.assertTrue(children)

        bbox = self.tv.bbox(children[0])
        self.assertEqual(len(bbox), 4)

        # verify that bbox is a two-dimensional (x, y, width, height) tuple
        self.assertTrue(isinstance(bbox, tuple))
        for item in bbox:
            if not isinstance(item, int):
                self.fail("Invalid bounding box: %s" % bbox)
                break

        # compare width in bboxes
        self.tv['columns'] = ['test']
        self.tv.column('test', width=50)
        bbox_column0 = self.tv.bbox(children[0], 0)
        root_width = self.tv.column('#0', width=None)
        self.assertEqual(bbox_column0[0], bbox[0] + root_width)

        # verify that bbox of a closed item is the empty string
        child1 = self.tv.insert(item_id, 'end')
        self.assertEqual(self.tv.bbox(child1), '')

    def test_children(self):
        # no children yet, should get an empty tuple
        self.assertEqual(self.tv.get_children(), ())

        item_id = self.tv.insert('', 'end')
        self.assertTrue(isinstance(self.tv.get_children(), tuple))
        self.assertEqual(self.tv.get_children()[0], item_id)

        # add item_id and child3 as children of child2
        child2 = self.tv.insert('', 'end')
        child3 = self.tv.insert('', 'end')
        self.tv.set_children(child2, item_id, child3)
        self.assertEqual(self.tv.get_children(child2), (item_id, child3))

        # child3 has child2 as parent, thus trying to set child2 as a children
        # of child3 should result in an error
        self.assertRaises(Tkinter.TclError,
            self.tv.set_children, child3, child2)

        # remove child2 children
        self.tv.set_children(child2)
        self.assertEqual(self.tv.get_children(child2), ())

        # remove root's children
        self.tv.set_children('')
        self.assertEqual(self.tv.get_children(), ())

    def test_column(self):
        # return a dict with all options/values
        self.assertTrue(isinstance(self.tv.column('#0'), dict))
        # return a single value of the given option
        self.assertTrue(isinstance(self.tv.column('#0', width=None), int))
        # set a new value for an option
        self.tv.column('#0', width=10)
        # testing new way to get option value
        self.assertEqual(self.tv.column('#0', 'width'), 10)
        self.assertEqual(self.tv.column('#0', width=None), 10)
        # check read-only option
        self.assertRaises(Tkinter.TclError, self.tv.column, '#0', id='X')

        self.assertRaises(Tkinter.TclError, self.tv.column, 'invalid')
        invalid_kws = [
            {'unknown_option': 'some value'},  {'stretch': 'wrong'},
            {'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
        ]
        for kw in invalid_kws:
            self.assertRaises(Tkinter.TclError, self.tv.column, '#0',
                **kw)

    def test_delete(self):
        # Deleting a nonexistent item is an error.
        self.assertRaises(Tkinter.TclError, self.tv.delete, '#0')

        item_id = self.tv.insert('', 'end')
        item2 = self.tv.insert(item_id, 'end')
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))

        self.tv.delete(item_id)
        self.assertFalse(self.tv.get_children())

        # reattach should fail
        self.assertRaises(Tkinter.TclError,
            self.tv.reattach, item_id, '', 'end')

        # test multiple item delete
        item1 = self.tv.insert('', 'end')
        item2 = self.tv.insert('', 'end')
        self.assertEqual(self.tv.get_children(), (item1, item2))

        self.tv.delete(item1, item2)
        self.assertFalse(self.tv.get_children())

    def test_detach_reattach(self):
        item_id = self.tv.insert('', 'end')
        item2 = self.tv.insert(item_id, 'end')

        # calling detach without items is valid, although it does nothing
        prev = self.tv.get_children()
        self.tv.detach() # this should do nothing
        self.assertEqual(prev, self.tv.get_children())

        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))

        # detach item with children
        self.tv.detach(item_id)
        self.assertFalse(self.tv.get_children())

        # reattach item with children
        self.tv.reattach(item_id, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))

        # move a children to the root
        self.tv.move(item2, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, item2))
        self.assertEqual(self.tv.get_children(item_id), ())

        # bad values
        self.assertRaises(Tkinter.TclError,
            self.tv.reattach, 'nonexistent', '', 'end')
        self.assertRaises(Tkinter.TclError,
            self.tv.detach, 'nonexistent')
        self.assertRaises(Tkinter.TclError,
            self.tv.reattach, item2, 'otherparent', 'end')
        self.assertRaises(Tkinter.TclError,
            self.tv.reattach, item2, '', 'invalid')

        # multiple detach
        self.tv.detach(item_id, item2)
        self.assertEqual(self.tv.get_children(), ())
        self.assertEqual(self.tv.get_children(item_id), ())

    def test_exists(self):
        self.assertEqual(self.tv.exists('something'), False)
        # The empty string names the (always existing) root item.
        self.assertEqual(self.tv.exists(''), True)
        self.assertEqual(self.tv.exists({}), False)

        # the following will make a tk.call equivalent to
        # tk.call(treeview, "exists") which should result in an error
        # in the tcl interpreter since tk requires an item.
        self.assertRaises(Tkinter.TclError, self.tv.exists, None)

    def test_focus(self):
        # nothing is focused right now
        self.assertEqual(self.tv.focus(), '')

        item1 = self.tv.insert('', 'end')
        self.tv.focus(item1)
        self.assertEqual(self.tv.focus(), item1)

        # Deleting the focused item clears the focus.
        self.tv.delete(item1)
        self.assertEqual(self.tv.focus(), '')

        # try focusing inexistent item
        self.assertRaises(Tkinter.TclError, self.tv.focus, 'hi')

    def test_heading(self):
        # check a dict is returned
        self.assertTrue(isinstance(self.tv.heading('#0'), dict))

        # check a value is returned
        self.tv.heading('#0', text='hi')
        self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
        self.assertEqual(self.tv.heading('#0', text=None), 'hi')

        # invalid option
        self.assertRaises(Tkinter.TclError, self.tv.heading, '#0',
            background=None)
        # invalid value
        self.assertRaises(Tkinter.TclError, self.tv.heading, '#0',
            anchor=1)

    # XXX skipping for now; should be fixed to work with newer ttk
    @unittest.skip("skipping pending resolution of Issue #10734")
    def test_heading_callback(self):
        def simulate_heading_click(x, y):
            support.simulate_mouse_click(self.tv, x, y)
            self.tv.update_idletasks()

        success = [] # no success for now

        self.tv.pack()
        self.tv.wait_visibility()
        self.tv.heading('#0', command=lambda: success.append(True))
        self.tv.column('#0', width=100)
        self.tv.update()

        # assuming that the coords (5, 5) fall into heading #0
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                "invoked.")

        success = []
        commands = self.tv.master._tclCommands
        self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
        self.assertEqual(commands, self.tv.master._tclCommands)
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                "invoked.")

        # XXX The following raises an error in a tcl interpreter, but not in
        # Python
        #self.tv.heading('#0', command='I dont exist')
        #simulate_heading_click(5, 5)

    def test_index(self):
        # item 'what' doesn't exist
        self.assertRaises(Tkinter.TclError, self.tv.index, 'what')

        self.assertEqual(self.tv.index(''), 0)

        item1 = self.tv.insert('', 'end')
        item2 = self.tv.insert('', 'end')
        c1 = self.tv.insert(item1, 'end')
        c2 = self.tv.insert(item1, 'end')
        self.assertEqual(self.tv.index(item1), 0)
        self.assertEqual(self.tv.index(c1), 0)
        self.assertEqual(self.tv.index(c2), 1)
        self.assertEqual(self.tv.index(item2), 1)

        self.tv.move(item2, '', 0)
        self.assertEqual(self.tv.index(item2), 0)
        self.assertEqual(self.tv.index(item1), 1)

        # check that index still works even after its parent and siblings
        # have been detached
        self.tv.detach(item1)
        self.assertEqual(self.tv.index(c2), 1)
        self.tv.detach(c1)
        self.assertEqual(self.tv.index(c2), 0)

        # but it fails after item has been deleted
        self.tv.delete(item1)
        self.assertRaises(Tkinter.TclError, self.tv.index, c2)

    def test_insert_item(self):
        # parent 'none' doesn't exist
        self.assertRaises(Tkinter.TclError, self.tv.insert, 'none', 'end')

        # open values
        self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'end',
            open='')
        self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'end',
            open='please')
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))

        # invalid index
        self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'middle')

        # trying to duplicate item id is invalid
        itemid = self.tv.insert('', 'end', 'first-item')
        self.assertEqual(itemid, 'first-item')
        self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'end',
            'first-item')
        self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'end',
            MockTclObj('first-item'))

        # unicode values
        value = u'\xe1ba'
        item = self.tv.insert('', 'end', values=(value, ))
        self.assertEqual(self.tv.item(item, 'values'), (value, ))
        self.assertEqual(self.tv.item(item, values=None), (value, ))

        self.tv.item(item, values=list(self.tv.item(item, values=None)))
        self.assertEqual(self.tv.item(item, values=None), (value, ))

        self.assertTrue(isinstance(self.tv.item(item), dict))

        # erase item values
        self.tv.item(item, values='')
        self.assertFalse(self.tv.item(item, values=None))

        # item tags
        item = self.tv.insert('', 'end', tags=[1, 2, value])
        self.assertEqual(self.tv.item(item, tags=None), ('1', '2', value))
        self.tv.item(item, tags=[])
        self.assertFalse(self.tv.item(item, tags=None))
        self.tv.item(item, tags=(1, 2))
        self.assertEqual(self.tv.item(item, tags=None), ('1', '2'))

        # values with spaces
        item = self.tv.insert('', 'end', values=('a b c',
            '%s %s' % (value, value)))
        self.assertEqual(self.tv.item(item, values=None),
            ('a b c', '%s %s' % (value, value)))

        # text
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text="Label here"), text=None),
            "Label here")
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text=value), text=None),
            value)

    def test_set(self):
        self.tv['columns'] = ['A', 'B']
        item = self.tv.insert('', 'end', values=['a', 'b'])

        # set() with no column returns a column->value dict.
        self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})

        self.tv.set(item, 'B', 'a')
        self.assertEqual(self.tv.item(item, values=None), ('a', 'a'))

        # Shrinking 'columns' hides values but the item keeps them all.
        self.tv['columns'] = ['B']
        self.assertEqual(self.tv.set(item), {'B': 'a'})

        self.tv.set(item, 'B', 'b')
        self.assertEqual(self.tv.set(item, column='B'), 'b')
        self.assertEqual(self.tv.item(item, values=None), ('b', 'a'))

        self.tv.set(item, 'B', 123)
        self.assertEqual(self.tv.set(item, 'B'), 123)
        self.assertEqual(self.tv.item(item, values=None), (123, 'a'))
        self.assertEqual(self.tv.set(item), {'B': 123})

        # inexistent column
        self.assertRaises(Tkinter.TclError, self.tv.set, item, 'A')
        self.assertRaises(Tkinter.TclError, self.tv.set, item, 'A', 'b')

        # inexistent item
        self.assertRaises(Tkinter.TclError, self.tv.set, 'notme')

    def test_tag_bind(self):
        events = []
        item1 = self.tv.insert('', 'end', tags=['call'])
        item2 = self.tv.insert('', 'end', tags=['call'])
        self.tv.tag_bind('call', '<ButtonPress-1>',
            lambda evt: events.append(1))
        self.tv.tag_bind('call', '<ButtonRelease-1>',
            lambda evt: events.append(2))

        self.tv.pack()
        self.tv.wait_visibility()
        self.tv.update()

        # Locate the on-screen y coordinate of each of the two items.
        pos_y = set()
        found = set()
        for i in range(0, 100, 10):
            if len(found) == 2: # item1 and item2 already found
                break
            item_id = self.tv.identify_row(i)
            if item_id and item_id not in found:
                pos_y.add(i)
                found.add(item_id)

        self.assertEqual(len(pos_y), 2) # item1 and item2 y pos
        for y in pos_y:
            support.simulate_mouse_click(self.tv, 0, y)

        # by now there should be 4 things in the events list, since each
        # item had a bind for two events that were simulated above
        self.assertEqual(len(events), 4)
        for evt in zip(events[::2], events[1::2]):
            self.assertEqual(evt, (1, 2))

    def test_tag_configure(self):
        # Just testing parameter passing for now
        self.assertRaises(TypeError, self.tv.tag_configure)
        self.assertRaises(Tkinter.TclError, self.tv.tag_configure,
            'test', sky='blue')
        self.tv.tag_configure('test', foreground='blue')
        self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
            'blue')
        self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
            'blue')
        self.assertTrue(isinstance(self.tv.tag_configure('test'), dict))
# All test cases in this module; they require a GUI to run.
tests_gui = (
        WidgetTest, ButtonTest, CheckbuttonTest, RadiobuttonTest,
        ComboboxTest, EntryTest, PanedwindowTest, ScaleTest, NotebookTest,
        TreeviewTest
        )

if __name__ == "__main__":
    run_unittest(*tests_gui)
| mpl-2.0 |
ojengwa/odoo | addons/product_extended/wizard/wizard_price.py | 270 | 3043 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP S.A. (<http://www.openerp.com>).
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.exceptions import except_orm
from openerp.osv import fields, osv
from openerp.tools.translate import _
class wizard_price(osv.osv):
    """Wizard that (re)computes product standard prices from their BoM.

    ``default_get`` fills ``info_field`` with a dry-run preview of the
    computed prices; ``compute_from_bom`` applies them for real.
    """
    _name = "wizard.price"
    _description = "Compute price wizard"
    _columns = {
        'info_field': fields.text('Info', readonly=True),
        'real_time_accounting': fields.boolean("Generate accounting entries when real-time"),
        'recursive': fields.boolean("Change prices of child BoMs too"),
    }

    def default_get(self, cr, uid, fields, context=None):
        """Pre-fill 'info_field' with a dry-run of the price computation.

        Bug fix: the original dereferenced ``context`` (via
        ``context.get('active_id', ...)``) *before* normalising a ``None``
        context, raising AttributeError when the wizard was opened without
        a context. The guard now comes first.
        """
        if context is None:
            context = {}
        res = super(wizard_price, self).default_get(cr, uid, fields, context=context)
        product_pool = self.pool.get('product.template')
        rec_id = context.get('active_id', False)
        assert rec_id, _('Active ID is not set in Context.')
        product_obj = product_pool.browse(cr, uid, rec_id)
        # test=True: preview only, nothing is written to the database.
        res['info_field'] = str(product_pool.compute_price(
            cr, uid, [], template_ids=[product_obj.id], test=True, context=context))
        return res

    def compute_from_bom(self, cr, uid, ids, context=None):
        """Apply the BoM-based price computation to the active product template.

        Raises except_orm when launched from anything but a product.template
        (e.g. a product variant).
        """
        assert len(ids) == 1
        if context is None:
            context = {}
        model = context.get('active_model')
        if model != 'product.template':
            raise except_orm(_('Wrong model!'), _('This wizard is build for product templates, while you are currently running it from a product variant.'))
        rec_id = context.get('active_id', False)
        assert rec_id, _('Active ID is not set in Context.')
        prod_obj = self.pool.get('product.template')
        res = self.browse(cr, uid, ids, context=context)
        prod = prod_obj.browse(cr, uid, rec_id, context=context)
        # test=False: prices are actually written (and accounting entries
        # generated when real-time accounting is enabled).
        prod_obj.compute_price(cr, uid, [], template_ids=[prod.id],
                               real_time_accounting=res[0].real_time_accounting,
                               recursive=res[0].recursive, test=False,
                               context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andrewcmyers/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py | 106 | 2900 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network components for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
class FullyConnectedLayer(hybrid_layer.HybridLayer):
  """A stacked, fully-connected feed-forward neural network layer."""

  def _define_vars(self, params):
    # All variables are created lazily by layers.fully_connected.
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      # The first hidden layer consumes the raw input; every subsequent
      # layer consumes the previous layer's activations.
      activations = layers.fully_connected(data, self.params.layer_size)
      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        activations = layers.fully_connected(activations,
                                             self.params.layer_size)
      return activations
class ManyToOneLayer(hybrid_layer.HybridLayer):
  """A single fully-connected layer mapping each instance to one scalar."""

  def _define_vars(self, params):
    # Variables are created by layers.fully_connected; nothing to declare.
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      # Compute activations for the neural network.
      nn_activations = layers.fully_connected(data, 1)

      # There is always one activation per instance by definition, so squeeze
      # away the extra dimension.
      return array_ops.squeeze(nn_activations, squeeze_dims=[1])
class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer):
  """A stacked, fully-connected flattened feed-forward neural network layer."""

  def _define_vars(self, params):
    # Variables are created lazily by layers.fully_connected.
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      # Build the stack of hidden layers, keeping every layer's output.
      layer_outputs = [layers.fully_connected(data, self.params.layer_size)]
      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        previous = layer_outputs[-1]
        layer_outputs.append(
            layers.fully_connected(previous, self.params.layer_size))
      # Concatenate all layers' activations into one flat feature tensor.
      return array_ops.concat(
          layer_outputs, 1, name="flattened_nn_activations")
| apache-2.0 |
kittiu/account-invoicing | account_group_invoice_lines/__openerp__.py | 4 | 1576 | # -*- coding: utf-8 -*-
##############################################################################
#
# account_group_invoice_lines module for Odoo
# Copyright (C) 2012-2015 SYLEAM Info Services (<http://www.syleam.fr/>)
# Copyright (C) 2015 Akretion (http://www.akretion.com)
# @author: Sébastien LANGE <sebastien.lange@syleam.fr>
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo module manifest, read by the addons loader at startup.
{
    'name': 'Account Group Invoice Lines',
    'version': '8.0.1.1.0',
    'category': 'Accounting & Finance',
    'summary': 'Add option to group invoice line per account',
    'author': 'SYLEAM,Akretion,Odoo Community Association (OCA)',
    'license': 'AGPL-3',
    'website': 'http://www.syleam.fr/',
    # Only the core accounting module is required.
    'depends': ['account'],
    'data': ['account_view.xml'],
    # Marked not installable (e.g. pending migration); set True to enable.
    'installable': False,
}
| agpl-3.0 |
mholgatem/GPIOnext | config/menus.py | 1 | 4183 | import time
from config.constants import *
from config import SQL
from cursesmenu import *
from cursesmenu.items import *
import curses
'''
---------------------------------------------------------
This script handles menu navigation
RETURNS: dictionary containing device name,
number of buttons, number of axis
---------------------------------------------------------
'''
# Sentinel returned by menu handlers to signal "go back to the main menu".
GOTO_MAIN = -999
def close():
    """Tear down the curses menu system, if it was ever initialised."""
    if CursesMenu.stdscr is not None:
        CursesMenu().exit()
def clearPreviousMenu():
    """Erase any menu currently drawn on the curses screen."""
    # clear any previous menus
    screen = CursesMenu.stdscr
    if screen is not None:
        screen.erase()
def showMainMenu():
    """Show the top-level device menu and dispatch to the chosen flow.

    Returns None when the user exits, GOTO_MAIN from sub-menus, or the
    populated ``currentDevice`` dict describing the device being set up.
    """
    global currentDevice
    clearPreviousMenu()
    # Fresh state for the device about to be configured.
    currentDevice = {'name': None,
                     'axisCount': 0,
                     'buttons': 0}
    options = DEVICE_LIST + ['Clear Device']
    choice = SelectionMenu.get_selection(
        strings=options,
        title='GPIOnext Config',
        subtitle='Which virtual device do you want to CONFIGURE?'
    )
    try:
        selected = options[choice]
    except IndexError:
        # The implicit 'Exit' entry was chosen (index past our options).
        return None
    currentDevice['name'] = selected
    if selected == 'Clear Device':
        return clearDevice()
    if selected == 'Keyboard':
        return selectFromList(KEY_LIST,
                              'Select the keys that you want to assign')
    if selected == 'Commands':
        return currentDevice
    return getJoyAxisCount()
def clearDevice():
    """Let the user delete stored device configs, looping until they back out.

    The original implementation recursed after each deletion; this loop is
    behaviourally identical and always ends by returning GOTO_MAIN.
    """
    while True:
        clearPreviousMenu()
        options = DEVICE_LIST + ['← Return to Main Menu']
        choice = SelectionMenu.get_selection(
            strings=options,
            title='CLEAR DEVICE',
            subtitle='Remove configs for which device?',
            exit_option=False
        )
        name = options[choice]
        currentDevice['name'] = name
        if name == '← Return to Main Menu':
            return GOTO_MAIN
        clearPreviousMenu()
        print('Deleting config files for {0}...'.format(name))
        SQL.deleteDevice(name)
        # Brief pause so the user can read the confirmation message.
        time.sleep(1)
def getJoyAxisCount():
    """Ask how many dpads/joysticks the controller has, then pick buttons."""
    global currentDevice
    clearPreviousMenu()
    # Indices 0-4 double as the axis count; index 5 backs out.
    axisList = ['0', '1', '2', '3', '4', '← Return to Main Menu']
    dpadCount = SelectionMenu.get_selection(
        strings=axisList,
        title='Configuring {0}'.format(currentDevice['name']),
        subtitle='How many Dpads/Joysticks does this controller have?',
        exit_option=False
    )
    currentDevice['axisCount'] = dpadCount
    if dpadCount == 5:
        # '← Return to Main Menu' was selected.
        return GOTO_MAIN
    return selectFromList(BUTTON_LIST,
                          'Select the buttons that you want to assign')
def editCommandButton():
    """Menu for adding, editing or deleting stored command entries.

    Returns GOTO_MAIN, or a tuple ``(action, command_row)`` where action is
    'EDIT' (also used for "add new") or 'DELETE'.
    """
    global currentDevice
    cmdList = SQL.getDeviceRaw( 'Commands' )
    entries = [ '• Edit Command: {0}'.format( x['name'] ) for x in cmdList ]
    entries.insert( 0, '• Add New Command' )
    entries.append( '← Return to Main Menu' )
    # 2 is the index of '← Go Back' in the edit/delete submenu below; keep
    # looping while the user backs out of that submenu.
    edit = 2
    while edit == 2:
        clearPreviousMenu()
        choice = SelectionMenu.get_selection(
            strings = entries,
            title = 'Configuring {0}'.format( currentDevice['name'] ),
            subtitle = 'Select a command to edit',
            exit_option = False
        )
        if choice == 0:
            # 'Add New Command': return an empty command template to edit.
            return ( 'EDIT', {'command':'', 'pins': None, 'id': None, 'device': None, 'name': '', 'type':'COMMAND' } )
        elif choice == len( entries ) - 1:
            return GOTO_MAIN
        clearPreviousMenu()
        # Ask what to do with the selected command (choice - 1 skips the
        # 'Add New Command' entry when indexing cmdList).
        edit = SelectionMenu.get_selection(
            strings = ['Edit', 'Delete', '← Go Back' ],
            title = 'Configuring {0}'.format( cmdList[ choice - 1 ]['name'] ),
            subtitle = 'Edit or Delete this command?',
            exit_option = False
        )
    edit = 'EDIT' if edit == 0 else 'DELETE'
    return ( edit, cmdList[ choice - 1 ] )
def selectFromList(currentList, title):
    """Multi-select entries from currentList and store them on the device.

    Returns GOTO_MAIN if the user backs out, otherwise ``currentDevice``
    with its 'buttons' key set to the chosen (name, ...) tuples.
    """
    global currentDevice
    names = [entry[0] for entry in currentList]
    names.append('← Return to Main Menu')
    # Returns the list of selected button names.
    choice = MultiSelect.get_selection(
        strings=names,
        title=title,
        exit_option=False
    )
    if choice == [-1]:
        # User backed out to the main menu.
        return GOTO_MAIN
    currentDevice['buttons'] = [entry for entry in currentList
                                if entry[0] in choice]
    return currentDevice
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/comtypes-1.1.3-py2.7.egg/comtypes/client/__init__.py | 5 | 10460 | '''comtypes.client - High level client level COM support package.
'''
################################################################
#
# TODO:
#
# - refactor some code into modules
#
################################################################
import sys, os
import ctypes
import comtypes
from comtypes.hresult import *
import comtypes.automation
import comtypes.typeinfo
import comtypes.client.dynamic
from comtypes.client._events import GetEvents, ShowEvents, PumpEvents
from comtypes.client._generate import GetModule
import logging
logger = logging.getLogger(__name__)
__all__ = ["CreateObject", "GetActiveObject", "CoGetObject",
"GetEvents", "ShowEvents", "PumpEvents", "GetModule",
"GetClassObject"]
from comtypes.client._code_cache import _find_gen_dir
# Directory where wrapper modules generated from type libraries are cached.
gen_dir = _find_gen_dir()
import comtypes.gen

### for testing
##gen_dir = None
def wrap_outparam(punk):
    """Post-process a COM out-parameter.

    IDispatch pointers are upgraded to their most useful interface; NULL
    pointers become None; everything else is passed through unchanged.
    """
    logger.debug("wrap_outparam(%s)", punk)
    if not punk:
        # NULL COM pointer.
        return None
    if punk.__com_interface__ != comtypes.automation.IDispatch:
        return punk
    return GetBestInterface(punk)
def GetBestInterface(punk):
    """Try to QueryInterface a COM pointer to the 'most useful'
    interface.

    Get type information for the provided object, either via
    IDispatch.GetTypeInfo(), or via IProvideClassInfo.GetClassInfo().
    Generate a wrapper module for the typelib, and QI for the
    interface found.
    """
    if not punk: # NULL COM pointer
        return punk # or should we return None?
    # find the typelib and the interface name
    logger.debug("GetBestInterface(%s)", punk)
    try:
        try:
            pci = punk.QueryInterface(comtypes.typeinfo.IProvideClassInfo)
            logger.debug("Does implement IProvideClassInfo")
        except comtypes.COMError:
            # Some COM objects support IProvideClassInfo2, but not IProvideClassInfo.
            # These objects are broken, but we support them anyway.
            logger.debug("Does NOT implement IProvideClassInfo, trying IProvideClassInfo2")
            pci = punk.QueryInterface(comtypes.typeinfo.IProvideClassInfo2)
            logger.debug("Does implement IProvideClassInfo2")
        tinfo = pci.GetClassInfo() # TypeInfo for the CoClass
        # find the interface marked as default (impl-type flag 1 ==
        # IMPLTYPEFLAG_FDEFAULT)
        ta = tinfo.GetTypeAttr()
        for index in range(ta.cImplTypes):
            if tinfo.GetImplTypeFlags(index) == 1:
                break
        else:
            if ta.cImplTypes != 1:
                # Hm, should we use dynamic now?
                raise TypeError("No default interface found")
            # Only one interface implemented, use that (even if
            # not marked as default).
            index = 0
        href = tinfo.GetRefTypeOfImplType(index)
        tinfo = tinfo.GetRefTypeInfo(href)
    except comtypes.COMError:
        # Fallback path: no class info at all; try to get type info via
        # IDispatch, degrading to a dynamic dispatch object if needed.
        logger.debug("Does NOT implement IProvideClassInfo/IProvideClassInfo2")
        try:
            pdisp = punk.QueryInterface(comtypes.automation.IDispatch)
        except comtypes.COMError:
            logger.debug("No Dispatch interface: %s", punk)
            return punk
        try:
            tinfo = pdisp.GetTypeInfo(0)
        except comtypes.COMError:
            pdisp = comtypes.client.dynamic.Dispatch(pdisp)
            logger.debug("IDispatch.GetTypeInfo(0) failed: %s" % pdisp)
            return pdisp
    typeattr = tinfo.GetTypeAttr()
    logger.debug("Default interface is %s", typeattr.guid)
    try:
        punk.QueryInterface(comtypes.IUnknown, typeattr.guid)
    except comtypes.COMError:
        logger.debug("Does not implement default interface, returning dynamic object")
        return comtypes.client.dynamic.Dispatch(punk)
    itf_name = tinfo.GetDocumentation(-1)[0] # interface name
    tlib = tinfo.GetContainingTypeLib()[0] # typelib
    # import the wrapper, generating it on demand
    mod = GetModule(tlib)
    # Python interface class
    interface = getattr(mod, itf_name)
    logger.debug("Implements default interface from typeinfo %s", interface)
    # QI for this interface
    # XXX
    # What to do if this fails?
    # In the following example the engine.Eval() call returns
    # such an object.
    #
    # engine = CreateObject("MsScriptControl.ScriptControl")
    # engine.Language = "JScript"
    # engine.Eval("[1, 2, 3]")
    #
    # Could the above code, as an optimization, check that QI works,
    # *before* generating the wrapper module?
    result = punk.QueryInterface(interface)
    logger.debug("Final result is %s", result)
    return result
# backwards compatibility:
wrap = GetBestInterface

# Automatically wrap IDispatch out-parameters in their best interface.
# Should we do this for POINTER(IUnknown) also?
ctypes.POINTER(comtypes.automation.IDispatch).__ctypes_from_outparam__ = wrap_outparam
################################################################
#
# Typelib constants
#
class Constants(object):
    """This class loads the type library from the supplied object,
    then exposes constants in the type library as attributes."""

    def __init__(self, obj):
        # The type library is located via the object's IDispatch type info.
        obj = obj.QueryInterface(comtypes.automation.IDispatch)
        tlib, index = obj.GetTypeInfo(0).GetContainingTypeLib()
        self.tcomp = tlib.GetTypeComp()

    def __getattr__(self, name):
        # Resolve `name` through the typelib's ITypeComp; only typelib
        # *variables* (constants) are exposed as attributes.
        try:
            kind, desc = self.tcomp.Bind(name)
        except (WindowsError, comtypes.COMError):
            raise AttributeError(name)
        if kind != "variable":
            raise AttributeError(name)
        return desc._.lpvarValue[0].value

    def _bind_type(self, name):
        # Resolve a type (coclass/interface/enum) name in the typelib.
        return self.tcomp.BindType(name)
################################################################
#
# Object creation
#
def GetActiveObject(progid, interface=None, dynamic=False):
    """Return a pointer to a running COM object that has been
    registered with COM.

    'progid' may be a string like "Excel.Application",
    a string specifying a clsid, a GUID instance, or an object with
    a _clsid_ attribute which should be any of the above.
    'interface' allows to force a certain interface.
    'dynamic=True' will return a dynamic dispatch object.
    """
    clsid = comtypes.GUID.from_progid(progid)
    if dynamic and interface is not None:
        raise ValueError("interface and dynamic are mutually exclusive")
    if dynamic:
        interface = comtypes.automation.IDispatch
    elif interface is None:
        # Fall back to the default interface declared on the progid object.
        interface = getattr(progid, "_com_interfaces_", [None])[0]
    obj = comtypes.GetActiveObject(clsid, interface=interface)
    if dynamic:
        return comtypes.client.dynamic.Dispatch(obj)
    return _manage(obj, clsid, interface=interface)
def _manage(obj, clsid, interface):
    """Record the source CLSID on obj; when no explicit interface was
    requested, upgrade obj to its most useful interface."""
    obj.__dict__['__clsid'] = str(clsid)
    return GetBestInterface(obj) if interface is None else obj
def GetClassObject(progid,
                   clsctx=None,
                   pServerInfo=None,
                   interface=None):
    """Create and return the class factory for a COM object.

    'clsctx' specifies how to create the object, use the CLSCTX_... constants.
    'pServerInfo', if used, must be a pointer to a comtypes.COSERVERINFO instance
    'interface' may be used to request an interface other than IClassFactory
    """
    # Resolve progid (string, clsid string, GUID, or object with _clsid_).
    clsid = comtypes.GUID.from_progid(progid)
    return comtypes.CoGetClassObject(clsid,
                                     clsctx, pServerInfo, interface)
def CreateObject(progid,                # which object to create
                 clsctx=None,           # how to create the object
                 machine=None,          # where to create the object
                 interface=None,        # the interface we want
                 dynamic=False,         # use dynamic dispatch
                 pServerInfo=None):     # server info struct for remoting
    """Create a COM object from 'progid', and try to QueryInterface()
    it to the most useful interface, generating typelib support on
    demand.  A pointer to this interface is returned.

    'progid' may be a string like "InternetExplorer.Application",
    a string specifying a clsid, a GUID instance, or an object with
    a _clsid_ attribute which should be any of the above.
    'clsctx' specifies how to create the object, use the CLSCTX_... constants.
    'machine' allows to specify a remote machine to create the object on.
    'interface' allows to force a certain interface
    'dynamic=True' will return a dynamic dispatch object
    'pServerInfo', if used, must be a pointer to a comtypes.COSERVERINFO instance
        This supercedes 'machine'.

    You can also later request to receive events with GetEvents().
    """
    clsid = comtypes.GUID.from_progid(progid)
    logger.debug("%s -> %s", progid, clsid)
    if dynamic:
        if interface:
            raise ValueError("interface and dynamic are mutually exclusive")
        interface = comtypes.automation.IDispatch
    elif interface is None:
        interface = getattr(progid, "_com_interfaces_", [None])[0]
    if machine is None and pServerInfo is None:
        # Local activation.
        logger.debug("CoCreateInstance(%s, clsctx=%s, interface=%s)",
                     clsid, clsctx, interface)
        obj = comtypes.CoCreateInstance(clsid, clsctx=clsctx, interface=interface)
    else:
        # Remote activation; 'machine' and 'pServerInfo' are alternative
        # ways to name the server and must not be combined.
        logger.debug("CoCreateInstanceEx(%s, clsctx=%s, interface=%s, machine=%s,\
                    pServerInfo=%s)",
                     clsid, clsctx, interface, machine, pServerInfo)
        if machine is not None and pServerInfo is not None:
            # Fixed user-facing message (previously read "You can notset both ...").
            msg = "You cannot set both the machine name and server info."
            raise ValueError(msg)
        obj = comtypes.CoCreateInstanceEx(clsid, clsctx=clsctx,
                                          interface=interface, machine=machine,
                                          pServerInfo=pServerInfo)
    if dynamic:
        return comtypes.client.dynamic.Dispatch(obj)
    return _manage(obj, clsid, interface=interface)
def CoGetObject(displayname, interface=None, dynamic=False):
    """Create an object by calling CoGetObject(displayname).

    Additional parameters have the same meaning as in CreateObject().
    """
    if dynamic and interface is not None:
        raise ValueError("interface and dynamic are mutually exclusive")
    if dynamic:
        interface = comtypes.automation.IDispatch
    punk = comtypes.CoGetObject(displayname, interface)
    if dynamic:
        return comtypes.client.dynamic.Dispatch(punk)
    # clsid is unknown for moniker-created objects.
    return _manage(punk, clsid=None, interface=interface)
| gpl-3.0 |
Statoil/libres | python/tests/res/enkf/data/test_custom_kw.py | 1 | 4935 | import os
import pytest
from res.enkf.enums import EnkfRunType
from res.enkf import ErtRunContext
from res.enkf.config import CustomKWConfig
from res.enkf.data import CustomKW
from res.enkf.enkf_simulation_runner import EnkfSimulationRunner
from res.enkf.export import custom_kw_collector
from res.enkf.export.custom_kw_collector import CustomKWCollector
from res.test.ert_test_context import ErtTestContext
from tests import ResTest
from ecl.util.test.test_area import TestAreaContext
from ecl.util.util import StringList
from ecl.util.util import BoolVector
from tests.utils import tmpdir
class CustomKWTest(ResTest):
    """Tests for the CustomKW / CustomKWConfig result-keyword classes."""

    def createResultFile(self, filename, data):
        # Write a "KEY VALUE" line per entry, the format fload() parses.
        with open(filename, "w") as output_file:
            for key in data:
                output_file.write("%s %s\n" % (key, data[key]))

    def test_custom_kw_creation(self):
        """fload() populates both the config's keys and the KW's values."""
        data = {"VALUE_1": 2345.234,
                "VALUE_2": 0.001234,
                "VALUE_3": "string_1",
                "VALUE_4": "string_2"}

        with TestAreaContext("python/enkf/data/custom_kw_creation") as test_area:
            self.createResultFile("result_file", data)
            custom_kw_config = CustomKWConfig("CUSTOM_KW", "result_file")
            # Keys are only discovered when a result file is loaded.
            self.assertEqual(len(custom_kw_config), 0)
            custom_kw = CustomKW(custom_kw_config)
            custom_kw.fload("result_file")
            self.assertEqual(len(custom_kw_config), 4)
            for key in data:
                index = custom_kw_config.indexOfKey(key)
                self.assertEqual(data[key], custom_kw[key])
            # Unknown keys must raise, not return a default.
            with self.assertRaises(KeyError):
                value = custom_kw["VALUE_5"]

    def test_custom_kw_config_data_is_null(self):
        """Keys missing from a later load read back as None; keys never
        loaded are absent from the config."""
        data_1 = {"VALUE_1": 123453.3,
                  "VALUE_2": 0.234234}
        data_2 = {"VALUE_1": 965689,
                  "VALUE_3": 1.1222}

        with TestAreaContext("python/enkf/data/custom_kw_null_element") as test_area:
            self.createResultFile("result_file_1", data_1)
            self.createResultFile("result_file_2", data_2)
            custom_kw_config = CustomKWConfig("CUSTOM_KW", "result_file")
            custom_kw_1 = CustomKW(custom_kw_config)
            custom_kw_1.fload("result_file_1")
            custom_kw_2 = CustomKW(custom_kw_config)
            custom_kw_2.fload("result_file_2")
            index_1 = custom_kw_config.indexOfKey("VALUE_1")
            index_2 = custom_kw_config.indexOfKey("VALUE_2")
            self.assertEqual(custom_kw_1["VALUE_1"], data_1["VALUE_1"])
            self.assertEqual(custom_kw_2["VALUE_1"], data_2["VALUE_1"])
            # VALUE_2 was not in result_file_2, so it is null for kw_2.
            self.assertIsNone(custom_kw_2["VALUE_2"])
            # VALUE_3 only appeared after the config was frozen by the first
            # load, so it never becomes part of the config.
            self.assertFalse( "VALUE_3" in custom_kw_config )

    @tmpdir()
    def test_simulated_custom_kw(self):
        """Running an ensemble experiment fills in the AGGREGATED custom KW
        keys produced by the forward model."""
        config = self.createTestPath("local/custom_kw/mini_config")
        with ErtTestContext("python/enkf/data/custom_kw_simulated", config) as context:
            ert = context.getErt()
            ensemble_config = ert.ensembleConfig()
            self.assertTrue("AGGREGATED" in ensemble_config)

            config = ensemble_config.getNode("AGGREGATED").getCustomKeywordModelConfig()
            # No keys before any simulation has produced results.
            self.assertEqual(len(config.getKeys()), 0)

            simulation_runner = EnkfSimulationRunner(ert)
            job_queue = ert.get_queue_config().create_job_queue()
            iteration_count = 0
            # All four realisations active.
            active = BoolVector(default_value = True, initial_size = 4)
            subst_list = ert.getDataKW( )
            runpath_fmt = ert.getModelConfig( ).getRunpathFormat( )
            fs_manager = ert.getEnkfFsManager( )
            fs = fs_manager.getFileSystem("fs")
            jobname_fmt = ert.getModelConfig( ).getJobnameFormat( )
            run_context = ErtRunContext( EnkfRunType.ENSEMBLE_EXPERIMENT , fs, None , active , runpath_fmt, jobname_fmt, subst_list , iteration_count)
            simulation_runner.createRunPath( run_context )
            simulation_runner.runEnsembleExperiment(job_queue, run_context)

            config = ensemble_config.getNode("AGGREGATED").getCustomKeywordModelConfig()
            self.assertEqual(len(config.getKeys()), 4)
            self.assertItemsEqual(config.getKeys(), ["PERLIN_1", "PERLIN_2", "PERLIN_3", "STATE"])

    def test_custom_kw_set_values(self):
        """Values can only be assigned for keys declared in the definition."""
        definition = {
            "STRING": str,
            "FLOAT": float,
            "INT": float
        }
        ckwc = CustomKWConfig("Test", None, definition=definition)
        ckw = CustomKW(ckwc)
        # Assigning an undeclared key must fail.
        with self.assertRaises(KeyError):
            ckw["ANOTHER_STRING"] = "another string"
        ckw["STRING"] = "string"
        ckw["FLOAT"] = 3.1415
        ckw["INT"] = 1
        self.assertEqual(ckw["STRING"], "string")
        self.assertEqual(ckw["FLOAT"], 3.1415)
        self.assertEqual(ckw["INT"], 1)
| gpl-3.0 |
kashefy/nideep | nideep/iow/test_read_img.py | 1 | 5461 | '''
Created on Oct 30, 2015
@author: kashefy
'''
from nose.tools import assert_equal, assert_almost_equals
from mock import patch
import os
import tempfile
import shutil
import numpy as np
import cv2 as cv2
import read_img as r
class TestReadImage:
    """Tests for the read_img readers (cv2, PIL and caffe back ends).

    Improvement over the original: the 16-line mocked caffe.io.load_image
    array literal was duplicated verbatim in three tests; it is now built
    once by the _caffe_img1() helper (behaviour unchanged).
    """

    @classmethod
    def setup_class(self):
        # Work inside a throw-away directory holding a known 4x2 BGR image.
        self.dir_tmp = tempfile.mkdtemp()
        self.img1 = np.array([[[ 1,  2,  3],
                               [ 4,  5,  6]
                               ],
                              [[ 7,  8,  9],
                               [10, 11, 12]
                               ],
                              [[13, 14, 15],
                               [16, 17, 18],
                               ],
                              [[19, 20, 21],
                               [22, 23, 24]
                               ]
                              ])
        self.path_img1 = os.path.join(self.dir_tmp, "a.png")
        cv2.imwrite(self.path_img1, self.img1)

    @classmethod
    def teardown_class(self):
        shutil.rmtree(self.dir_tmp)

    @staticmethod
    def _caffe_img1():
        """img1 as caffe.io.load_image would return it: channel-reversed
        (BGR -> RGB) floats scaled to [0, 1] (i.e. value / 255)."""
        return np.array([[[0.01176471, 0.00784314, 0.00392157],
                          [0.02352941, 0.01960784, 0.01568628]
                          ],
                         [[0.03529412, 0.03137255, 0.02745098],
                          [0.04705882, 0.04313726, 0.03921569],
                          ],
                         [[0.05882353, 0.05490196, 0.05098039],
                          [0.07058824, 0.06666667, 0.0627451 ]
                          ],
                         [[0.08235294, 0.07843138, 0.07450981],
                          [0.09411765, 0.09019608, 0.08627451]
                          ]
                         ])

    def test_read_img_cv2_shape(self):
        # Reader returns channels-first (ch, row, col).
        img = r.read_img_cv2(self.path_img1)
        assert_equal(img.shape, (3, 4, 2))

    def test_read_img_cv2_pixels(self):
        img = r.read_img_cv2(self.path_img1)
        for ch in range(3):
            for row in range(4):
                for col in range(2):
                    assert_equal(img[ch][row][col], self.img1[row][col][ch])

    def test_read_img_cv2_dtype(self):
        img = r.read_img_cv2(self.path_img1)
        assert_equal(img.dtype, np.dtype('uint8'))

    def test_read_img_cv2_subtract_mean(self):
        # Per-channel mean must be subtracted from every pixel.
        m = np.array((1., 2., 3.))
        img = r.read_img_cv2(self.path_img1, mean=m)
        for ch in range(3):
            for row in range(4):
                for col in range(2):
                    assert_equal(img[ch][row][col], self.img1[row][col][ch] - m[ch])

    def test_read_img_PIL_shape(self):
        assert_equal(r.read_img_PIL(self.path_img1).shape, (3, 4, 2))

    def test_read_img_PIL_pixels(self):
        img = r.read_img_PIL(self.path_img1)
        for ch in range(3):
            for row in range(4):
                for col in range(2):
                    assert_equal(img[ch][row][col], self.img1[row][col][ch])

    def test_read_img_PIL_subtract_mean(self):
        m = np.array((1., 2., 3.))
        img = r.read_img_PIL(self.path_img1, mean=m)
        for ch in range(3):
            for row in range(4):
                for col in range(2):
                    assert_equal(img[ch][row][col], self.img1[row][col][ch] - m[ch])

    @patch('nideep.iow.read_img.caffe.io')
    def test_read_img_caf_shape(self, mock_io):
        mock_io.load_image.return_value = self._caffe_img1()
        assert_equal(r.read_img_caf(self.path_img1).shape, (3, 4, 2))

    @patch('nideep.iow.read_img.caffe.io')
    def test_read_img_caf_pixels(self, mock_io):
        mock_io.load_image.return_value = self._caffe_img1()
        img = r.read_img_caf(self.path_img1)
        # Reader rescales back to [0, 255], so compare against img1 with a
        # tolerance for the float round trip.
        for ch in range(3):
            for row in range(4):
                for col in range(2):
                    assert_almost_equals(img[ch][row][col], self.img1[row][col][ch], places=5)

    @patch('nideep.iow.read_img.caffe.io')
    def test_read_img_caf_subtract_mean(self, mock_io):
        mock_io.load_image.return_value = self._caffe_img1()
        m = np.array((1., 2., 3.))
        img = r.read_img_caf(self.path_img1, mean=m)
        for ch in range(3):
            for row in range(4):
                for col in range(2):
                    assert_almost_equals(img[ch][row][col], self.img1[row][col][ch] - m[ch], places=5)
| bsd-2-clause |
mottosso/mindbender-setup | bin/pythonpath/bson/raw_bson.py | 17 | 3504 | # Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for representing raw BSON documents.
"""
import collections
from bson import _UNPACK_INT, _iterate_elements
from bson.py3compat import iteritems
from bson.codec_options import (
CodecOptions, DEFAULT_CODEC_OPTIONS, _RAW_BSON_DOCUMENT_MARKER)
from bson.errors import InvalidBSON
class RawBSONDocument(collections.Mapping):
    """Representation for a MongoDB document that provides access to the raw
    BSON bytes that compose it.

    Only when a field is accessed or modified within the document does
    RawBSONDocument decode its bytes.
    """

    __slots__ = ('__raw', '__inflated_doc', '__codec_options')
    _type_marker = _RAW_BSON_DOCUMENT_MARKER

    def __init__(self, bson_bytes, codec_options=DEFAULT_CODEC_OPTIONS):
        """Create a new :class:`RawBSONDocument`.

        :Parameters:
          - `bson_bytes`: the BSON bytes that compose this document
          - `codec_options` (optional): An instance of
            :class:`~bson.codec_options.CodecOptions`.
        """
        self.__raw = bson_bytes
        # Decoded dict cache; filled lazily by __inflated on first access.
        self.__inflated_doc = None
        # Always decode documents to their lazy representations.
        co = codec_options
        # Force document_class=RawBSONDocument so nested documents stay lazy.
        self.__codec_options = CodecOptions(
            tz_aware=co.tz_aware,
            document_class=RawBSONDocument,
            uuid_representation=co.uuid_representation,
            unicode_decode_error_handler=co.unicode_decode_error_handler,
            tzinfo=co.tzinfo)

    @property
    def raw(self):
        """The raw BSON bytes composing this document."""
        return self.__raw

    def items(self):
        """Lazily decode and iterate elements in this document."""
        return iteritems(self.__inflated)

    @property
    def __inflated(self):
        # Decode the raw bytes on first use and cache the resulting dict.
        if self.__inflated_doc is None:
            # We already validated the object's size when this document was
            # created, so no need to do that again. We still need to check the
            # size of all the elements and compare to the document size.
            object_size = _UNPACK_INT(self.__raw[:4])[0] - 1
            position = 0
            self.__inflated_doc = {}
            for key, value, position in _iterate_elements(
                    self.__raw, 4, object_size, self.__codec_options):
                self.__inflated_doc[key] = value
            if position != object_size:
                raise InvalidBSON('bad object or element length')
        return self.__inflated_doc

    def __getitem__(self, item):
        return self.__inflated[item]

    def __iter__(self):
        return iter(self.__inflated)

    def __len__(self):
        return len(self.__inflated)

    def __eq__(self, other):
        # Equality is defined on the raw bytes, and only against another
        # RawBSONDocument (comparison with plain dicts is NotImplemented).
        if isinstance(other, RawBSONDocument):
            return self.__raw == other.raw
        return NotImplemented

    def __repr__(self):
        return ("RawBSONDocument(%r, codec_options=%r)"
                % (self.raw, self.__codec_options))
| mit |
jgeskens/django | tests/aggregation_regress/tests.py | 5 | 44406 | from __future__ import absolute_import, unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.core.exceptions import FieldError
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Max, Avg, Sum, StdDev, Variance, F, Q
from django.test import TestCase, Approximate, skipUnlessDBFeature
from django.utils import six
from django.utils.unittest import expectedFailure
from .models import (Author, Book, Publisher, Clues, Entries, HardbackBook,
ItemTag, WithManualPK)
class AggregationTests(TestCase):
fixtures = ["aggregation_regress.json"]
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
    def test_aggregates_in_where_clause(self):
        """
        Regression test for #12822: DatabaseError: aggregates not allowed in
        WHERE clause

        Tests that the subselect works and returns results equivalent to a
        query with the IDs listed.

        Before the corresponding fix for this bug, this test passed in 1.1 and
        failed in 1.2-beta (trunk).
        """
        qs = Book.objects.values('contact').annotate(Max('id'))
        qs = qs.order_by('contact').values_list('id__max', flat=True)
        # don't do anything with the queryset (qs) before including it as a
        # subquery
        books = Book.objects.order_by('id')
        # qs1 inlines qs as an SQL subselect; qs2 passes the same IDs as a
        # literal list — both must yield identical results.
        qs1 = books.filter(id__in=qs)
        qs2 = books.filter(id__in=list(qs))
        self.assertEqual(list(qs1), list(qs2))
    def test_aggregates_in_where_clause_pre_eval(self):
        """
        Regression test for #12822: DatabaseError: aggregates not allowed in
        WHERE clause

        Same as the above test, but evaluates the queryset for the subquery
        before it's used as a subquery.

        Before the corresponding fix for this bug, this test failed in both
        1.1 and 1.2-beta (trunk).
        """
        qs = Book.objects.values('contact').annotate(Max('id'))
        qs = qs.order_by('contact').values_list('id__max', flat=True)
        # force the queryset (qs) for the subquery to be evaluated in its
        # current state
        list(qs)
        books = Book.objects.order_by('id')
        # Even after evaluation, using qs as a subquery must match the
        # explicit ID list.
        qs1 = books.filter(id__in=qs)
        qs2 = books.filter(id__in=list(qs))
        self.assertEqual(list(qs1), list(qs2))
    @skipUnlessDBFeature('supports_subqueries_in_group_by')
    def test_annotate_with_extra(self):
        """
        Regression test for #11916: Extra params + aggregation creates
        incorrect SQL.
        """
        #oracle doesn't support subqueries in group by clause
        shortest_book_sql = """
        SELECT name
        FROM aggregation_regress_book b
        WHERE b.publisher_id = aggregation_regress_publisher.id
        ORDER BY b.pages
        LIMIT 1
        """
        # tests that this query does not raise a DatabaseError due to the full
        # subselect being (erroneously) added to the GROUP BY parameters
        qs = Publisher.objects.extra(select={
            'name_of_shortest_book': shortest_book_sql,
        }).annotate(total_books=Count('book'))
        # force execution of the query
        list(qs)
    def test_aggregate(self):
        """Basic aggregate() behavior: ordering is ignored, multiple
        aggregates combine, and values()/extra() don't change results."""
        # Ordering requests are ignored
        self.assertEqual(
            Author.objects.order_by("name").aggregate(Avg("age")),
            {"age__avg": Approximate(37.444, places=1)}
        )
        # Implicit ordering is also ignored
        self.assertEqual(
            Book.objects.aggregate(Sum("pages")),
            {"pages__sum": 3703},
        )
        # Baseline results
        self.assertEqual(
            Book.objects.aggregate(Sum('pages'), Avg('pages')),
            {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
        )
        # Empty values query doesn't affect grouping or results
        self.assertEqual(
            Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
            {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
        )
        # Aggregate overrides extra selected column
        self.assertEqual(
            Book.objects.extra(select={'price_per_page' : 'price / pages'}).aggregate(Sum('pages')),
            {'pages__sum': 3703}
        )
    def test_annotation(self):
        """annotate() combined with extra() and values() in every order:
        the clause order determines which computed columns appear in the
        output, but never breaks the query."""
        # Annotations get combined with extra select clauses
        obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(select={"manufacture_cost": "price * .5"}).get(pk=2)
        self.assertObjectAttrs(obj,
            contact_id=3,
            id=2,
            isbn='067232959',
            mean_auth_age=45.0,
            name='Sams Teach Yourself Django in 24 Hours',
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=2,
            rating=3.0
        )
        # Different DB backends return different types for the extra select computation
        self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
        # Order of the annotate/extra in the query doesn't matter
        obj = Book.objects.extra(select={'manufacture_cost' : 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2)
        self.assertObjectAttrs(obj,
            contact_id=3,
            id=2,
            isbn='067232959',
            mean_auth_age=45.0,
            name='Sams Teach Yourself Django in 24 Hours',
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=2,
            rating=3.0
        )
        # Different DB backends return different types for the extra select computation
        self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
        # Values queries can be combined with annotate and extra
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).values().get(pk=2)
        manufacture_cost = obj['manufacture_cost']
        self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
        del obj['manufacture_cost']
        self.assertEqual(obj, {
            "contact_id": 3,
            "id": 2,
            "isbn": "067232959",
            "mean_auth_age": 45.0,
            "name": "Sams Teach Yourself Django in 24 Hours",
            "pages": 528,
            "price": Decimal("23.09"),
            "pubdate": datetime.date(2008, 3, 3),
            "publisher_id": 2,
            "rating": 3.0,
        })
        # The order of the (empty) values, annotate and extra clauses doesn't
        # matter
        obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2)
        manufacture_cost = obj['manufacture_cost']
        self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
        del obj['manufacture_cost']
        self.assertEqual(obj, {
            'contact_id': 3,
            'id': 2,
            'isbn': '067232959',
            'mean_auth_age': 45.0,
            'name': 'Sams Teach Yourself Django in 24 Hours',
            'pages': 528,
            'price': Decimal("23.09"),
            'pubdate': datetime.date(2008, 3, 3),
            'publisher_id': 2,
            'rating': 3.0
        })
        # If the annotation precedes the values clause, it won't be included
        # unless it is explicitly named
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name').get(pk=1)
        self.assertEqual(obj, {
            "name": 'The Definitive Guide to Django: Web Development Done Right',
        })
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name','mean_auth_age').get(pk=1)
        self.assertEqual(obj, {
            'mean_auth_age': 34.5,
            'name': 'The Definitive Guide to Django: Web Development Done Right',
        })
        # If an annotation isn't included in the values, it can still be used
        # in a filter
        qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
        self.assertQuerysetEqual(
            qs, [
                {"name": 'Python Web Development with Django'}
            ],
            lambda b: b,
        )
        # The annotations are added to values output if values() precedes
        # annotate()
        obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).get(pk=1)
        self.assertEqual(obj, {
            'mean_auth_age': 34.5,
            'name': 'The Definitive Guide to Django: Web Development Done Right',
        })
        # Check that all of the objects are getting counted (allow_nulls) and
        # that values respects the amount of objects
        self.assertEqual(
            len(Author.objects.annotate(Avg('friends__age')).values()),
            9
        )
        # Check that consecutive calls to annotate accumulate in the query
        qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
        self.assertQuerysetEqual(
            qs, [
                {'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
                {'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
                {'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
                {'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
                {'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
            ],
            lambda b: b,
        )
    # NOTE: method name has a typo ("aggrate" for "aggregate"); kept as-is
    # because the test ID is referenced by test runners/CI selections.
    def test_aggrate_annotation(self):
        """Aggregating over an annotation (Sum/Avg of a Count) works and the
        return type is derived from the composed aggregate."""
        # Aggregates can be composed over annotations.
        # The return type is derived from the composed aggregate
        vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
        self.assertEqual(vals, {
            'num_authors__sum': 10,
            'num_authors__avg': Approximate(1.666, places=2),
            'pages__max': 1132,
            'price__max': Decimal("82.80")
        })
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
    def test_more(self):
        """Mixed regressions: count() with annotations, aggregate type
        inheritance, reserved-word aliases, select_related() interplay
        (#10064), and exclude() on aggregate fields (#10010)."""
        # Old-style count aggregations can be mixed with new-style
        self.assertEqual(
            Book.objects.annotate(num_authors=Count('authors')).count(),
            6
        )
        # Non-ordinal, non-computed Aggregates over annotations correctly
        # inherit the annotation's internal type if the annotation is ordinal
        # or computed
        vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
        self.assertEqual(
            vals,
            {'num_authors__max': 3}
        )
        vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
        self.assertEqual(
            vals,
            {'avg_price__max': 75.0}
        )
        # Aliases are quoted to protected aliases that might be reserved names
        vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
        self.assertEqual(
            vals,
            {'number': 1132, 'select': 1132}
        )
        # Regression for #10064: select_related() plays nice with aggregates
        obj = Book.objects.select_related('publisher').annotate(num_authors=Count('authors')).values()[0]
        self.assertEqual(obj, {
            'contact_id': 8,
            'id': 5,
            'isbn': '013790395',
            'name': 'Artificial Intelligence: A Modern Approach',
            'num_authors': 2,
            'pages': 1132,
            'price': Decimal("82.8"),
            'pubdate': datetime.date(1995, 1, 15),
            'publisher_id': 3,
            'rating': 4.0,
        })
        # Regression for #10010: exclude on an aggregate field is correctly
        # negated
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors'))),
            6
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
            1
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
            5
        )
        # filter/exclude chains on the same aggregate compose correctly in
        # either order.
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
            2
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
            2
        )
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2*F('num_books')).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
    def test_db_col_table(self):
        """Aggregation across fields with non-default db_table/db_column
        names generates valid SQL (result sets are empty in the fixture)."""
        # Tests on fields with non-default table and column names.
        qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
        self.assertQuerysetEqual(qs, [])
        qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
        self.assertQuerysetEqual(qs, [])
    def test_empty(self):
        """Regression for #10089: aggregates over empty result sets return
        0 for Count and None for Avg/Max rather than erroring."""
        # Regression for #10089: Check handling of empty result sets with
        # aggregates
        self.assertEqual(
            Book.objects.filter(id__in=[]).count(),
            0
        )
        vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
        self.assertEqual(
            vals,
            {'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
        )
        # Same behavior for annotations on a publisher with no books.
        qs = Publisher.objects.filter(pk=5).annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
        self.assertQuerysetEqual(
            qs, [
                {'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': 5, 'avg_authors': None}
            ],
            lambda p: p
        )
    def test_more_more(self):
        """Mixed regressions: order_by joins in GROUP BY (#10113), empty
        select_related with annotate (#10127), extra-select-only grouping
        (#10132), aggregate subquery realiasing (#10182), and duplicate
        group_by fields (#15709)."""
        # Regression for #10113 - Fields mentioned in order_by() must be
        # included in the GROUP BY. This only becomes a problem when the
        # order_by introduces a new join.
        self.assertQuerysetEqual(
            Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
                "Practical Django Projects",
                "The Definitive Guide to Django: Web Development Done Right",
                "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
                "Artificial Intelligence: A Modern Approach",
                "Python Web Development with Django",
                "Sams Teach Yourself Django in 24 Hours",
            ],
            lambda b: b.name
        )
        # Regression for #10127 - Empty select_related() works with annotate
        qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
        self.assertQuerysetEqual(
            qs, [
                ('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
                ('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
                ('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
                ('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
            ],
            lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
        )
        # Regression for #10132 - If the values() clause only mentioned extra
        # (select=) columns, those columns are used for grouping
        qs = Book.objects.extra(select={'pub':'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
        self.assertQuerysetEqual(
            qs, [
                {'pub': 1, 'id__count': 2},
                {'pub': 2, 'id__count': 1},
                {'pub': 3, 'id__count': 2},
                {'pub': 4, 'id__count': 1}
            ],
            lambda b: b
        )
        # The extra 'foo' column is not in values(), so it must not leak into
        # the grouping - results are identical to the query above.
        qs = Book.objects.extra(select={'pub':'publisher_id', 'foo':'pages'}).values('pub').annotate(Count('id')).order_by('pub')
        self.assertQuerysetEqual(
            qs, [
                {'pub': 1, 'id__count': 2},
                {'pub': 2, 'id__count': 1},
                {'pub': 3, 'id__count': 2},
                {'pub': 4, 'id__count': 1}
            ],
            lambda b: b
        )
        # Regression for #10182 - Queries with aggregate calls are correctly
        # realiased when used in a subquery
        ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
        self.assertQuerysetEqual(
            Book.objects.filter(id__in=ids), [
                "Python Web Development with Django",
            ],
            lambda b: b.name
        )
        # Regression for #15709 - Ensure each group_by field only exists once
        # per query
        qs = Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by()
        grouping, gb_params = qs.query.get_compiler(qs.db).get_grouping([], [])
        self.assertEqual(len(grouping), 1)
    def test_duplicate_alias(self):
        """Regression for #11256: an explicit alias that duplicates the
        default-generated alias of another aggregate raises ValueError."""
        # Regression for #11256 - duplicating a default alias raises ValueError.
        self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
    def test_field_name_conflict(self):
        """Regression for #11256: an aggregate alias that shadows a concrete
        model field ('age') raises ValueError."""
        # Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
        self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
    def test_m2m_name_conflict(self):
        """Regression for #11256: an aggregate alias that shadows an m2m
        accessor ('friends') raises ValueError."""
        # Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
        self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
    def test_values_queryset_non_conflict(self):
        """Regression for #14707: a values() queryset that omits a field
        frees that field's name for use as an aggregate alias."""
        # Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
        # age is a field on Author, so it shouldn't be allowed as an aggregate.
        # But age isn't included in the ValuesQuerySet, so it is.
        results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
        self.assertEqual(len(results), 9)
        self.assertEqual(results[0]['name'], 'Adrian Holovaty')
        self.assertEqual(results[0]['age'], 1)
        # Same problem, but aggregating over m2m fields
        results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
        self.assertEqual(len(results), 9)
        self.assertEqual(results[0]['name'], 'Adrian Holovaty')
        self.assertEqual(results[0]['age'], 32.0)
        # Same problem, but colliding with an m2m field
        results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
        self.assertEqual(len(results), 9)
        self.assertEqual(results[0]['name'], 'Adrian Holovaty')
        self.assertEqual(results[0]['friends'], 2)
    def test_reverse_relation_name_conflict(self):
        """Regression for #11256: an aggregate alias that shadows a reverse
        relation accessor ('book_contact_set') raises ValueError."""
        # Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
        self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
    def test_pickle(self):
        """Regression for #10197: annotated querysets can be pickled and
        the round trip produces identical SQL."""
        # Regression for #10197 -- Queries with aggregates can be pickled.
        # First check that pickling is possible at all. No crash = success
        qs = Book.objects.annotate(num_authors=Count('authors'))
        pickle.dumps(qs)
        # Then check that the round trip works.
        query = qs.query.get_compiler(qs.db).as_sql()[0]
        qs2 = pickle.loads(pickle.dumps(qs))
        self.assertEqual(
            qs2.query.get_compiler(qs2.db).as_sql()[0],
            query,
        )
    def test_more_more_more(self):
        """Mixed regressions: aggregate() clones the query (#10199),
        annotations with dates() (#10248), parametrized extra selects in
        GROUP BY (#10290), annotations vs count() (#10425), inherited
        fields with aggregation (#10666), and referencing an aggregate from
        aggregate() (#10766)."""
        # Regression for #10199 - Aggregate calls clone the original query so
        # the original query can still be used
        books = Book.objects.all()
        books.aggregate(Avg("authors__age"))
        self.assertQuerysetEqual(
            books.all(), [
                'Artificial Intelligence: A Modern Approach',
                'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
                'Practical Django Projects',
                'Python Web Development with Django',
                'Sams Teach Yourself Django in 24 Hours',
                'The Definitive Guide to Django: Web Development Done Right'
            ],
            lambda b: b.name
        )
        # Regression for #10248 - Annotations work with DateQuerySets
        qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
        self.assertQuerysetEqual(
            qs, [
                datetime.date(1995, 1, 15),
                datetime.date(2007, 12, 6),
            ],
            lambda b: b
        )
        # Regression for #10290 - extra selects with parameters can be used for
        # grouping.
        qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets' : '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
        self.assertQuerysetEqual(
            qs, [
                150,
                175,
                224,
                264,
                473,
                566
            ],
            lambda b: int(b["sheets"])
        )
        # Regression for 10425 - annotations don't get in the way of a count()
        # clause
        self.assertEqual(
            Book.objects.values('publisher').annotate(Count('publisher')).count(),
            4
        )
        self.assertEqual(
            Book.objects.annotate(Count('publisher')).values('publisher').count(),
            6
        )
        publishers = Publisher.objects.filter(id__in=[1, 2])
        self.assertEqual(
            sorted(p.name for p in publishers),
            [
                "Apress",
                "Sams"
            ]
        )
        # Annotating an already-filtered queryset must not disturb its
        # contents, before or after related queries run against it.
        publishers = publishers.annotate(n_books=Count("book"))
        self.assertEqual(
            publishers[0].n_books,
            2
        )
        self.assertEqual(
            sorted(p.name for p in publishers),
            [
                "Apress",
                "Sams"
            ]
        )
        books = Book.objects.filter(publisher__in=publishers)
        self.assertQuerysetEqual(
            books, [
                "Practical Django Projects",
                "Sams Teach Yourself Django in 24 Hours",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            lambda b: b.name
        )
        self.assertEqual(
            sorted(p.name for p in publishers),
            [
                "Apress",
                "Sams"
            ]
        )
        # Regression for 10666 - inherited fields work with annotations and
        # aggregations
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
            {'n_pages': 2078}
        )
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum('pages')),
            {'n_pages': 2078},
        )
        qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
        self.assertQuerysetEqual(
            qs, [
                {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
                {'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
            ],
            lambda h: h
        )
        qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
        self.assertQuerysetEqual(
            qs, [
                {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
                {'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
            ],
            lambda h: h,
        )
        # Regression for #10766 - Shouldn't be able to reference an aggregate
        # fields in an aggregate() call.
        self.assertRaises(
            FieldError,
            lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
        )
    def test_empty_filter_count(self):
        """count() over an annotated queryset filtered to nothing is 0."""
        self.assertEqual(
            Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
            0
        )
    def test_empty_filter_aggregate(self):
        """aggregate() over an annotated, empty-filtered queryset yields
        None (the grouped count over no rows), not 0."""
        self.assertEqual(
            Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
            {"pk__count": None}
        )
    def test_none_call_before_aggregate(self):
        """Regression for #11789: aggregate() after none() returns None
        values instead of raising."""
        # Regression for #11789
        self.assertEqual(
            Author.objects.none().aggregate(Avg('age')),
            {'age__avg': None}
        )
    def test_annotate_and_join(self):
        """An exclude() on the joined relation after an annotate() must not
        drop rows: no author is named 'Joe', so every author survives."""
        self.assertEqual(
            Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
            Author.objects.count()
        )
    def test_f_expression_annotation(self):
        """An F() expression over an annotation used in a filter, with the
        result fed back in as a pk subquery."""
        # Books with less than 200 pages per author.
        qs = Book.objects.values("name").annotate(
            n_authors=Count("authors")
        ).filter(
            pages__lt=F("n_authors") * 200
        ).values_list("pk")
        self.assertQuerysetEqual(
            Book.objects.filter(pk__in=qs), [
                "Python Web Development with Django"
            ],
            attrgetter("name")
        )
    def test_values_annotate_values(self):
        """A values().annotate().values_list() chain doesn't restrict the
        row set: every book pk is still returned."""
        qs = Book.objects.values("name").annotate(
            n_authors=Count("authors")
        ).values_list("pk", flat=True)
        self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
    def test_having_group_by(self):
        """A plain field on the LHS of a HAVING comparison must also land in
        GROUP BY so the query remains valid."""
        # Test that when a field occurs on the LHS of a HAVING clause that it
        # appears correctly in the GROUP BY clause
        qs = Book.objects.values_list("name").annotate(
            n_authors=Count("authors")
        ).filter(
            pages__gt=F("n_authors")
        ).values_list("name", flat=True)
        # Results should be the same, all Books have more pages than authors
        self.assertEqual(
            list(qs), list(Book.objects.values_list("name", flat=True))
        )
    def test_annotation_disjunction(self):
        """OR-ed Q() conditions that mix annotation (HAVING) lookups with
        plain-field (WHERE) lookups are split and recombined correctly."""
        qs = Book.objects.annotate(n_authors=Count("authors")).filter(
            Q(n_authors=2) | Q(name="Python Web Development with Django")
        )
        self.assertQuerysetEqual(
            qs, [
                "Artificial Intelligence: A Modern Approach",
                "Python Web Development with Django",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name")
        )
        # Nested AND inside the OR; the n_authors=3 leg matches nothing for
        # this book, so only the first leg contributes.
        qs = Book.objects.annotate(n_authors=Count("authors")).filter(
            Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
        )
        self.assertQuerysetEqual(
            qs, [
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name")
        )
        # Disjunction between an aggregate comparison and an isnull test on
        # the same aggregate (publishers with no books have a NULL sum).
        qs = Publisher.objects.annotate(
            rating_sum=Sum("book__rating"),
            book_count=Count("book")
        ).filter(
            Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
        ).order_by('pk')
        self.assertQuerysetEqual(
            qs, [
                "Apress",
                "Prentice Hall",
                "Jonno's House of Books",
            ],
            attrgetter("name")
        )
        # Disjunction mixing a pk-vs-aggregate F() comparison with an
        # aggregate equality against None.
        qs = Publisher.objects.annotate(
            rating_sum=Sum("book__rating"),
            book_count=Count("book")
        ).filter(
            Q(pk__lt=F("book_count")) | Q(rating_sum=None)
        ).order_by("pk")
        self.assertQuerysetEqual(
            qs, [
                "Apress",
                "Jonno's House of Books",
            ],
            attrgetter("name")
        )
    def test_quoting_aggregate_order_by(self):
        """A mixed-case aggregate alias used in order_by() must be quoted in
        the generated SQL so case-folding backends don't break."""
        qs = Book.objects.filter(
            name="Python Web Development with Django"
        ).annotate(
            authorCount=Count("authors")
        ).order_by("authorCount")
        self.assertQuerysetEqual(
            qs, [
                ("Python Web Development with Django", 3),
            ],
            lambda b: (b.name, b.authorCount)
        )
    @skipUnlessDBFeature('supports_stddev')
    def test_stddev(self):
        """StdDev and Variance aggregates, both population (default) and
        sample (sample=True), on integer, float, and decimal fields."""
        self.assertEqual(
            Book.objects.aggregate(StdDev('pages')),
            {'pages__stddev': Approximate(311.46, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('rating')),
            {'rating__stddev': Approximate(0.60, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('price')),
            {'price__stddev': Approximate(24.16, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('pages', sample=True)),
            {'pages__stddev': Approximate(341.19, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('rating', sample=True)),
            {'rating__stddev': Approximate(0.66, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('price', sample=True)),
            {'price__stddev': Approximate(26.46, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('pages')),
            {'pages__variance': Approximate(97010.80, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('rating')),
            {'rating__variance': Approximate(0.36, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('price')),
            {'price__variance': Approximate(583.77, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('pages', sample=True)),
            {'pages__variance': Approximate(116412.96, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('rating', sample=True)),
            {'rating__variance': Approximate(0.44, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('price', sample=True)),
            {'price__variance': Approximate(700.53, 2)}
        )
    def test_filtering_by_annotation_name(self):
        """Regression for #14476: filtering by explicit annotation aliases
        and by the auto-generated `book__count` alias both resolve."""
        # Regression test for #14476
        # The name of the explicitly provided annotation name in this case
        # poses no problem
        qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Peter Norvig'],
            lambda b: b.name
        )
        # Neither in this case
        qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Peter Norvig'],
            lambda b: b.name
        )
        # This case used to fail because the ORM couldn't resolve the
        # automatically generated annotation name `book__count`
        qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Peter Norvig'],
            lambda b: b.name
        )
    def test_type_conversion(self):
        """Regression for #13844: convert_values() must pass CharField data
        through untouched instead of coercing it to float."""
        # The database backend convert_values function should not try to convert
        # CharFields to float. Refs #13844.
        from django.db.models import CharField
        from django.db import connection
        testData = 'not_a_float_value'
        testField = CharField()
        self.assertEqual(
            connection.ops.convert_values(testData, testField),
            testData
        )
    def test_annotate_joins(self):
        """
        Test that the base table's join isn't promoted to LOUTER. This could
        cause the query generation to fail if there is an exclude() for fk-field
        in the query, too. Refs #19087.
        """
        qs = Book.objects.annotate(n=Count('pk'))
        # join_type None means the base table, not an OUTER join.
        self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
        # Check that the query executes without problems.
        self.assertEqual(len(qs.exclude(publisher=-1)), 6)
    @skipUnlessDBFeature("allows_group_by_pk")
    def test_aggregate_duplicate_columns(self):
        """Regression for #17144: on backends that allow grouping by pk,
        only the pk column appears in GROUP BY."""
        # Regression test for #17144
        results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
        # There should only be one GROUP BY clause, for the `id` column.
        # `name` and `age` should not be grouped on.
        grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
        self.assertEqual(len(grouping), 1)
        assert 'id' in grouping[0]
        assert 'name' not in grouping[0]
        assert 'age' not in grouping[0]
        # The query group_by property should also only show the `id`.
        self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
        # Ensure that we get correct results.
        self.assertEqual(
            [(a.name, a.num_contacts) for a in results.order_by('name')],
            [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 0),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 0),
                ('Peter Norvig', 2),
                ('Stuart Russell', 0),
                ('Wesley J. Chun', 0),
            ]
        )
    @skipUnlessDBFeature("allows_group_by_pk")
    def test_aggregate_duplicate_columns_only(self):
        """Same as test_aggregate_duplicate_columns, with an only() clause
        restricting the deferred fields."""
        # Works with only() too.
        results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
        grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
        self.assertEqual(len(grouping), 1)
        assert 'id' in grouping[0]
        assert 'name' not in grouping[0]
        assert 'age' not in grouping[0]
        # The query group_by property should also only show the `id`.
        self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
        # Ensure that we get correct results.
        self.assertEqual(
            [(a.name, a.num_contacts) for a in results.order_by('name')],
            [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 0),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 0),
                ('Peter Norvig', 2),
                ('Stuart Russell', 0),
                ('Wesley J. Chun', 0),
            ]
        )
    @skipUnlessDBFeature("allows_group_by_pk")
    def test_aggregate_duplicate_columns_select_related(self):
        """Same as test_aggregate_duplicate_columns, with a select_related()
        join pulling in the contact's columns."""
        # And select_related()
        results = Book.objects.select_related('contact').annotate(
            num_authors=Count('authors'))
        grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
        self.assertEqual(len(grouping), 1)
        assert 'id' in grouping[0]
        assert 'name' not in grouping[0]
        assert 'contact' not in grouping[0]
        # The query group_by property should also only show the `id`.
        self.assertEqual(results.query.group_by, [('aggregation_regress_book', 'id')])
        # Ensure that we get correct results.
        self.assertEqual(
            [(b.name, b.num_authors) for b in results.order_by('name')],
            [
                ('Artificial Intelligence: A Modern Approach', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Practical Django Projects', 1),
                ('Python Web Development with Django', 3),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 2)
            ]
        )
    def test_reverse_join_trimming(self):
        """Aggregating over a reverse relation's FK must keep the JOIN (it
        must not be trimmed away as a redundant reverse join)."""
        qs = Author.objects.annotate(Count('book_contact_set__contact'))
        self.assertIn(' JOIN ', str(qs.query))
    def test_aggregation_with_generic_reverse_relation(self):
        """
        Regression test for #10870: Aggregates with joins ignore extra
        filters provided by setup_joins
        tests aggregations with generic reverse relations
        """
        b = Book.objects.get(name='Practical Django Projects')
        ItemTag.objects.create(object_id=b.id, tag='intermediate',
                content_type=ContentType.objects.get_for_model(b))
        ItemTag.objects.create(object_id=b.id, tag='django',
                content_type=ContentType.objects.get_for_model(b))
        # Assign a tag to model with same PK as the book above. If the JOIN
        # used in aggregation doesn't have content type as part of the
        # condition the annotation will also count the 'hi mom' tag for b.
        wmpk = WithManualPK.objects.create(id=b.pk)
        ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
                content_type=ContentType.objects.get_for_model(wmpk))
        b = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
        ItemTag.objects.create(object_id=b.id, tag='intermediate',
                content_type=ContentType.objects.get_for_model(b))
        # Only the three book-tags count; the WithManualPK tag is excluded
        # by the content-type condition on the generic join.
        self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
        results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
        self.assertEqual(
            [(b.name, b.tags__count) for b in results],
            [
                ('Practical Django Projects', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Artificial Intelligence: A Modern Approach', 0),
                ('Python Web Development with Django', 0),
                ('Sams Teach Yourself Django in 24 Hours', 0),
                ('The Definitive Guide to Django: Web Development Done Right', 0)
            ]
        )
    def test_negated_aggregation(self):
        """exclude() over aggregate Q objects (conjunction and disjunction)
        matches the equivalent explicit pk__in exclusion."""
        expected_results = Author.objects.exclude(
            pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
        ).order_by('name')
        expected_results = [a.name for a in expected_results]
        # exclude() with two identical aggregate Q conditions ANDed together.
        qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
            Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
        self.assertQuerysetEqual(
            qs,
            expected_results,
            lambda b: b.name
        )
        expected_results = Author.objects.exclude(
            pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
        ).order_by('name')
        expected_results = [a.name for a in expected_results]
        # Same, but with the conditions ORed together.
        qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2)|Q(book_cnt=2)).order_by('name')
        self.assertQuerysetEqual(
            qs,
            expected_results,
            lambda b: b.name
        )
    def test_name_filters(self):
        """A disjunction mixing the auto-generated aggregate alias
        (HAVING side) with a plain field lookup (WHERE side)."""
        qs = Author.objects.annotate(Count('book')).filter(
            Q(book__count__exact=2)|Q(name='Adrian Holovaty')
        ).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Adrian Holovaty', 'Peter Norvig'],
            lambda b: b.name
        )
    def test_name_expressions(self):
        """Aggregate references inside F() expressions within Q objects are
        detected and routed to the HAVING clause."""
        # Test that aggregates are spotted correctly from F objects.
        # Note that Adrian's age is 34 in the fixtures, and he has one book
        # so both conditions match one author.
        qs = Author.objects.annotate(Count('book')).filter(
            Q(name='Peter Norvig')|Q(age=F('book__count') + 33)
        ).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Adrian Holovaty', 'Peter Norvig'],
            lambda b: b.name
        )
    def test_ticket_11293(self):
        """Regression for #11293: OR of a plain-field Q and an aggregate Q
        splits correctly between WHERE and HAVING."""
        q1 = Q(price__gt=50)
        q2 = Q(authors__count__gt=1)
        query = Book.objects.annotate(Count('authors')).filter(
            q1 | q2).order_by('pk')
        self.assertQuerysetEqual(
            query, [1, 4, 5, 6],
            lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
"""
Check that splitting a q object to parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn='')
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors'))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
Check that an F() object referring to related column works correctly
in group by.
"""
qs = Book.objects.annotate(
acount=Count('authors')
).filter(
acount=F('publisher__num_awards')
)
self.assertQuerysetEqual(
qs, ['Sams Teach Yourself Django in 24 Hours'],
lambda b: b.name)
| bsd-3-clause |
jwlawson/tensorflow | tensorflow/contrib/boosted_trees/examples/mnist.py | 64 | 5840 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Demonstrates multiclass MNIST TF Boosted trees example.
This example demonstrates how to run experiments with TF Boosted Trees on
a MNIST dataset. We are using layer by layer boosting with diagonal hessian
strategy for multiclass handling, and cross entropy loss.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/mnist.py \
--output_dir="/tmp/mnist" --depth=4 --learning_rate=0.3 --batch_size=60000 \
--examples_per_layer=60000 --eval_batch_size=10000 --num_eval_steps=1 \
--num_trees=10 --l2=1 --vmodule=training_ops=1
When training is done, accuracy on eval data is reported. Point tensorboard
to the directory for the run to see how the training progresses:
tensorboard --logdir=/tmp/mnist
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.learn import learn_runner
def get_input_fn(dataset_split,
                 batch_size,
                 capacity=10000,
                 min_after_dequeue=3000):
  """Returns an input_fn yielding shuffled batches from an MNIST split."""

  def _input_fn():
    """Builds one shuffled (features, labels) batch."""
    # Labels arrive as uint8; cast to int32 as expected by the estimator.
    images, labels = tf.train.shuffle_batch(
        tensors=[dataset_split.images,
                 dataset_split.labels.astype(np.int32)],
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue,
        enqueue_many=True,
        num_threads=4)
    return {"images": images}, labels

  return _input_fn
# Main config - creates a TF Boosted Trees Estimator based on flags.
def _get_tfbt(output_dir):
  """Configures TF Boosted Trees estimator based on flags."""
  n_classes = 10

  # Translate command-line flags into a learner configuration proto.
  config = learner_pb2.LearnerConfig()
  config.learning_rate_tuner.fixed.learning_rate = FLAGS.learning_rate
  config.num_classes = n_classes
  config.regularization.l1 = 0.0
  config.regularization.l2 = FLAGS.l2 / FLAGS.examples_per_layer
  config.constraints.max_tree_depth = FLAGS.depth
  config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
  config.multi_class_strategy = (
      learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)

  # Checkpoint every five minutes.
  run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)

  # Create a TF Boosted trees estimator that can take in custom loss.
  return GradientBoostedDecisionTreeClassifier(
      learner_config=config,
      n_classes=n_classes,
      examples_per_layer=FLAGS.examples_per_layer,
      model_dir=output_dir,
      num_trees=FLAGS.num_trees,
      center_bias=False,
      config=run_config)
def _make_experiment_fn(output_dir):
  """Creates experiment for gradient boosted decision trees."""
  mnist = tf.contrib.learn.datasets.mnist.load_mnist()
  return tf.contrib.learn.Experiment(
      estimator=_get_tfbt(output_dir),
      train_input_fn=get_input_fn(mnist.train, FLAGS.batch_size),
      eval_input_fn=get_input_fn(mnist.validation, FLAGS.eval_batch_size),
      train_steps=None,
      eval_steps=FLAGS.num_eval_steps,
      eval_metrics=None)
def main(unused_argv):
  """Entry point: trains and evaluates the boosted-trees experiment."""
  learn_runner.run(
      experiment_fn=_make_experiment_fn,
      output_dir=FLAGS.output_dir,
      schedule="train_and_evaluate")
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  parser = argparse.ArgumentParser()
  # Define the list of flags that users can change.
  parser.add_argument(
      "--output_dir",
      type=str,
      required=True,
      help="Choose the dir for the output.")
  parser.add_argument(
      "--batch_size",
      type=int,
      default=1000,
      help="The batch size for reading data.")
  parser.add_argument(
      "--eval_batch_size",
      type=int,
      default=1000,
      help="Size of the batch for eval.")
  parser.add_argument(
      "--num_eval_steps",
      type=int,
      default=1,
      help="The number of steps to run evaluation for.")
  # Flags for gradient boosted trees config.
  parser.add_argument(
      "--depth", type=int, default=4, help="Maximum depth of weak learners.")
  parser.add_argument(
      "--l2", type=float, default=1.0, help="l2 regularization per batch.")
  parser.add_argument(
      "--learning_rate",
      type=float,
      default=0.1,
      help="Learning rate (shrinkage weight) with which each new tree is added."
  )
  parser.add_argument(
      "--examples_per_layer",
      type=int,
      default=1000,
      help="Number of examples to accumulate stats for per layer.")
  parser.add_argument(
      "--num_trees",
      type=int,
      default=None,
      required=True,
      help="Number of trees to grow before stopping.")
  # FLAGS is read as a module-level global by the helper functions above.
  FLAGS, unparsed = parser.parse_known_args()
  # Unrecognized args are forwarded to tf.app.run untouched.
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
mahim97/zulip | zerver/management/commands/set_default_streams.py | 8 | 1912 |
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any, Dict, Text
from zerver.lib.actions import set_default_streams
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
    """Management command that replaces a realm's default-stream set."""

    help = """Set default streams for a realm
Users created under this realm will start out with these streams. This
command is not additive: if you re-run it on a realm with a different
set of default streams, those will be the new complete set of default
streams.
For example:
./manage.py set_default_streams --realm=foo --streams=foo,bar,baz
./manage.py set_default_streams --realm=foo --streams="foo,bar,baz with space"
./manage.py set_default_streams --realm=foo --streams=
"""

    # Fix support for multi-line usage
    def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
        parser = super().create_parser(*args, **kwargs)
        parser.formatter_class = RawTextHelpFormatter
        return parser

    def add_arguments(self, parser: ArgumentParser) -> None:
        parser.add_argument('-s', '--streams',
                            dest='streams',
                            type=str,
                            help='A comma-separated list of stream names.')
        self.add_realm_args(parser, True)

    def handle(self, **options: str) -> None:
        # Resolve the realm once; the original code redundantly called
        # get_realm() a second time after the --streams check.
        realm = self.get_realm(options)
        if options["streams"] is None:
            print("Please provide a default set of streams (which can be empty,\
with `--streams=`).", file=sys.stderr)
            # sys.exit rather than the site-provided exit() builtin, which
            # is not guaranteed to exist in all runtime configurations.
            sys.exit(1)
        assert realm is not None  # Should be ensured by parser
        # Every listed stream becomes public, with its name as description.
        stream_dict = {
            stream.strip(): {"description": stream.strip(), "invite_only": False}
            for stream in options["streams"].split(",")
        }  # type: Dict[Text, Dict[Text, Any]]
        set_default_streams(realm, stream_dict)
| apache-2.0 |
aldian/tensorflow | tensorflow/python/keras/_impl/keras/utils/vis_utils.py | 13 | 5438 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to model visualization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# Resolve a pydot implementation at import time; `pydot` may end up None,
# in which case _check_pydot() below reports the failure lazily.
try:
  # pydot-ng is a fork of pydot that is better maintained.
  import pydot_ng as pydot  # pylint: disable=g-import-not-at-top
except ImportError:
  # Fall back on pydot if necessary.
  # Silence a `print` statement that occurs in case of import error,
  # by temporarily replacing sys.stdout.
  _stdout = sys.stdout
  sys.stdout = sys.stderr
  try:
    import pydot  # pylint: disable=g-import-not-at-top
  except ImportError:
    pydot = None
  finally:
    # Restore sys.stdout.
    sys.stdout = _stdout
def _check_pydot():
  """Raises ImportError unless pydot and graphviz are both usable."""
  try:
    # Rendering an empty graph exercises pydot itself and the graphviz
    # executables it shells out to.
    pydot.Dot.create(pydot.Dot())
  except Exception:
    # pydot raises a generic Exception here,
    # so no specific class can be caught.
    raise ImportError('Failed to import pydot. You must install pydot'
                      ' and graphviz for `pydotprint` to work.')
def model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='TB'):
  """Convert a Keras model to dot format.
  Arguments:
      model: A Keras model instance.
      show_shapes: whether to display shape information.
      show_layer_names: whether to display layer names.
      rankdir: `rankdir` argument passed to PyDot,
          a string specifying the format of the plot:
          'TB' creates a vertical plot;
          'LR' creates a horizontal plot.
  Returns:
      A `pydot.Dot` instance representing the Keras model.
  """
  from tensorflow.python.keras._impl.keras.layers.wrappers import Wrapper  # pylint: disable=g-import-not-at-top
  from tensorflow.python.keras._impl.keras.models import Sequential  # pylint: disable=g-import-not-at-top
  _check_pydot()
  dot = pydot.Dot()
  dot.set('rankdir', rankdir)
  dot.set('concentrate', True)
  dot.set_node_defaults(shape='record')
  # A Sequential wraps an inner functional model; plot that instead.
  if isinstance(model, Sequential):
    if not model.built:
      model.build()
    model = model.model
  layers = model.layers
  # Create graph nodes.
  for layer in layers:
    # Python object ids are unique per layer and stable for the lifetime
    # of the model, so they serve as graph node ids.
    layer_id = str(id(layer))
    # Append a wrapped layer's label to node's label, if it exists.
    layer_name = layer.name
    class_name = layer.__class__.__name__
    if isinstance(layer, Wrapper):
      layer_name = '{}({})'.format(layer_name, layer.layer.name)
      child_class_name = layer.layer.__class__.__name__
      class_name = '{}({})'.format(class_name, child_class_name)
    # Create node's label.
    if show_layer_names:
      label = '{}: {}'.format(layer_name, class_name)
    else:
      label = class_name
    # Rebuild the label as a table including input/output shapes.
    if show_shapes:
      try:
        outputlabels = str(layer.output_shape)
      except AttributeError:
        # Layers used at several call sites have no single output shape.
        outputlabels = 'multiple'
      if hasattr(layer, 'input_shape'):
        inputlabels = str(layer.input_shape)
      elif hasattr(layer, 'input_shapes'):
        inputlabels = ', '.join([str(ishape) for ishape in layer.input_shapes])
      else:
        inputlabels = 'multiple'
      label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label, inputlabels,
                                                     outputlabels)
    node = pydot.Node(layer_id, label=label)
    dot.add_node(node)
  # Connect nodes with edges.
  for layer in layers:
    layer_id = str(id(layer))
    for i, node in enumerate(layer._inbound_nodes):  # pylint: disable=protected-access
      node_key = layer.name + '_ib-' + str(i)
      # Only draw edges for nodes that are part of this model's graph
      # (a layer may be shared with other models).
      if node_key in model._network_nodes:  # pylint: disable=protected-access
        for inbound_layer in node.inbound_layers:
          inbound_layer_id = str(id(inbound_layer))
          layer_id = str(id(layer))
          dot.add_edge(pydot.Edge(inbound_layer_id, layer_id))
  return dot
def plot_model(model,
               to_file='model.png',
               show_shapes=False,
               show_layer_names=True,
               rankdir='TB'):
  """Converts a Keras model to dot format and save to a file.
  Arguments:
      model: A Keras model instance
      to_file: File name of the plot image.
      show_shapes: whether to display shape information.
      show_layer_names: whether to display layer names.
      rankdir: `rankdir` argument passed to PyDot,
          a string specifying the format of the plot:
          'TB' creates a vertical plot;
          'LR' creates a horizontal plot.
  """
  graph = model_to_dot(model, show_shapes, show_layer_names, rankdir)
  # Infer the image format from the target file's extension, default 'png'.
  extension = os.path.splitext(to_file)[1]
  extension = extension[1:] if extension else 'png'
  graph.write(to_file, format=extension)
| apache-2.0 |
rlr/fjord | vendor/packages/urllib3/dummyserver/proxy.py | 22 | 4707 | #!/usr/bin/env python
#
# Simple asynchronous HTTP proxy with tunnelling (CONNECT).
#
# GET/POST proxying based on
# http://groups.google.com/group/python-tornado/msg/7bea08e7a049cf26
#
# Copyright (C) 2012 Senko Rasic <senko.rasic@dobarkod.hr>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import socket
import tornado.httpserver
import tornado.ioloop
import tornado.iostream
import tornado.web
import tornado.httpclient
__all__ = ['ProxyHandler', 'run_proxy']
class ProxyHandler(tornado.web.RequestHandler):
    """Forward-proxy handler: relays GET/POST, and tunnels CONNECT."""

    SUPPORTED_METHODS = ['GET', 'POST', 'CONNECT']

    @tornado.web.asynchronous
    def get(self):
        """Proxy a plain HTTP request to the upstream server."""

        def handle_response(response):
            if response.error and not isinstance(response.error,
                                                 tornado.httpclient.HTTPError):
                self.set_status(500)
                self.write('Internal server error:\n' + str(response.error))
                self.finish()
            else:
                self.set_status(response.code)
                # Copy through only a safe subset of response headers.
                for header in ('Date', 'Cache-Control', 'Server',
                               'Content-Type', 'Location'):
                    v = response.headers.get(header)
                    if v:
                        self.set_header(header, v)
                if response.body:
                    self.write(response.body)
                self.finish()

        req = tornado.httpclient.HTTPRequest(url=self.request.uri,
            method=self.request.method, body=self.request.body,
            headers=self.request.headers, follow_redirects=False,
            allow_nonstandard_methods=True)
        client = tornado.httpclient.AsyncHTTPClient()
        try:
            client.fetch(req, handle_response)
        except tornado.httpclient.HTTPError as e:
            if hasattr(e, 'response') and e.response:
                # BUG FIX: the original called self.handle_response(), but
                # handle_response is a local function, not a method --
                # that raised AttributeError instead of relaying the error.
                handle_response(e.response)
            else:
                self.set_status(500)
                self.write('Internal server error:\n' + str(e))
                self.finish()

    @tornado.web.asynchronous
    def post(self):
        # POST is proxied identically to GET (body is carried by the request).
        return self.get()

    @tornado.web.asynchronous
    def connect(self):
        """Open a raw TCP tunnel to host:port (HTTPS CONNECT support)."""
        host, port = self.request.uri.split(':')
        client = self.request.connection.stream

        def read_from_client(data):
            upstream.write(data)

        def read_from_upstream(data):
            client.write(data)

        def client_close(data=None):
            if upstream.closed():
                return
            if data:
                upstream.write(data)
            upstream.close()

        def upstream_close(data=None):
            if client.closed():
                return
            if data:
                client.write(data)
            client.close()

        def start_tunnel():
            # Pump bytes in both directions until either side closes.
            client.read_until_close(client_close, read_from_client)
            upstream.read_until_close(upstream_close, read_from_upstream)
            client.write(b'HTTP/1.0 200 Connection established\r\n\r\n')

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        upstream = tornado.iostream.IOStream(s)
        upstream.connect((host, int(port)), start_tunnel)
def run_proxy(port, start_ioloop=True):
    """
    Run proxy on the specified port. If start_ioloop is True (default),
    the tornado IOLoop will be started immediately.
    """
    application = tornado.web.Application([
        (r'.*', ProxyHandler),
    ])
    application.listen(port)
    loop = tornado.ioloop.IOLoop.instance()
    if start_ioloop:
        loop.start()
if __name__ == '__main__':
    # Port may be overridden by the first command-line argument.
    port = int(sys.argv[1]) if len(sys.argv) > 1 else 8888
    print("Starting HTTP proxy on port %d" % port)
    run_proxy(port)
| bsd-3-clause |
ltiao/networkx | networkx/generators/social.py | 45 | 10871 | """
Famous social networks.
"""
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Katy Bold <kbold@princeton.edu>',
'Aric Hagberg <aric.hagberg@gmail.com)'])
__all__ = ['karate_club_graph', 'davis_southern_women_graph',
'florentine_families_graph']
def karate_club_graph():
    """Return Zachary's Karate Club graph.
    Each node in the returned graph has a node attribute ``'club'`` that
    indicates the name of the club to which the member represented by that node
    belongs, either ``'Mr. Hi'`` or ``'Officer'``.
    Examples
    --------
    To get the name of the club to which a node belongs::
        >>> import networkx as nx
        >>> G = nx.karate_club_graph()
        >>> G.node[5]['club']
        'Mr. Hi'
        >>> G.node[9]['club']
        'Officer'
    References
    ----------
    .. [1] Zachary, Wayne W.
       "An Information Flow Model for Conflict and Fission in Small Groups."
       *Journal of Anthropological Research*, 33, 452--473, (1977).
    .. [2] Data file from:
       http://vlado.fmf.uni-lj.si/pub/networks/data/Ucinet/UciData.htm
    """
    # Create the set of all members, and the members of each club.
    all_members = set(range(34))
    club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21}
    # club2 = all_members - club1
    G = nx.Graph()
    G.add_nodes_from(all_members)
    G.name = "Zachary's Karate Club"
    # 34x34 0/1 adjacency matrix from the UCINET data file; row/column i
    # corresponds to club member i, entry 1 means an edge exists.
    zacharydat = """\
0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0
1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0
1 1 0 1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0
1 1 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1
0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1
0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1
0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1
0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 1
0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1
0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 1 0"""
    # Parse the matrix: add an edge for every 1 entry (symmetric, so each
    # edge is seen twice; Graph.add_edge is idempotent).
    for row, line in enumerate(zacharydat.split('\n')):
        thisrow = [int(b) for b in line.split()]
        for col, entry in enumerate(thisrow):
            if entry == 1:
                G.add_edge(row, col)
    # Add the name of each member's club as a node attribute.
    for v in G:
        G.node[v]['club'] = 'Mr. Hi' if v in club1 else 'Officer'
    return G
def davis_southern_women_graph():
    """Return Davis Southern women social network.
    This is a bipartite graph.
    References
    ----------
    .. [1] A. Davis, Gardner, B. B., Gardner, M. R., 1941. Deep South.
        University of Chicago Press, Chicago, IL.
    """
    G = nx.Graph()
    # Top (bipartite=0) nodes: the women, in the order of the original study.
    women = ["Evelyn Jefferson",
             "Laura Mandeville",
             "Theresa Anderson",
             "Brenda Rogers",
             "Charlotte McDowd",
             "Frances Anderson",
             "Eleanor Nye",
             "Pearl Oglethorpe",
             "Ruth DeSand",
             "Verne Sanderson",
             "Myra Liddel",
             "Katherina Rogers",
             "Sylvia Avondale",
             "Nora Fayette",
             "Helen Lloyd",
             "Dorothy Murchison",
             "Olivia Carleton",
             "Flora Price"]
    G.add_nodes_from(women, bipartite=0)
    # Bottom (bipartite=1) nodes: the fourteen events E1..E14.
    events = ["E%d" % i for i in range(1, 15)]
    G.add_nodes_from(events, bipartite=1)
    # Attendance table: the event numbers each woman attended.
    attendance = {
        "Evelyn Jefferson": (1, 2, 3, 4, 5, 6, 8, 9),
        "Laura Mandeville": (1, 2, 3, 5, 6, 7, 8),
        "Theresa Anderson": (2, 3, 4, 5, 6, 7, 8, 9),
        "Brenda Rogers": (1, 3, 4, 5, 6, 7, 8),
        "Charlotte McDowd": (3, 4, 5, 7),
        "Frances Anderson": (3, 5, 6, 8),
        "Eleanor Nye": (5, 6, 7, 8),
        "Pearl Oglethorpe": (6, 8, 9),
        "Ruth DeSand": (5, 7, 8, 9),
        "Verne Sanderson": (7, 8, 9, 12),
        "Myra Liddel": (8, 9, 10, 12),
        "Katherina Rogers": (8, 9, 10, 12, 13, 14),
        "Sylvia Avondale": (7, 8, 9, 10, 12, 13, 14),
        "Nora Fayette": (6, 7, 9, 10, 11, 12, 13, 14),
        "Helen Lloyd": (7, 8, 10, 11, 12),
        "Dorothy Murchison": (8, 9),
        "Olivia Carleton": (9, 11),
        "Flora Price": (9, 11),
    }
    # Add woman-event edges in the original woman-major order.
    G.add_edges_from((woman, "E%d" % event)
                     for woman in women
                     for event in attendance[woman])
    G.graph['top'] = women
    G.graph['bottom'] = events
    return G
def florentine_families_graph():
    """Return Florentine families graph.
    References
    ----------
    .. [1] Ronald L. Breiger and Philippa E. Pattison
       Cumulated social roles: The duality of persons and their algebras,1
       Social Networks, Volume 8, Issue 3, September 1986, Pages 215-256
    """
    # Marriage ties among the Renaissance Florentine families.
    marriage_ties = [
        ('Acciaiuoli', 'Medici'),
        ('Castellani', 'Peruzzi'),
        ('Castellani', 'Strozzi'),
        ('Castellani', 'Barbadori'),
        ('Medici', 'Barbadori'),
        ('Medici', 'Ridolfi'),
        ('Medici', 'Tornabuoni'),
        ('Medici', 'Albizzi'),
        ('Medici', 'Salviati'),
        ('Salviati', 'Pazzi'),
        ('Peruzzi', 'Strozzi'),
        ('Peruzzi', 'Bischeri'),
        ('Strozzi', 'Ridolfi'),
        ('Strozzi', 'Bischeri'),
        ('Ridolfi', 'Tornabuoni'),
        ('Tornabuoni', 'Guadagni'),
        ('Albizzi', 'Ginori'),
        ('Albizzi', 'Guadagni'),
        ('Bischeri', 'Guadagni'),
        ('Guadagni', 'Lamberteschi'),
    ]
    G = nx.Graph()
    G.add_edges_from(marriage_ties)
    return G
| bsd-3-clause |
lexus24/w16b_test | static/Brython3.1.1-20150328-091302/Lib/_codecs.py | 526 | 4147 |
# The following are unimplemented placeholders for the C-level _codecs
# functions; each body is an empty stub (implicitly returns None).
def ascii_decode(*args,**kw):
    pass
def ascii_encode(*args,**kw):
    pass
def charbuffer_encode(*args,**kw):
    pass
def charmap_build(*args,**kw):
    pass
def charmap_decode(*args,**kw):
    pass
def charmap_encode(*args,**kw):
    pass
def decode(*args,**kw):
    """decode(obj, [encoding[,errors]]) -> object
    Decodes obj using the codec registered for encoding. encoding defaults
    to the default encoding. errors may be given to set a different error
    handling scheme. Default is 'strict' meaning that encoding errors raise
    a ValueError. Other possible values are 'ignore' and 'replace'
    as well as any other name registered with codecs.register_error that is
    able to handle ValueErrors."""
    # Not implemented: stub only.
    pass
def encode(*args,**kw):
    """encode(obj, [encoding[,errors]]) -> object
    Encodes obj using the codec registered for encoding. encoding defaults
    to the default encoding. errors may be given to set a different error
    handling scheme. Default is 'strict' meaning that encoding errors raise
    a ValueError. Other possible values are 'ignore', 'replace' and
    'xmlcharrefreplace' as well as any other name registered with
    codecs.register_error that can handle ValueErrors."""
    # Not implemented: stub only.
    pass
def escape_decode(*args,**kw):
    pass
def escape_encode(*args,**kw):
    pass
def latin_1_decode(*args,**kw):
    pass
def latin_1_encode(*args,**kw):
    pass
def lookup(encoding):
    """lookup(encoding) -> CodecInfo
    Looks up a codec tuple in the Python codec registry and returns
    a CodecInfo object."""
    # Only the UTF-8 codec is available here; accept both common spellings.
    if encoding in ('utf-8', 'utf_8'):
        import encodings.utf_8
        return encodings.utf_8.getregentry()
    # BUG FIX: the original constructed LookupError(encoding) without
    # raising it, so unknown encodings silently returned None.  (It also
    # imported a browser-only `javascript.console` for debug logging,
    # which broke the utf-8 path outside the browser; removed.)
    raise LookupError(encoding)
def lookup_error(*args,**kw):
    """lookup_error(errors) -> handler
    Return the error handler for the specified error handling name
    or raise a LookupError, if no handler exists under this name."""
    # Not implemented: stub only.
    pass
def mbcs_decode(*args,**kw):
    pass
def mbcs_encode(*args,**kw):
    pass
def raw_unicode_escape_decode(*args,**kw):
    pass
def raw_unicode_escape_encode(*args,**kw):
    pass
def readbuffer_encode(*args,**kw):
    pass
def register(*args,**kw):
    """register(search_function)
    Register a codec search function. Search functions are expected to take
    one argument, the encoding name in all lower case letters, and return
    a tuple of functions (encoder, decoder, stream_reader, stream_writer)
    (or a CodecInfo object)."""
    # Not implemented: stub only.
    pass
def register_error(*args,**kw):
    """register_error(errors, handler)
    Register the specified error handler under the name
    errors. handler must be a callable object, that
    will be called with an exception instance containing
    information about the location of the encoding/decoding
    error and must return a (replacement, new position) tuple."""
    # Not implemented: stub only.
    pass
def unicode_escape_decode(*args,**kw):
    pass
def unicode_escape_encode(*args,**kw):
    pass
def unicode_internal_decode(*args,**kw):
    pass
def unicode_internal_encode(*args,**kw):
    pass
# UTF-16/32/7 codec entry points: unimplemented placeholder stubs.
def utf_16_be_decode(*args,**kw):
    pass
def utf_16_be_encode(*args,**kw):
    pass
def utf_16_decode(*args,**kw):
    pass
def utf_16_encode(*args,**kw):
    pass
def utf_16_ex_decode(*args,**kw):
    pass
def utf_16_le_decode(*args,**kw):
    pass
def utf_16_le_encode(*args,**kw):
    pass
def utf_32_be_decode(*args,**kw):
    pass
def utf_32_be_encode(*args,**kw):
    pass
def utf_32_decode(*args,**kw):
    pass
def utf_32_encode(*args,**kw):
    pass
def utf_32_ex_decode(*args,**kw):
    pass
def utf_32_le_decode(*args,**kw):
    pass
def utf_32_le_encode(*args,**kw):
    pass
def utf_7_decode(*args,**kw):
    pass
def utf_7_encode(*args,**kw):
    pass
def utf_8_decode(*args,**kw):
    pass
def utf_8_encode(*args,**kw):
    """utf_8_encode(input[, errors]) -> (encoded bytes, length consumed)."""
    input=args[0]
    # The error-handling scheme may arrive positionally or as a keyword.
    if len(args) == 2:
        errors = args[1]
    else:
        errors=kw.get('errors', 'strict')
    #todo need to deal with errors, but for now assume all is well.
    # NOTE(review): bytes(list_of_chars, 'utf-8') relies on the Brython
    # bytes constructor accepting a list of 1-char strings; CPython would
    # raise TypeError here -- confirm against the Brython runtime.
    return (bytes([_f for _f in input], 'utf-8'), len(input))
| agpl-3.0 |
ojengwa/oh-mainline | vendor/packages/docutils/test/local_dummy_lang.py | 18 | 5350 | # $Id: local_dummy_lang.py 7504 2012-08-27 07:55:20Z grubert $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
English-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
# Node-class-name -> display-label mapping; every value carries a 'dummy'
# prefix so test output is distinguishable from the real English mappings.
labels = {
      # fixed: language-dependent
      'author': 'dummy Author',
      'authors': 'dummy Authors',
      'organization': 'dummy Organization',
      'address': 'dummy Address',
      'contact': 'dummy Contact',
      'version': 'dummy Version',
      'revision': 'dummy Revision',
      'status': 'dummy Status',
      'date': 'dummy Date',
      'copyright': 'dummy Copyright',
      'dedication': 'dummy Dedication',
      'abstract': 'dummy Abstract',
      'attention': 'dummy Attention!',
      'caution': 'dummy Caution!',
      'danger': 'dummy !DANGER!',
      'error': 'dummy Error',
      'hint': 'dummy Hint',
      'important': 'dummy Important',
      'note': 'dummy Note',
      'tip': 'dummy Tip',
      'warning': 'dummy Warning',
      'contents': 'dummy Contents'}
"""Mapping of node class name to label text."""
# Lowercased localized field name -> canonical field name.
bibliographic_fields = {
      # language-dependent: fixed
      'dummy author': 'author',
      'dummy authors': 'authors',
      'dummy organization': 'organization',
      'dummy address': 'address',
      'dummy contact': 'contact',
      'dummy version': 'version',
      'dummy revision': 'revision',
      'dummy status': 'status',
      'dummy date': 'date',
      'dummy copyright': 'copyright',
      'dummy dedication': 'dedication',
      'dummy abstract': 'abstract'}
"""English (lowcased) to canonical name mapping for bibliographic fields."""
# Separators tried in order when splitting the 'Authors' field value.
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
# Localized directive name -> canonical directive name (as registered in
# docutils.parsers.rst.directives).
directives = {
      # language-dependent: fixed
      'dummy attention': 'attention',
      'dummy caution': 'caution',
      'dummy code': 'code',
      'dummy code-block': 'code',
      'dummy sourcecode': 'code',
      'dummy danger': 'danger',
      'dummy error': 'error',
      'dummy hint': 'hint',
      'dummy important': 'important',
      'dummy note': 'note',
      'dummy tip': 'tip',
      'dummy warning': 'warning',
      'dummy admonition': 'admonition',
      'dummy sidebar': 'sidebar',
      'dummy topic': 'topic',
      'dummy line-block': 'line-block',
      'dummy parsed-literal': 'parsed-literal',
      'dummy rubric': 'rubric',
      'dummy epigraph': 'epigraph',
      'dummy highlights': 'highlights',
      'dummy pull-quote': 'pull-quote',
      'dummy compound': 'compound',
      'dummy container': 'container',
      #'dummy questions': 'questions',
      'dummy table': 'table',
      'dummy csv-table': 'csv-table',
      'dummy list-table': 'list-table',
      #'dummy qa': 'questions',
      #'dummy faq': 'questions',
      'dummy meta': 'meta',
      'dummy math': 'math',
      #'dummy imagemap': 'imagemap',
      'dummy image': 'image',
      'dummy figure': 'figure',
      'dummy include': 'include',
      'dummy raw': 'raw',
      'dummy replace': 'replace',
      'dummy unicode': 'unicode',
      'dummy date': 'date',
      'dummy class': 'class',
      'dummy role': 'role',
      'dummy default-role': 'default-role',
      'dummy title': 'title',
      'dummy contents': 'contents',
      'dummy sectnum': 'sectnum',
      'dummy section-numbering': 'sectnum',
      'dummy header': 'header',
      'dummy footer': 'footer',
      #'dummy footnotes': 'footnotes',
      #'dummy citations': 'citations',
      'dummy target-notes': 'target-notes',
      'dummy restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
mapping."""
# Localized role name -> canonical role name for interpreted text; several
# aliases (short forms) map to the same canonical role.
roles = {
      # language-dependent: fixed
      'dummy abbreviation': 'abbreviation',
      'dummy ab': 'abbreviation',
      'dummy acronym': 'acronym',
      'dummy ac': 'acronym',
      'dummy code': 'code',
      'dummy index': 'index',
      'dummy i': 'index',
      'dummy subscript': 'subscript',
      'dummy sub': 'subscript',
      'dummy superscript': 'superscript',
      'dummy sup': 'superscript',
      'dummy title-reference': 'title-reference',
      'dummy title': 'title-reference',
      'dummy t': 'title-reference',
      'dummy pep-reference': 'pep-reference',
      'dummy pep': 'pep-reference',
      'dummy rfc-reference': 'rfc-reference',
      'dummy rfc': 'rfc-reference',
      'dummy emphasis': 'emphasis',
      'dummy strong': 'strong',
      'dummy literal': 'literal',
      'dummy math': 'math',
      'dummy named-reference': 'named-reference',
      'dummy anonymous-reference': 'anonymous-reference',
      'dummy footnote-reference': 'footnote-reference',
      'dummy citation-reference': 'citation-reference',
      'dummy substitution-reference': 'substitution-reference',
      'dummy target': 'target',
      'dummy uri-reference': 'uri-reference',
      'dummy uri': 'uri-reference',
      'dummy url': 'uri-reference',
      'dummy raw': 'raw',}
"""Mapping of English role names to canonical role names for interpreted text.
"""
| agpl-3.0 |
Mhynlo/SickRage | lib/tornado/test/options_test.py | 14 | 10324 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import os
import sys
from tornado.options import OptionParser, Error
from tornado.util import basestring_type, PY3
from tornado.test.util import unittest
if PY3:
from io import StringIO
else:
from cStringIO import StringIO
try:
# py33+
from unittest import mock # type: ignore
except ImportError:
try:
import mock # type: ignore
except ImportError:
mock = None
class OptionsTest(unittest.TestCase):
    """Tests for tornado.options.OptionParser: command-line and config-file
    parsing, parse callbacks, attribute/item access, interaction with
    mock.patch, type conversion, and dash/underscore name normalization."""
    def test_parse_command_line(self):
        # A command-line flag overrides the defined default.
        options = OptionParser()
        options.define("port", default=80)
        options.parse_command_line(["main.py", "--port=443"])
        self.assertEqual(options.port, 443)
    def test_parse_config_file(self):
        # options_test.cfg sets port, a non-ASCII username, and my_path
        # (which the cfg file derives from its own location).
        options = OptionParser()
        options.define("port", default=80)
        options.define("username", default='foo')
        options.define("my_path")
        config_path = os.path.join(os.path.dirname(__file__),
                                   "options_test.cfg")
        options.parse_config_file(config_path)
        self.assertEqual(options.port, 443)
        self.assertEqual(options.username, "李康")
        self.assertEqual(options.my_path, config_path)
    def test_parse_callbacks(self):
        # Callbacks registered via add_parse_callback fire only on a
        # "final" parse.
        options = OptionParser()
        self.called = False
        def callback():
            self.called = True
        options.add_parse_callback(callback)
        # non-final parse doesn't run callbacks
        options.parse_command_line(["main.py"], final=False)
        self.assertFalse(self.called)
        # final parse does
        options.parse_command_line(["main.py"])
        self.assertTrue(self.called)
        # callbacks can be run more than once on the same options
        # object if there are multiple final parses
        self.called = False
        options.parse_command_line(["main.py"])
        self.assertTrue(self.called)
    def test_help(self):
        # --help writes usage to stderr and exits via SystemExit.
        options = OptionParser()
        try:
            orig_stderr = sys.stderr
            sys.stderr = StringIO()
            with self.assertRaises(SystemExit):
                options.parse_command_line(["main.py", "--help"])
            usage = sys.stderr.getvalue()
        finally:
            sys.stderr = orig_stderr
        self.assertIn("Usage:", usage)
    def test_subcommand(self):
        # Leftover arguments from the base parse feed a second,
        # independent OptionParser for the subcommand.
        base_options = OptionParser()
        base_options.define("verbose", default=False)
        sub_options = OptionParser()
        sub_options.define("foo", type=str)
        rest = base_options.parse_command_line(
            ["main.py", "--verbose", "subcommand", "--foo=bar"])
        self.assertEqual(rest, ["subcommand", "--foo=bar"])
        self.assertTrue(base_options.verbose)
        rest2 = sub_options.parse_command_line(rest)
        self.assertEqual(rest2, [])
        self.assertEqual(sub_options.foo, "bar")
        # the two option sets are distinct
        try:
            orig_stderr = sys.stderr
            sys.stderr = StringIO()
            with self.assertRaises(Error):
                sub_options.parse_command_line(["subcommand", "--verbose"])
        finally:
            sys.stderr = orig_stderr
    def test_setattr(self):
        options = OptionParser()
        options.define('foo', default=1, type=int)
        options.foo = 2
        self.assertEqual(options.foo, 2)
    def test_setattr_type_check(self):
        # setattr requires that options be the right type and doesn't
        # parse from string formats.
        options = OptionParser()
        options.define('foo', default=1, type=int)
        with self.assertRaises(Error):
            options.foo = '2'
    def test_setattr_with_callback(self):
        # Assigning an option value triggers its per-option callback.
        values = []
        options = OptionParser()
        options.define('foo', default=1, type=int, callback=values.append)
        options.foo = 2
        self.assertEqual(values, [2])
    def _sample_options(self):
        # Helper: a parser with two simple options used by several tests.
        options = OptionParser()
        options.define('a', default=1)
        options.define('b', default=2)
        return options
    def test_iter(self):
        options = self._sample_options()
        # OptionParsers always define 'help'.
        self.assertEqual(set(['a', 'b', 'help']), set(iter(options)))
    def test_getitem(self):
        options = self._sample_options()
        self.assertEqual(1, options['a'])
    def test_setitem(self):
        options = OptionParser()
        options.define('foo', default=1, type=int)
        options['foo'] = 2
        self.assertEqual(options['foo'], 2)
    def test_items(self):
        options = self._sample_options()
        # OptionParsers always define 'help'.
        expected = [('a', 1), ('b', 2), ('help', options.help)]
        actual = sorted(options.items())
        self.assertEqual(expected, actual)
    def test_as_dict(self):
        options = self._sample_options()
        expected = {'a': 1, 'b': 2, 'help': options.help}
        self.assertEqual(expected, options.as_dict())
    def test_group_dict(self):
        # Options defined without an explicit group fall into a group
        # named after the defining file.
        options = OptionParser()
        options.define('a', default=1)
        options.define('b', group='b_group', default=2)
        frame = sys._getframe(0)
        this_file = frame.f_code.co_filename
        self.assertEqual(set(['b_group', '', this_file]), options.groups())
        b_group_dict = options.group_dict('b_group')
        self.assertEqual({'b': 2}, b_group_dict)
        self.assertEqual({}, options.group_dict('nonexistent'))
    @unittest.skipIf(mock is None, 'mock package not present')
    def test_mock_patch(self):
        # ensure that our setattr hooks don't interfere with mock.patch
        options = OptionParser()
        options.define('foo', default=1)
        options.parse_command_line(['main.py', '--foo=2'])
        self.assertEqual(options.foo, 2)
        with mock.patch.object(options.mockable(), 'foo', 3):
            self.assertEqual(options.foo, 3)
        self.assertEqual(options.foo, 2)
        # Try nested patches mixed with explicit sets
        with mock.patch.object(options.mockable(), 'foo', 4):
            self.assertEqual(options.foo, 4)
            options.foo = 5
            self.assertEqual(options.foo, 5)
            with mock.patch.object(options.mockable(), 'foo', 6):
                self.assertEqual(options.foo, 6)
            self.assertEqual(options.foo, 5)
        self.assertEqual(options.foo, 2)
    def test_types(self):
        # Each supported option type parses its own string format.
        options = OptionParser()
        options.define('str', type=str)
        options.define('basestring', type=basestring_type)
        options.define('int', type=int)
        options.define('float', type=float)
        options.define('datetime', type=datetime.datetime)
        options.define('timedelta', type=datetime.timedelta)
        options.parse_command_line(['main.py',
                                    '--str=asdf',
                                    '--basestring=qwer',
                                    '--int=42',
                                    '--float=1.5',
                                    '--datetime=2013-04-28 05:16',
                                    '--timedelta=45s'])
        self.assertEqual(options.str, 'asdf')
        self.assertEqual(options.basestring, 'qwer')
        self.assertEqual(options.int, 42)
        self.assertEqual(options.float, 1.5)
        self.assertEqual(options.datetime,
                         datetime.datetime(2013, 4, 28, 5, 16))
        self.assertEqual(options.timedelta, datetime.timedelta(seconds=45))
    def test_multiple_string(self):
        # multiple=True splits a comma-separated value into a list.
        options = OptionParser()
        options.define('foo', type=str, multiple=True)
        options.parse_command_line(['main.py', '--foo=a,b,c'])
        self.assertEqual(options.foo, ['a', 'b', 'c'])
    def test_multiple_int(self):
        # Integer lists additionally expand a:b ranges (inclusive).
        options = OptionParser()
        options.define('foo', type=int, multiple=True)
        options.parse_command_line(['main.py', '--foo=1,3,5:7'])
        self.assertEqual(options.foo, [1, 3, 5, 6, 7])
    def test_error_redefine(self):
        options = OptionParser()
        options.define('foo')
        with self.assertRaises(Error) as cm:
            options.define('foo')
        self.assertRegexpMatches(str(cm.exception),
                                 'Option.*foo.*already defined')
    def test_dash_underscore_cli(self):
        # Dashes and underscores should be interchangeable.
        for defined_name in ['foo-bar', 'foo_bar']:
            for flag in ['--foo-bar=a', '--foo_bar=a']:
                options = OptionParser()
                options.define(defined_name)
                options.parse_command_line(['main.py', flag])
                # Attr-style access always uses underscores.
                self.assertEqual(options.foo_bar, 'a')
                # Dict-style access allows both.
                self.assertEqual(options['foo-bar'], 'a')
                self.assertEqual(options['foo_bar'], 'a')
    def test_dash_underscore_file(self):
        # No matter how an option was defined, it can be set with underscores
        # in a config file.
        for defined_name in ['foo-bar', 'foo_bar']:
            options = OptionParser()
            options.define(defined_name)
            options.parse_config_file(os.path.join(os.path.dirname(__file__),
                                                   "options_test.cfg"))
            self.assertEqual(options.foo_bar, 'a')
    def test_dash_underscore_introspection(self):
        # Original names are preserved in introspection APIs.
        options = OptionParser()
        options.define('with-dash', group='g')
        options.define('with_underscore', group='g')
        all_options = ['help', 'with-dash', 'with_underscore']
        self.assertEqual(sorted(options), all_options)
        self.assertEqual(sorted(k for (k, v) in options.items()), all_options)
        self.assertEqual(sorted(options.as_dict().keys()), all_options)
        self.assertEqual(sorted(options.group_dict('g')),
                         ['with-dash', 'with_underscore'])
        # --help shows CLI-style names with dashes.
        buf = StringIO()
        options.print_help(buf)
        self.assertIn('--with-dash', buf.getvalue())
        self.assertIn('--with-underscore', buf.getvalue())
| gpl-3.0 |
ryano144/intellij-community | python/lib/Lib/xml/etree/__init__.py | 183 | 1604 | # $Id: __init__.py 1821 2004-06-03 16:57:49Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
| apache-2.0 |
auready/django | django/contrib/gis/db/backends/spatialite/schema.py | 33 | 6791 | from django.db.backends.sqlite3.schema import DatabaseSchemaEditor
from django.db.utils import DatabaseError
class SpatialiteSchemaEditor(DatabaseSchemaEditor):
    """SQLite schema editor with SpatiaLite geometry-column support.

    Geometry columns cannot be created inline in CREATE TABLE; they are
    added/removed through SpatiaLite stored procedures (AddGeometryColumn,
    DiscardGeometryColumn, ...), so the corresponding SQL is queued in
    self.geometry_sql and executed after the table exists.
    """
    sql_add_geometry_column = (
        "SELECT AddGeometryColumn(%(table)s, %(column)s, %(srid)s, "
        "%(geom_type)s, %(dim)s, %(null)s)"
    )
    sql_add_spatial_index = "SELECT CreateSpatialIndex(%(table)s, %(column)s)"
    sql_drop_spatial_index = "DROP TABLE idx_%(table)s_%(column)s"
    sql_recover_geometry_metadata = (
        "SELECT RecoverGeometryColumn(%(table)s, %(column)s, %(srid)s, "
        "%(geom_type)s, %(dim)s)"
    )
    sql_remove_geometry_metadata = "SELECT DiscardGeometryColumn(%(table)s, %(column)s)"
    sql_discard_geometry_columns = "DELETE FROM %(geom_table)s WHERE f_table_name = %(table)s"
    sql_update_geometry_columns = (
        "UPDATE %(geom_table)s SET f_table_name = %(new_table)s "
        "WHERE f_table_name = %(old_table)s"
    )
    # SpatiaLite metadata tables that may reference a table by name and
    # therefore need cleanup/repointing on drop/rename.
    geometry_tables = [
        "geometry_columns",
        "geometry_columns_auth",
        "geometry_columns_time",
        "geometry_columns_statistics",
    ]
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Deferred AddGeometryColumn / CreateSpatialIndex statements,
        # executed once the table itself has been created.
        self.geometry_sql = []
    def geo_quote_name(self, name):
        """Quote a name for use inside SpatiaLite geometry functions."""
        return self.connection.ops.geo_quote_name(name)
    def column_sql(self, model, field, include_default=False):
        """Queue geometry-column creation SQL and return (None, None) for
        geometry fields; delegate ordinary fields to the parent."""
        from django.contrib.gis.db.models.fields import GeometryField
        if not isinstance(field, GeometryField):
            return super().column_sql(model, field, include_default)
        # Geometry columns are created by the `AddGeometryColumn` function
        self.geometry_sql.append(
            self.sql_add_geometry_column % {
                "table": self.geo_quote_name(model._meta.db_table),
                "column": self.geo_quote_name(field.column),
                "srid": field.srid,
                "geom_type": self.geo_quote_name(field.geom_type),
                "dim": field.dim,
                "null": int(not field.null),
            }
        )
        if field.spatial_index:
            self.geometry_sql.append(
                self.sql_add_spatial_index % {
                    "table": self.quote_name(model._meta.db_table),
                    "column": self.quote_name(field.column),
                }
            )
        return None, None
    def remove_geometry_metadata(self, model, field):
        """Discard the geometry column's metadata and drop its spatial
        index table."""
        self.execute(
            self.sql_remove_geometry_metadata % {
                "table": self.quote_name(model._meta.db_table),
                "column": self.quote_name(field.column),
            }
        )
        self.execute(
            self.sql_drop_spatial_index % {
                "table": model._meta.db_table,
                "column": field.column,
            }
        )
    def create_model(self, model):
        """Create the table, then run the queued geometry SQL."""
        super().create_model(model)
        # Create geometry columns
        for sql in self.geometry_sql:
            self.execute(sql)
        self.geometry_sql = []
    def delete_model(self, model, **kwargs):
        """Drop the table after scrubbing its SpatiaLite metadata."""
        from django.contrib.gis.db.models.fields import GeometryField
        # Drop spatial metadata (dropping the table does not automatically remove them)
        for field in model._meta.local_fields:
            if isinstance(field, GeometryField):
                self.remove_geometry_metadata(model, field)
        # Make sure all geom stuff is gone
        for geom_table in self.geometry_tables:
            try:
                self.execute(
                    self.sql_discard_geometry_columns % {
                        "geom_table": geom_table,
                        "table": self.quote_name(model._meta.db_table),
                    }
                )
            except DatabaseError:
                # Metadata table may not exist on this SpatiaLite version.
                pass
        super().delete_model(model, **kwargs)
    def add_field(self, model, field):
        """Add a column; geometry fields go through the queued SQL path."""
        from django.contrib.gis.db.models.fields import GeometryField
        if isinstance(field, GeometryField):
            # Populate self.geometry_sql
            self.column_sql(model, field)
            for sql in self.geometry_sql:
                self.execute(sql)
            self.geometry_sql = []
        else:
            super().add_field(model, field)
    def remove_field(self, model, field):
        """Remove a column; geometry fields force a table rebuild."""
        from django.contrib.gis.db.models.fields import GeometryField
        # NOTE: If the field is a geometry field, the table is just recreated,
        # the parent's remove_field can't be used cause it will skip the
        # recreation if the field does not have a database type. Geometry fields
        # do not have a db type cause they are added and removed via stored
        # procedures.
        if isinstance(field, GeometryField):
            self._remake_table(model, delete_field=field)
        else:
            super().remove_field(model, field)
    def alter_db_table(self, model, old_db_table, new_db_table):
        """Rename the table, keeping SpatiaLite metadata and spatial index
        tables consistent: discard geometry metadata, rename, repoint
        metadata rows, then recover geometry columns and rename indexes."""
        from django.contrib.gis.db.models.fields import GeometryField
        # Remove geometry-ness from temp table
        for field in model._meta.local_fields:
            if isinstance(field, GeometryField):
                self.execute(
                    self.sql_remove_geometry_metadata % {
                        "table": self.quote_name(old_db_table),
                        "column": self.quote_name(field.column),
                    }
                )
        # Alter table
        super().alter_db_table(model, old_db_table, new_db_table)
        # Repoint any straggler names
        for geom_table in self.geometry_tables:
            try:
                self.execute(
                    self.sql_update_geometry_columns % {
                        "geom_table": geom_table,
                        "old_table": self.quote_name(old_db_table),
                        "new_table": self.quote_name(new_db_table),
                    }
                )
            except DatabaseError:
                # Metadata table may not exist on this SpatiaLite version.
                pass
        # Re-add geometry-ness and rename spatial index tables
        for field in model._meta.local_fields:
            if isinstance(field, GeometryField):
                self.execute(self.sql_recover_geometry_metadata % {
                    "table": self.geo_quote_name(new_db_table),
                    "column": self.geo_quote_name(field.column),
                    "srid": field.srid,
                    "geom_type": self.geo_quote_name(field.geom_type),
                    "dim": field.dim,
                })
                if getattr(field, 'spatial_index', False):
                    self.execute(self.sql_rename_table % {
                        "old_table": self.quote_name("idx_%s_%s" % (old_db_table, field.column)),
                        "new_table": self.quote_name("idx_%s_%s" % (new_db_table, field.column)),
                    })
| bsd-3-clause |
endlessm/chromium-browser | third_party/shaderc/src/glslc/test/option_dash_M.py | 3 | 33038 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
import os.path
import sys
from environment import File, Directory
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader
from glslc_test_framework import GlslCTest
# Smallest valid GLSL shader, used as the content of most test inputs.
MINIMAL_SHADER = '#version 140\nvoid main() {}'
# Convenience environments: the minimal shader placed in the test's working
# directory, or in a 'subdir' below it.
EMPTY_SHADER_IN_CURDIR = Directory('.', [File('shader.vert', MINIMAL_SHADER)])
EMPTY_SHADER_IN_SUBDIR = Directory('subdir',
                                   [File('shader.vert', MINIMAL_SHADER)])
def process_test_specified_dependency_info_rules(test_specified_rules):
    """Resolve FileShader placeholders in expected dependency rules.

    Temporary files created through FileShader get their names only at run
    time, so the rules a test declares may contain FileShader objects
    instead of plain path strings.  For each rule this helper:

    - replaces a FileShader 'target' with the basename of its generated
      file, appending the optional 'target_extension' value first (the
      'target_extension' key is removed either way);
    - replaces any FileShader entries in 'dependency' with their full
      filenames.

    Note this function processes the given rule list in-place.
    """
    for rule in test_specified_rules:
        target = rule['target']
        if isinstance(target, FileShader):
            # Resolve the placeholder to its runtime filename, append the
            # extension (if any), then keep only the basename.
            target = target.filename
            if 'target_extension' in rule:
                extension = rule.pop('target_extension')
                if extension is not None:
                    target = target + extension
            rule['target'] = os.path.basename(target)
        # Dependencies may mix plain names with FileShader placeholders;
        # normalize everything to a set of filename strings.
        rule['dependency'] = {
            dep.filename if isinstance(dep, FileShader) else dep
            for dep in rule['dependency']}
def parse_text_rules(text_lines):
    """Build a list of dependency rules from make-style text lines.

    Blank lines are skipped; rules keep their text order.  Each rule is a
    dict of the form:

        {'target': <target name>, 'dependency': <set of dependent filenames>}
    """
    parsed_rules = []
    for text_line in text_lines:
        if not text_line.strip():
            continue
        fields = text_line.split(': ')
        parsed_rules.append({
            'target': fields[0].strip(),
            'dependency': set(fields[-1].strip().split(' ')),
        })
    return parsed_rules
class DependencyInfoStdoutMatch(GlslCTest):
    """Mixin class for tests that can expect dependency info in Stdout.

    To mix in this class, the subclass needs to provide
    dependency_rules_expected as a list of dictionaries, each dictionary
    describes one expected make rule for a target file. A expected rule should
    be specified in the following way:
        rule = {'target': <target name>,
                'target_extension': <.spv, .spvasm or None>,
                'dependency': <dependent file names>}
    The 'target_extension' field is optional, its value will be appended to
    'target' to get complete target name.
    And the list 'dependency_rules_expected' is a list of such rules and the
    order of the rules does matter.
    """
    def check_stdout_dependency_info(self, status):
        """Compare the make-style rules printed on stdout with
        self.dependency_rules_expected; return (ok, message)."""
        if not status.stdout:
            return False, 'Expect dependency rules on stdout'
        # stdout arrives as bytes; the decoding API differs between
        # Python 2 and Python 3.
        if sys.version_info[0] == 2:
            rules = parse_text_rules(status.stdout.decode('utf-8').split('\n'))
        elif sys.version_info[0] == 3:
            rules = parse_text_rules(str(status.stdout,
                                         encoding='utf-8',
                                         errors='ignore').split('\n'))
        # Resolve FileShader placeholders in the expected rules (mutates
        # the expected list in-place) before comparing.
        process_test_specified_dependency_info_rules(
            self.dependency_rules_expected)
        if self.dependency_rules_expected != rules:
            return False, ('Incorrect dependency info:\n{ac_rules}\n'
                           'Expected:\n{ex_rules}\n'
                           'Stdout output:\n{ac_stdout}\n'.format(
                               ac_rules=rules,
                               ex_rules=self.dependency_rules_expected,
                               ac_stdout=status.stdout))
        return True, ''
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathNoInclude(DependencyInfoStdoutMatch):
    """Tests -M with single input file which doesn't contain #include and is
    represented in relative path.
    e.g. glslc -M shader.vert
      => shader.vert.spv: shader.vert
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-M', 'shader.vert']
    # The target defaults to <input>.spv; with no includes, the input file
    # is the only dependency.
    dependency_rules_expected = [{'target': "shader.vert.spv",
                                  'dependency': {"shader.vert"}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputAbsolutePathNoInclude(DependencyInfoStdoutMatch):
    """Tests -M with single input file which doesn't contain #include and is
    represented in absolute path.
    e.g. glslc -M /usr/local/shader.vert
      => shader.vert.spv: /usr/local/shader.vert
    """
    shader = FileShader(MINIMAL_SHADER, '.vert')
    glslc_args = ['-M', shader]
    # 'target_extension' is appended to the FileShader's runtime-chosen
    # filename when the expected rules are processed.
    dependency_rules_expected = [{'target': shader,
                                  'target_extension': '.spv',
                                  'dependency': {shader}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithInclude(
        DependencyInfoStdoutMatch):
    """Tests -M with single input file which does contain #include and is
    represented in relative path.
    e.g. glslc -M a.vert
      => a.vert.spv: a.vert b.vert
    """
    environment = Directory('.', [
        File('a.vert', '#version 140\n#include "b.vert"\nvoid main(){}\n'),
        File('b.vert', 'void foo(){}\n'),
    ])
    glslc_args = ['-M', 'a.vert']
    # The included file appears as an additional dependency.
    dependency_rules_expected = [{'target': 'a.vert.spv',
                                  'dependency': {'a.vert', 'b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithIncludeSubdir(
        DependencyInfoStdoutMatch):
    """Tests -M with single input file which does #include another file in a
    subdirectory of current directory and is represented in relative path.
    e.g. glslc -M a.vert
      => a.vert.spv: a.vert include/b.vert
    """
    environment = Directory('.', [
        File('a.vert', ('#version 140\n#include "include/b.vert"'
                        '\nvoid main(){}\n')),
        Directory('include', [File('b.vert', 'void foo(){}\n')]),
    ])
    glslc_args = ['-M', 'a.vert']
    # The dependency keeps the subdirectory prefix used in the #include.
    dependency_rules_expected = [{'target': 'a.vert.spv',
                                  'dependency': {'a.vert', 'include/b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithDashI(DependencyInfoStdoutMatch):
    """Tests -M with single input file works with -I option. The #include
    directive does not specify 'include/' for the file to be include.
    e.g. glslc -M a.vert -I include
      => a.vert.spv: a.vert include/b.vert
    """
    environment = Directory('.', [
        File('a.vert', ('#version 140\n#include "b.vert"'
                        '\nvoid main(){}\n')),
        Directory('include', [File('b.vert', 'void foo(){}\n')]),
    ])
    glslc_args = ['-M', 'a.vert', '-I', 'include']
    # The resolved path (through the -I search dir) is what gets reported.
    dependency_rules_expected = [{'target': 'a.vert.spv',
                                  'dependency': {'a.vert', 'include/b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputRelativePathWithNestedInclude(
        DependencyInfoStdoutMatch):
    """Tests -M with single input file under nested #include case. The input file
    is represented in relative path.
    e.g. glslc -M a.vert
      => a.vert.spv: a.vert b.vert c.vert
    """
    environment = Directory('.', [
        File('a.vert', '#version 140\n#include "b.vert"\nvoid main(){}\n'),
        File('b.vert', 'void foo(){}\n#include "c.vert"\n'),
        File('c.vert', 'void bar(){}\n'),
    ])
    glslc_args = ['-M', 'a.vert']
    # Transitively-included files are listed too (a -> b -> c).
    dependency_rules_expected = [{'target': 'a.vert.spv',
                                  'dependency':
                                      {'a.vert', 'b.vert', 'c.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMultipleInputRelativePathNoInclude(
        DependencyInfoStdoutMatch):
    """Tests -M with multiple input file which don't contain #include and are
    represented in relative paths.
    e.g. glslc -M a.vert b.vert
      => a.vert.spv: a.vert
         b.vert.spv: b.vert
    """
    environment = Directory('.', [
        File('a.vert', MINIMAL_SHADER),
        File('b.vert', MINIMAL_SHADER),
    ])
    glslc_args = ['-M', 'a.vert', 'b.vert']
    # One rule per input file, in command-line order.
    dependency_rules_expected = [{'target': 'a.vert.spv',
                                  'dependency': {'a.vert'}},
                                 {'target': 'b.vert.spv',
                                  'dependency': {'b.vert'}}, ]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMultipleInputAbsolutePathNoInclude(
        DependencyInfoStdoutMatch):
    """Tests -M with single input file which doesn't contain #include and is
    represented in absolute path.
    e.g. glslc -M /usr/local/a.vert /usr/local/b.vert
      => a.vert.spv: /usr/local/a.vert
         b.vert.spv: /usr/local/b.vert
    """
    shader_a = FileShader(MINIMAL_SHADER, '.vert')
    shader_b = FileShader(MINIMAL_SHADER, '.vert')
    glslc_args = ['-M', shader_a, shader_b]
    # Each FileShader target gets '.spv' appended and is reduced to its
    # basename during rule processing.
    dependency_rules_expected = [{'target': shader_a,
                                  'target_extension': '.spv',
                                  'dependency': {shader_a}},
                                 {'target': shader_b,
                                  'target_extension': '.spv',
                                  'dependency': {shader_b}}, ]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDashCapMT(DependencyInfoStdoutMatch):
    """Tests -MT works with -M. User can specify the target object name in the
    generated dependency info.
    e.g. glslc -M shader.vert -MT target
      => target: shader.vert
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-M', 'shader.vert', '-MT', 'target']
    # -MT replaces the default <input>.spv target name.
    dependency_rules_expected = [{'target': 'target',
                                  'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMInputAbsolutePathWithInclude(DependencyInfoStdoutMatch):
    """Tests -M have included files represented in absolute paths when the input
    file is represented in absolute path.
    E.g. Assume a.vert has '#include "b.vert"'
         glslc -M /usr/local/a.vert
           => a.vert.spv: /usr/local/a.vert /usr/local/b.vert
    """
    environment = Directory('.', [File('b.vert', 'void foo(){}\n')])
    shader_main = FileShader(
        '#version 140\n#include "b.vert"\nvoid main(){}\n', '.vert')
    glslc_args = ['-M', shader_main]
    dependency_rules_expected = [{
        'target': shader_main,
        'target_extension': '.spv',
        'dependency': {shader_main}
        # The dependency here is not complete. we can not get the absolute path
        # of b.vert here. It will be added in check_stdout_dependency_info()
    }]
    def check_stdout_dependency_info(self, status):
        """Complete the expected dependency set, then run the base check."""
        # Add the absolute path of b.vert to the dependency set; the
        # shader's directory is only known once the FileShader exists.
        self.dependency_rules_expected[0]['dependency'].add(os.path.dirname(
            self.shader_main.filename) + '/b.vert')
        return DependencyInfoStdoutMatch.check_stdout_dependency_info(self,
                                                                      status)
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMSingleInputAbsolutePathWithIncludeSubdir(
        DependencyInfoStdoutMatch):
    """Tests -M with single input file which does #include another file in a
    subdirectory of current directory and is represented in absolute path.
    e.g. glslc -M /usr/local/a.vert
      => a.vert.spv: /usr/local/a.vert /usr/local/include/b.vert
    """
    environment = Directory('.', [
        Directory('include', [File('b.vert', 'void foo(){}\n')]),
    ])
    shader_main = FileShader('#version 140\n#include "include/b.vert"\n',
                             '.vert')
    glslc_args = ['-M', shader_main]
    dependency_rules_expected = [{
        'target': shader_main,
        'target_extension': '.spv',
        'dependency': {shader_main}
        # The dependency here is not complete. we can not get the absolute
        # path of include/b.vert here. It will be added in
        # check_stdout_dependency_info()
    }]
    def check_stdout_dependency_info(self, status):
        """Complete the expected dependency set, then run the base check."""
        # Add the absolute path of include/b.vert to the dependency set;
        # the shader's directory is only known once the FileShader exists.
        self.dependency_rules_expected[0]['dependency'].add(os.path.dirname(
            self.shader_main.filename) + '/include/b.vert')
        return DependencyInfoStdoutMatch.check_stdout_dependency_info(self,
                                                                      status)
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMOverridesOtherModes(DependencyInfoStdoutMatch):
    """Tests -M overrides other compiler mode options, includeing -E, -c and -S.
    """
    environment = Directory('.', [
        File('a.vert', MINIMAL_SHADER),
        File('b.vert', MINIMAL_SHADER),
    ])
    # Despite -E/-c/-S being present, only dependency info is produced.
    glslc_args = ['-M', '-E', '-c', '-S', 'a.vert', 'b.vert']
    dependency_rules_expected = [{'target': 'a.vert.spv',
                                  'dependency': {'a.vert'}},
                                 {'target': 'b.vert.spv',
                                  'dependency': {'b.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMEquivalentToCapM(DependencyInfoStdoutMatch):
    """Tests that -MM behaves as -M.
    e.g. glslc -MM shader.vert
      => shader.vert.spv: shader.vert
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-MM', 'shader.vert']
    # Same expected rule as the plain -M case.
    dependency_rules_expected = [{'target': 'shader.vert.spv',
                                  'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMImpliesDashCapE(DependencyInfoStdoutMatch,
                                  expect.NoOutputOnStderr):
    """Tests that -M implies -E, a .glsl file without an explict stage should
    not generate an error.
    e.g. glslc -M shader.glsl
      => shader.spv: shader.glsl
      <no error message should be generated>
    """
    # .glsl has no shader stage; compiling would fail, but -M (like -E)
    # does not require a stage.
    environment = Directory('.', [File('shader.glsl', MINIMAL_SHADER)])
    glslc_args = ['-M', 'shader.glsl']
    dependency_rules_expected = [{'target': 'shader.spv',
                                  'dependency': {'shader.glsl'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMImpliesDashW(DependencyInfoStdoutMatch,
                               expect.NoOutputOnStderr):
    """Tests that -M implies -w, a deprecated attribute should not generate
    warning message.
    e.g. glslc -M shader.vert
      => shader.vert.spv: shader.vert
      <no warning message should be generated>
    """
    # 'attribute' is deprecated in GLSL 400; with -M no warning may appear.
    environment = Directory('.', [File(
        'shader.vert', """#version 400
           layout(location=0) attribute float x;
           void main() {}""")])
    glslc_args = ['-M', 'shader.vert']
    dependency_rules_expected = [{'target': 'shader.vert.spv',
                                  'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMImpliesDashCapE(DependencyInfoStdoutMatch,
                                   expect.NoOutputOnStderr):
    """Tests that -M implies -E, a .glsl file without an explict stage should
    not generate an error.
    e.g. glslc -MM shader.glsl
      => shader.spv: shader.glsl
      <no error message should be generated>
    """
    # Same as the -M case: no stage needed for dependency generation.
    environment = Directory('.', [File('shader.glsl', MINIMAL_SHADER)])
    glslc_args = ['-MM', 'shader.glsl']
    dependency_rules_expected = [{'target': 'shader.spv',
                                  'dependency': {'shader.glsl'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMMImpliesDashW(DependencyInfoStdoutMatch,
                                expect.NoOutputOnStderr):
    """Tests that -MM implies -w, a deprecated attribute should not generate
    warning message.
    e.g. glslc -MM shader.vert
      => shader.vert.spv: shader.vert
      <no warning message should be generated>
    """
    # 'attribute' is deprecated in GLSL 400; with -MM no warning may appear.
    environment = Directory('.', [File(
        'shader.vert', """
        #version 400
        layout(location = 0) attribute float x;
        void main() {}""")])
    glslc_args = ['-MM', 'shader.vert']
    dependency_rules_expected = [{'target': 'shader.vert.spv',
                                  'dependency': {'shader.vert'}}]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMD(expect.ValidFileContents, expect.ValidNamedObjectFile):
    """Tests that -MD generates dependency info file and compilation output.
    e.g. glslc -MD shader.vert
      => <a.spv: valid SPIR-V object file>
      => <shader.vert.spv.d: dependency info>
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-MD', 'shader.vert']
    # Unlike -M, compilation still happens (default output name a.spv)
    # and the rules go to a .d file instead of stdout.
    expected_object_filenames = ('a.spv', )
    target_filename = 'shader.vert.spv.d'
    expected_file_contents = ['shader.vert.spv: shader.vert\n']
class DependencyInfoFileMatch(GlslCTest):
    """Mixin class for tests that can expect dependency info files.

    To mix in this class, subclasses need to provide dependency_info_filenames
    and dependency_info_files_expected_contents which are two lists.
    list dependency_info_filenames contains the dependency info file names and
    list dependency_info_files_expected_contents contains the expected matching
    dependency rules.
    The item order of the two lists should match, which means:
        dependency_info_files_expected_contents[i] should describe the
        dependency rules saved in dependency_info_filenames[i]
    The content of each dependency info file is described in same 'list of dict'
    structure explained in class DependencyInfoStdoutMatch's doc string.
    """
    def check_dependency_info_files(self, status):
        """Check that each expected dependency info file exists and contains
        exactly the expected dependency rules.

        Returns (True, '') on success, or (False, <reason>) for the first
        missing, unreadable, or mismatching file.
        """
        dep_info_files = \
            [os.path.join(status.directory,
                          f) for f in self.dependency_info_filenames]
        for i, df in enumerate(dep_info_files):
            if not os.path.isfile(df):
                return False, 'Cannot find file: ' + df
            try:
                with open(df, 'r') as dff:
                    content = dff.read()
                    rules = parse_text_rules(content.split('\n'))
                # Resolve FileShader placeholders in the expected rules
                # (mutates the expected list in-place) before comparing.
                process_test_specified_dependency_info_rules(
                    self.dependency_info_files_expected_contents[i])
                if self.dependency_info_files_expected_contents[
                        i] != rules:
                    # Bug fix: this message previously used the placeholder
                    # {ac_out} while supplying the keyword 'ac_stdout', so
                    # any mismatch raised KeyError instead of reporting; it
                    # also read self.dependency_rules_expected, an attribute
                    # this mixin does not require subclasses to define.
                    return False, (
                        'Incorrect dependency info:\n{ac_rules}\n'
                        'Expected:\n{ex_rules}\n'
                        'Incorrect file output:\n{ac_out}\n'
                        'Incorrect dependency info file:\n{ac_file}\n'.format(
                            ac_rules=rules,
                            ex_rules=self.dependency_info_files_expected_contents[i],
                            ac_out=content,
                            ac_file=df))
            except IOError:
                return False, ('Could not open dependency info file ' + df +
                               ' for reading')
        return True, ''
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMWorksWithDashO(DependencyInfoFileMatch):
    """Tests -M works with -o option. When user specifies an output file name
    with -o, the dependency info should be dumped to the user specified output
    file.
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-M', 'shader.vert', '-o', 'dep_info']
    # With -o, the rules land in 'dep_info' rather than on stdout.
    dependency_info_filenames = ('dep_info', )
    dependency_info_files_expected_contents = []
    dependency_info_files_expected_contents.append(
        [{'target': 'shader.vert.spv',
          'dependency': {'shader.vert'}}])
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDMultipleFile(expect.ValidNamedObjectFile,
                                DependencyInfoFileMatch):
    """Tests that -MD generates one dependency info file per input file.
    e.g. glslc -MD a.vert b.vert -c
      => <a.vert.spv: valid SPIR-V object file>
      => <a.vert.spv.d: dependency info: "a.vert.spv: a.vert">
      => <b.vert.spv: valid SPIR-V object file>
      => <b.vert.spv.d: dependency info: "b.vert.spv: b.vert">
    """
    environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
                                  File('b.vert', MINIMAL_SHADER)])
    glslc_args = ['-MD', 'a.vert', 'b.vert', '-c']
    expected_object_filenames = ('a.vert.spv', 'b.vert.spv', )
    dependency_info_filenames = ['a.vert.spv.d', 'b.vert.spv.d']
    dependency_info_files_expected_contents = [
        [{'target': 'a.vert.spv', 'dependency': {'a.vert'}}],
        [{'target': 'b.vert.spv', 'dependency': {'b.vert'}}],
    ]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDMultipleFilePreprocessingOnlyMode(expect.StdoutMatch,
                                                     DependencyInfoFileMatch):
    """Tests that -MD still writes per-input dependency info files when the
    compiler runs in preprocessing-only mode.
    e.g. glslc -MD a.vert b.vert -E
      => stdout: preprocess result of a.vert and b.vert
      => <a.vert.spv.d: dependency info: "a.vert.spv: a.vert">
      => <b.vert.spv.d: dependency info: "b.vert.spv: b.vert">
    """
    environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
                                  File('b.vert', MINIMAL_SHADER)])
    glslc_args = ['-MD', 'a.vert', 'b.vert', '-E']
    dependency_info_filenames = ['a.vert.spv.d', 'b.vert.spv.d']
    dependency_info_files_expected_contents = [
        [{'target': 'a.vert.spv', 'dependency': {'a.vert'}}],
        [{'target': 'b.vert.spv', 'dependency': {'b.vert'}}],
    ]
    expected_stdout = ("#version 140\nvoid main(){ }\n"
                       "#version 140\nvoid main(){ }\n")
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDMultipleFileDisassemblyMode(expect.ValidNamedAssemblyFile,
                                               DependencyInfoFileMatch):
    """Tests that -MD writes per-input dependency info files in disassembly
    mode, naming them after the .spvasm outputs.
    e.g. glslc -MD a.vert b.vert -S
      => <a.vert.spvasm: valid SPIR-V assembly file>
      => <a.vert.spvasm.d: dependency info: "a.vert.spvasm: a.vert">
      => <b.vert.spvasm: valid SPIR-V assembly file>
      => <b.vert.spvasm.d: dependency info: "b.vert.spvasm: b.vert">
    """
    environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
                                  File('b.vert', MINIMAL_SHADER)])
    glslc_args = ['-MD', 'a.vert', 'b.vert', '-S']
    expected_assembly_filenames = ('a.vert.spvasm', 'b.vert.spvasm', )
    dependency_info_filenames = ['a.vert.spvasm.d', 'b.vert.spvasm.d']
    dependency_info_files_expected_contents = [
        [{'target': 'a.vert.spvasm', 'dependency': {'a.vert'}}],
        [{'target': 'b.vert.spvasm', 'dependency': {'b.vert'}}],
    ]
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMT(expect.ValidFileContents, expect.ValidNamedObjectFile):
    """Tests that -MT generates dependency info file with specified target label.
    -MT only overrides the target label; the dependency info file keeps its
    default name deduced from the input file (shader.vert.spv.d).
    e.g. glslc -MD shader.vert -MT target_label
      => <a.spv: valid SPIR-V object file>
      => <shader.vert.spv.d: dependency info: "target_label: shader.vert">
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-MD', 'shader.vert', '-MT', 'target_label']
    expected_object_filenames = ('a.spv', )
    target_filename = 'shader.vert.spv.d'
    expected_file_contents = ['target_label: shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMF(expect.ValidFileContents, expect.ValidNamedObjectFile):
    """Tests that -MF dumps dependency info into specified file.
    -MF only overrides the dependency info file name; the target label is
    still deduced from the input file.
    e.g. glslc -MD shader.vert -MF dep_file
      => <a.spv: valid SPIR-V object file>
      => <dep_file: dependency info: "shader.vert.spv: shader.vert">
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-MD', 'shader.vert', '-MF', 'dep_file']
    expected_object_filenames = ('a.spv', )
    target_filename = 'dep_file'
    expected_file_contents = ['shader.vert.spv: shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDSpecifyOutputFileName(expect.ValidFileContents,
                                         expect.ValidNamedObjectFile):
    """Tests that -MD has the default dependency info file name and target
    label correct when -o <output_file_name> appears in the command line.
    The default dependency info file name and target label should be deduced
    from the linking-disabled compilation output.
    e.g. glslc -MD subdir/shader.vert -c -o output
      => <./output: valid SPIR-V object file>
      => <./output.d: dependency info: "output: subdir/shader.vert">
    """
    environment = EMPTY_SHADER_IN_SUBDIR
    glslc_args = ['-MD', 'subdir/shader.vert', '-c', '-o', 'output']
    expected_object_filenames = ('output', )
    target_filename = 'output.d'
    expected_file_contents = ['output: subdir/shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDWithDashMFDashMTDashO(expect.ValidFileContents,
                                         expect.ValidNamedObjectFile):
    """Tests that -MD, -MF, -MT and -o generate the dependency info file and
    compilation output file correctly.
    e.g. glslc -MD subdir/shader.vert -c -o subdir/out -MF dep_info -MT label
      => <subdir/out: valid SPIR-V object file>
      => <dep_info: dependency info: "label: subdir/shader.vert">
    """
    environment = EMPTY_SHADER_IN_SUBDIR
    glslc_args = ['-MD', 'subdir/shader.vert', '-c', '-o', 'subdir/out', '-MF',
                  'dep_info', '-MT', 'label']
    expected_object_filenames = ('subdir/out', )
    target_filename = 'dep_info'
    expected_file_contents = ['label: subdir/shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestDashCapMDWithDashMFDashMTDashODisassemblyMode(
        expect.ValidFileContents, expect.ValidNamedAssemblyFile):
    """Tests that -MD, -MF, -MT and -o generate the dependency info file and
    compilation output file correctly in disassembly mode.
    e.g. glslc -MD subdir/shader.vert -S -o subdir/out -MF dep_info -MT label
      => <subdir/out: valid SPIR-V assembly file>
      => <dep_info: dependency info: "label: subdir/shader.vert">
    """
    environment = EMPTY_SHADER_IN_SUBDIR
    glslc_args = ['-MD', 'subdir/shader.vert', '-S', '-o', 'subdir/out', '-MF',
                  'dep_info', '-MT', 'label']
    expected_assembly_filenames = ('subdir/out', )
    target_filename = 'dep_info'
    expected_file_contents = ['label: subdir/shader.vert\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorSetBothDashCapMAndDashCapMD(expect.StderrMatch):
    """Tests that when both -M (or -MM) and -MD are specified, glslc exits
    with an error message complaining about the conflict and produces
    neither dependency info output nor compilation output. This test has
    the -MD flag before the -M flag.
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-MD', '-M', 'shader.vert']
    expected_stderr = ['glslc: error: both -M (or -MM) and -MD are specified. '
                       'Only one should be used at one time.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorSetBothDashCapMDAndDashCapM(expect.StderrMatch):
    """Tests that when both -M (or -MM) and -MD are specified, glslc exits
    with an error message complaining about the conflict and produces
    neither dependency info output nor compilation output. This test has
    the -M flag before the -MD flag.
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-M', '-MD', 'shader.vert']
    expected_stderr = ['glslc: error: both -M (or -MM) and -MD are specified. '
                       'Only one should be used at one time.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMFWithMultipleInputFiles(expect.StderrMatch):
    """Tests that when the -MF option is specified, only one input file may
    be provided; glslc should emit an error otherwise."""
    environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
                                  File('b.vert', MINIMAL_SHADER)])
    glslc_args = ['-MD', 'a.vert', 'b.vert', '-c', '-MF', 'dep_info']
    expected_stderr = ['glslc: error: '
                       'to specify dependency info file name or dependency '
                       'info target, only one input file is allowed.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMTWithMultipleInputFiles(expect.StderrMatch):
    """Tests that when the -MT option is specified, only one input file may
    be provided; glslc should emit an error otherwise."""
    environment = Directory('.', [File('a.vert', MINIMAL_SHADER),
                                  File('b.vert', MINIMAL_SHADER)])
    glslc_args = ['-M', 'a.vert', 'b.vert', '-c', '-MT', 'target']
    expected_stderr = ['glslc: error: '
                       'to specify dependency info file name or dependency '
                       'info target, only one input file is allowed.\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMFMissingDashMAndDashMD(expect.StderrMatch):
    """Tests that when only -MF is specified while -M and -MD are not specified,
    glslc should emit an error complaining that the user must specify either
    -M (-MM) or -MD to generate dependency info.
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-MF', 'dep_info', 'shader.vert', '-c']
    expected_stderr = ['glslc: error: '
                       'to generate dependencies you must specify either -M '
                       '(-MM) or -MD\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorDashCapMTMissingDashMAndMDWith(expect.StderrMatch):
    """Tests that when only -MF and -MT are specified while -M and -MD are
    not, glslc should emit an error complaining that the user must specify
    either -M (-MM) or -MD to generate dependency info.
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['-MF', 'dep_info', '-MT', 'target', 'shader.vert', '-c']
    expected_stderr = ['glslc: error: '
                       'to generate dependencies you must specify either -M '
                       '(-MM) or -MD\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorMissingDependencyInfoFileName(expect.StderrMatch):
    """Tests that dependency file name is missing when -MF is specified.
    NOTE(review): the leading 'target' argument looks like a stray extra
    input file; the test only exercises the trailing -MF with no value.
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['target', 'shader.vert', '-c', '-MF']
    expected_stderr = ['glslc: error: '
                       'missing dependency info filename after \'-MF\'\n']
@inside_glslc_testsuite('OptionsCapM')
class TestErrorMissingDependencyTargetName(expect.StderrMatch):
    """Tests that dependency target name is missing when -MT is specified.
    NOTE(review): the leading 'target' argument looks like a stray extra
    input file; the test only exercises the trailing -MT with no value.
    """
    environment = EMPTY_SHADER_IN_CURDIR
    glslc_args = ['target', 'shader.vert', '-c', '-MT']
    expected_stderr = ['glslc: error: '
                       'missing dependency info target after \'-MT\'\n']
| bsd-3-clause |
srepho/BDA_py_demos | demos_ch10/demo10_1.py | 19 | 4102 | """Bayesian data analysis
Chapter 10, demo 1
Rejection sampling example
"""
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2, markeredgewidth=0)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
                            '#984ea3','#ff7f00','#ffff33'))
plt.rc('patch', facecolor='#bfe2ff')
# fake interesting distribution: a kernel density estimate over 16
# hand-picked points stands in for the target distribution
x = np.linspace(-3, 3, 200)
r = np.array([ 1.1 , 1.3 , -0.1 , -0.7 , 0.2 , -0.4 , 0.06, -1.7 ,
               1.7 , 0.3 , 0.7 , 1.6 , -2.06, -0.74, 0.2 , 0.5 ])
# Estimate the density (named q, to emphasize that it does not need to be
# normalized). Parameter bw_method=0.48 is used to mimic the outcome of the
# kernelp function in Matlab.
q = stats.gaussian_kde(r, bw_method=0.48).evaluate(x)
# rejection sampling example: the proposal g is a normal density
g_mean = 0
g_std = 1.1
g = stats.norm.pdf(x, loc=g_mean, scale=g_std)
# M is computed by discrete approximation: the max of q/g on the grid,
# so that M*g >= q at every grid point
M = np.max(q/g)
# prescale g so that it can be plotted directly as the envelope M*g
g *= M
# plot the densities
plt.figure()
plt.plot(x, q)
plt.plot(x, g, linestyle='--')
plt.fill_between(x, q)
plt.legend((r'$q(\theta|y)$', r'$Mg(\theta)$'))
plt.yticks(())
plt.title('Rejection sampling')
plt.ylim([0, 1.1*g.max()])
# illustrate one sample: a vertical draw below q is accepted, one
# between q and M*g is rejected
r1 = -0.8
zi = np.argmin(np.abs(x-r1)) # find the closest grid point
plt.plot((x[zi], x[zi]), (0, q[zi]), color='gray')
plt.plot((x[zi], x[zi]), (q[zi], g[zi]), color='gray', linestyle='--')
r21 = 0.3 * g[zi]
r22 = 0.8 * g[zi]
plt.plot(r1, r21, marker='o', color='#4daf4a', markersize=12)
plt.plot(r1, r22, marker='o', color='#e41a1c', markersize=12)
# add annotations
plt.text(x[zi], q[zi], r'$\leftarrow \, q(\theta=r|y)$', fontsize=18)
plt.text(x[zi], g[zi], r'$\leftarrow \, g(\theta=r)$', fontsize=18)
plt.text(r1-0.1, r21, 'accepted', horizontalalignment='right')
plt.text(r1-0.1, r22, 'rejected', horizontalalignment='right')
# get nsamp samples (vectorized: each proposal r1[i] is snapped to its
# nearest grid point, then accepted iff a uniform draw under M*g falls
# below q)
nsamp = 200
r1 = stats.norm.rvs(size=nsamp, loc=g_mean, scale=g_std)
zi = np.argmin(np.abs(x[:,None] - r1), axis=0)
r2 = np.random.rand(nsamp) * g[zi]
acc = r2 < q[zi]
# plot the densities again
plotgrid = mpl.gridspec.GridSpec(2, 1, height_ratios=[5,1])
fig = plt.figure()
ax0 = plt.subplot(plotgrid[0])
plt.plot(x, q)
plt.plot(x, g, linestyle='--')
plt.fill_between(x, q)
plt.xticks(())
plt.yticks(())
plt.title('Rejection sampling')
plt.ylim([0, 1.1*g.max()])
plt.xlim((x[0],x[-1]))
# the samples
plt.scatter(r1[~acc], r2[~acc], 40, color='#ff999a')
plt.scatter(r1[acc], r2[acc], 40, color='#4daf4a')
plt.legend((r'$q(\theta|y)$', r'$Mg(\theta)$', 'rejected', 'accepted'))
# only accepted samples
ax1 = plt.subplot(plotgrid[1])
plt.scatter(r1[acc], np.ones(np.count_nonzero(acc)), 40, color='#4daf4a', alpha=0.3)
plt.yticks(())
plt.xlim((x[0],x[-1]))
# add inter-axis lines connecting each accepted point to its mark in the
# bottom strip (coordinates converted through figure space)
transf = fig.transFigure.inverted()
for i in range(nsamp):
    if acc[i] and x[0] < r1[i] and r1[i] < x[-1]:
        coord1 = transf.transform(ax0.transData.transform([r1[i], r2[i]]))
        coord2 = transf.transform(ax1.transData.transform([r1[i], 1]))
        fig.lines.append(mpl.lines.Line2D(
            (coord1[0], coord2[0]),
            (coord1[1], coord2[1]),
            transform=fig.transFigure,
            alpha=0.2
        ))
# alternative proposal distribution: piecewise linear over four segments,
# hugging q more tightly than the normal proposal
g = np.empty(x.shape)
g[x <= -1.5] = np.linspace(q[0], np.max(q[x<=-1.5]), len(x[x<=-1.5]))
g[(x > -1.5) & (x <= 0.2)] = np.linspace(
    np.max(q[x<=-1.5]),
    np.max(q[(x>-1.5) & (x<=0.2)]),
    len(x[(x>-1.5) & (x<=0.2)])
)
g[(x > 0.2) & (x <= 2.3)] = np.linspace(
    np.max(q[(x>-1.5) & (x<=0.2)]),
    np.max(q[x>2.3]),
    len(x[(x>0.2) & (x<=2.3)])
)
g[x > 2.3] = np.linspace(np.max(q[x>2.3]), q[-1], len(x[x>2.3]))
M = np.max(q/g)
g *= M
# plot
plt.figure()
plt.plot(x, q)
plt.plot(x, g, linestyle='--')
plt.fill_between(x, q)
plt.legend((r'$q(\theta|y)$', r'$Mg(\theta)$'))
plt.yticks(())
plt.title('Rejection sampling - alternative proposal distribution')
plt.ylim([0, 1.1*g.max()])
plt.show()
| gpl-3.0 |
jctanner/ansible | test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_interfaces/lldp_interfaces.py | 47 | 5314 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The vyos lldp_interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from re import findall, search, M
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
utils,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lldp_interfaces.lldp_interfaces import (
Lldp_interfacesArgs,
)
class Lldp_interfacesFacts(object):
    """ The vyos lldp_interfaces fact class
    """
    def __init__(self, module, subspec="config", options="options"):
        """Build generated_spec, the empty facts skeleton derived from the
        resource's argument spec.

        :param module: the Ansible module instance
        :param subspec: key of the sub-spec inside the argument spec
        :param options: key of the options inside the sub-spec
        """
        self._module = module
        self.argument_spec = Lldp_interfacesArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec
        self.generated_spec = utils.generate_dict(facts_argument_spec)
    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for lldp_interfaces
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        if not data:
            data = connection.get_config()
        objs = []
        lldp_names = findall(r"^set service lldp interface (\S+)", data, M)
        if lldp_names:
            for lldp in set(lldp_names):
                # Gather only the config lines belonging to this interface
                lldp_regex = r" %s .+$" % lldp
                cfg = findall(lldp_regex, data, M)
                obj = self.render_config(cfg)
                obj["name"] = lldp.strip("'")
                if obj:
                    objs.append(obj)
        facts = {}
        if objs:
            facts["lldp_interfaces"] = objs
        # Fix: this update was performed twice; one call is sufficient.
        ansible_facts["ansible_network_resources"].update(facts)
        return ansible_facts
    def render_config(self, conf):
        """
        Render config as dictionary structure and delete keys
        with null values.

        :param conf: the configuration lines for one lldp interface
        :rtype: dictionary
        :returns: The generated config
        """
        config = {}
        location = {}
        civic_conf = "\n".join(filter(lambda x: ("civic-based" in x), conf))
        elin_conf = "\n".join(filter(lambda x: ("elin" in x), conf))
        coordinate_conf = "\n".join(
            filter(lambda x: ("coordinate-based" in x), conf)
        )
        disable = "\n".join(filter(lambda x: ("disable" in x), conf))
        coordinate_based_conf = self.parse_attribs(
            ["altitude", "datum", "longitude", "latitude"], coordinate_conf
        )
        elin_based_conf = self.parse_lldp_elin_based(elin_conf)
        civic_based_conf = self.parse_lldp_civic_based(civic_conf)
        if disable:
            config["enable"] = False
        # Only one location type is rendered, in priority order:
        # coordinate-based, then civic-based, then elin
        if coordinate_conf:
            location["coordinate_based"] = coordinate_based_conf
            config["location"] = location
        elif civic_based_conf:
            location["civic_based"] = civic_based_conf
            config["location"] = location
        elif elin_conf:
            location["elin"] = elin_based_conf
            config["location"] = location
        return utils.remove_empties(config)
    def parse_attribs(self, attribs, conf):
        """Extract the named attributes from conf, stripping quotes;
        altitude is cast to int. Missing attributes are dropped."""
        config = {}
        for item in attribs:
            value = utils.parse_conf_arg(conf, item)
            if value:
                value = value.strip("'")
                if item == "altitude":
                    value = int(value)
                config[item] = value
            else:
                config[item] = None
        return utils.remove_empties(config)
    def parse_lldp_civic_based(self, conf):
        """Parse civic-based location lines into a dict with the list of
        {ca_type, ca_value} entries and the country code, or None."""
        civic_based = None
        if conf:
            civic_info_list = []
            civic_add_list = findall(r"^.*civic-based ca-type (.+)", conf, M)
            if civic_add_list:
                for civic_add in civic_add_list:
                    # Each capture looks like: <type> ca-value '<value>'
                    ca = civic_add.split(" ")
                    c_add = {}
                    c_add["ca_type"] = int(ca[0].strip("'"))
                    c_add["ca_value"] = ca[2].strip("'")
                    civic_info_list.append(c_add)
                country_code = search(
                    r"^.*civic-based country-code (.+)", conf, M
                )
                civic_based = {}
                civic_based["ca_info"] = civic_info_list
                civic_based["country_code"] = country_code.group(1).strip("'")
        return civic_based
    def parse_lldp_elin_based(self, conf):
        """Parse the ELIN number from elin location lines, or None."""
        elin_based = None
        if conf:
            e_num = search(r"^.* elin (.+)", conf, M)
            elin_based = e_num.group(1).strip("'")
        return elin_based
| gpl-3.0 |
tanium/pytan | EXAMPLES/PYTAN_API/export_basetype_csv_with_sort_list.py | 1 | 6607 | #!/usr/bin/env python
"""
Export a BaseType from getting objects as CSV with name and description for header_sort
"""
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
# NOTE(review): list comprehension used purely for its side effect of
# appending to sys.path
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan (must happen after sys.path has been extended above)
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443"  # optional
handler_args['trusted_certs'] = "certs"
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["export_format"] = u'csv'
kwargs["header_sort"] = [u'name', u'description']
# setup the arguments for handler.get()
get_kwargs = {
    'name': [
        "Computer Name", "IP Route Details", "IP Address",
        'Folder Contents',
    ],
    'objtype': 'sensor',
}
# get the objects that will provide the basetype that we want to export
print "...CALLING: handler.get() with args: {}".format(get_kwargs)
response = handler.get(**get_kwargs)
# store the basetype object as the obj we want to export
kwargs['obj'] = response
# export the object to a string
# (we could just as easily export to a file using export_to_report_file)
print "...CALLING: handler.export_obj() with args {}".format(kwargs)
out = handler.export_obj(**kwargs)
# trim the output if it is more than 15 lines long
if len(out.splitlines()) > 15:
    out = out.splitlines()[0:15]
    out.append('..trimmed for brevity..')
    out = '\n'.join(out)
print "...OUTPUT: print the export_str returned from export_obj():"
print out
'''STDOUT from running this:
...CALLING: pytan.handler() with args: {'username': 'Administrator', 'record_all_requests': True, 'loglevel': 1, 'debugformat': False, 'host': '10.0.1.240', 'password': 'Tanium2015!', 'port': '443'}
...OUTPUT: handler string: PyTan v2.1.4 Handler for Session to 10.0.1.240:443, Authenticated: True, Platform Version: 6.5.314.4301
...CALLING: handler.get() with args: {'objtype': 'sensor', 'name': ['Computer Name', 'IP Route Details', 'IP Address', 'Folder Contents']}
...CALLING: handler.export_obj() with args {'export_format': u'csv', 'obj': <taniumpy.object_types.sensor_list.SensorList object at 0x102f03a90>, 'header_sort': [u'name', u'description']}
...OUTPUT: print the export_str returned from export_obj():
name,description,category,creation_time,delimiter,exclude_from_parse_flag,hash,hidden_flag,id,ignore_case_flag,last_modified_by,max_age_seconds,metadata_item_0_admin_flag,metadata_item_0_name,metadata_item_0_value,modification_time,parameter_definition,queries_query_0_platform,queries_query_0_script,queries_query_0_script_type,queries_query_1_platform,queries_query_1_script,queries_query_1_script_type,queries_query_2_platform,queries_query_2_script,queries_query_2_script_type,queries_query_3_platform,queries_query_3_script,queries_query_3_script_type,queries_query_4_platform,queries_query_4_script,queries_query_4_script_type,source_id,string_count,subcolumns_subcolumn_0_hidden_flag,subcolumns_subcolumn_0_ignore_case_flag,subcolumns_subcolumn_0_index,subcolumns_subcolumn_0_name,subcolumns_subcolumn_0_value_type,subcolumns_subcolumn_1_hidden_flag,subcolumns_subcolumn_1_ignore_case_flag,subcolumns_subcolumn_1_index,subcolumns_subcolumn_1_name,subcolumns_subcolumn_1_value_type,subcolumns_subcolumn_2_hidden_flag,subcolumns_subcolumn_2_ignore_case_flag,subcolumns_subcolumn_2_index,subcolumns_subcolumn_2_name,subcolumns_subcolumn_2_value_type,subcolumns_subcolumn_3_hidden_flag,subcolumns_subcolumn_3_ignore_case_flag,subcolumns_subcolumn_3_index,subcolumns_subcolumn_3_name,subcolumns_subcolumn_3_value_type,subcolumns_subcolumn_4_hidden_flag,subcolumns_subcolumn_4_ignore_case_flag,subcolumns_subcolumn_4_index,subcolumns_subcolumn_4_name,subcolumns_subcolumn_4_value_type,subcolumns_subcolumn_5_hidden_flag,subcolumns_subcolumn_5_ignore_case_flag,subcolumns_subcolumn_5_index,subcolumns_subcolumn_5_name,subcolumns_subcolumn_5_value_type,value_type
Computer Name,"The assigned name of the client machine.
Example: workstation-1.company.com",Reserved,,,0,3409330187,0,3,1,,86400,,,,,,Windows,select CSName from win32_operatingsystem,WMIQuery,,,,,,,,,,,,,0,5,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,String
IP Route Details,"Returns IPv4 network routes, filtered to exclude noise. With Flags, Metric, Interface columns.
Example: 172.16.0.0|192.168.1.1|255.255.0.0|UG|100|eth0",Network,2015-09-14T13:39:12,|,1,435227963,0,568,1,Administrator,60,0,defined,Tanium,2015-09-14T13:39:12,,Windows,"strComputer = "."
Set objWMIService = GetObject("winmgmts:" _
& "{impersonationLevel=impersonate}!\\" & strComputer & "\root\cimv2")
Set collip = objWMIService.ExecQuery("select * from win32_networkadapterconfiguration where IPEnabled='True'")
dim ipaddrs()
ipcount = 0
for each ipItem in collip
for each ipaddr in ipItem.IPAddress
ipcount = ipcount + 1
next
..trimmed for brevity..
'''
'''STDERR from running this:
'''
| mit |
vijaysbhat/incubator-airflow | airflow/utils/email.py | 22 | 4195 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from past.builtins import basestring
import importlib
import logging
import os
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.utils import formatdate
from airflow import configuration
from airflow.exceptions import AirflowConfigException
def send_email(to, subject, html_content, files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed'):
    """
    Send email using backend specified in EMAIL_BACKEND.
    """
    backend_path = configuration.get('email', 'EMAIL_BACKEND')
    module_path, attr_name = backend_path.rsplit('.', 1)
    backend = getattr(importlib.import_module(module_path), attr_name)
    return backend(to, subject, html_content, files=files, dryrun=dryrun,
                   cc=cc, bcc=bcc, mime_subtype=mime_subtype)
def send_email_smtp(to, subject, html_content, files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed'):
    """
    Send an email with html content

    >>> send_email('test@example.com', 'foo', '<b>Foo</b> bar', ['/dev/null'], dryrun=True)
    """
    smtp_mail_from = configuration.get('smtp', 'SMTP_MAIL_FROM')
    to = get_email_address_list(to)

    msg = MIMEMultipart(mime_subtype)
    msg['Subject'] = subject
    msg['From'] = smtp_mail_from
    msg['To'] = ", ".join(to)
    recipients = list(to)
    if cc:
        cc = get_email_address_list(cc)
        msg['CC'] = ", ".join(cc)
        recipients += cc
    if bcc:
        # BCC recipients are deliberately left out of the headers
        bcc = get_email_address_list(bcc)
        recipients += bcc

    msg['Date'] = formatdate(localtime=True)
    msg.attach(MIMEText(html_content, 'html'))

    for fname in files or []:
        basename = os.path.basename(fname)
        with open(fname, "rb") as f:
            attachment = MIMEApplication(f.read(), Name=basename)
        attachment['Content-Disposition'] = 'attachment; filename="%s"' % basename
        msg.attach(attachment)

    send_MIME_email(smtp_mail_from, recipients, msg, dryrun)
def send_MIME_email(e_from, e_to, mime_msg, dryrun=False):
    """Deliver *mime_msg* from *e_from* to the *e_to* addresses over SMTP,
    using connection settings from the [smtp] configuration section.

    When dryrun is True, no connection is made at all. The SMTP connection
    is always closed, even if sending fails.
    """
    SMTP_HOST = configuration.get('smtp', 'SMTP_HOST')
    SMTP_PORT = configuration.getint('smtp', 'SMTP_PORT')
    SMTP_STARTTLS = configuration.getboolean('smtp', 'SMTP_STARTTLS')
    SMTP_SSL = configuration.getboolean('smtp', 'SMTP_SSL')
    SMTP_USER = None
    SMTP_PASSWORD = None
    try:
        SMTP_USER = configuration.get('smtp', 'SMTP_USER')
        SMTP_PASSWORD = configuration.get('smtp', 'SMTP_PASSWORD')
    except AirflowConfigException:
        logging.debug("No user/password found for SMTP, so logging in with no authentication.")
    if not dryrun:
        s = smtplib.SMTP_SSL(SMTP_HOST, SMTP_PORT) if SMTP_SSL else smtplib.SMTP(SMTP_HOST, SMTP_PORT)
        try:
            if SMTP_STARTTLS:
                s.starttls()
            if SMTP_USER and SMTP_PASSWORD:
                s.login(SMTP_USER, SMTP_PASSWORD)
            s.sendmail(e_from, e_to, mime_msg.as_string())
            # Log only after the send actually succeeded (it was previously
            # logged before sendmail, which could claim success on failure)
            logging.info("Sent an alert email to " + str(e_to))
        finally:
            # Always close the connection, even when starttls/login/sendmail
            # raises, to avoid leaking the socket
            s.quit()
def get_email_address_list(address_string):
    """Normalize a comma- or semicolon-delimited address string into a
    list of addresses; non-string input is returned unchanged."""
    if isinstance(address_string, basestring):
        for separator in (',', ';'):
            if separator in address_string:
                return address_string.split(separator)
        return [address_string]
    return address_string
| apache-2.0 |
alexston/calibre-webserver | src/calibre/ebooks/metadata/worker.py | 6 | 11994 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from threading import Thread
from Queue import Empty
import os, time, sys, shutil, json
from calibre.utils.ipc.job import ParallelJob
from calibre.utils.ipc.server import Server
from calibre.ptempfile import PersistentTemporaryDirectory, TemporaryDirectory
from calibre import prints, isbytestring
from calibre.constants import filesystem_encoding
from calibre.db.errors import NoSuchFormat
def debug(*args):
    # Print and flush immediately so output is not lost or reordered when
    # interleaved with the parallel worker processes' output.
    prints(*args)
    sys.stdout.flush()
def serialize_metadata_for(formats, tdir, id_):
    """Read combined metadata from *formats* and write it into *tdir* as
    <id_>.opf, plus a raw cover file named <id_> when cover data exists."""
    from calibre.ebooks.metadata.meta import metadata_from_formats
    from calibre.ebooks.metadata.opf2 import metadata_to_opf
    mi = metadata_from_formats(formats)
    mi.cover = None
    cdata = mi.cover_data[-1] if mi.cover_data else None
    mi.cover_data = None
    if not mi.application_id:
        mi.application_id = '__calibre_dummy__'
    opf_path = os.path.join(tdir, '%s.opf' % id_)
    with open(opf_path, 'wb') as opf_file:
        opf_file.write(metadata_to_opf(mi, default_lang='und'))
    if cdata:
        cover_path = os.path.join(tdir, str(id_))
        with open(cover_path, 'wb') as cover_file:
            cover_file.write(cdata)
def read_metadata_(task, tdir, notification=lambda x,y:x):
    # Entry point executed in the worker process; plugin-created temporary
    # format copies live in mdir for the duration of the task only.
    with TemporaryDirectory() as mdir:
        do_read_metadata(task, tdir, mdir, notification)
def do_read_metadata(task, tdir, mdir, notification):
    """Read metadata for every (id, formats) pair in *task*.

    For each book, runs the import plugins on each format file, serializes
    the combined metadata into tdir as <id>.opf (plus a cover file named
    <id>), and records any plugin-modified format files in <id>.import so
    the caller can pick them up. Failures are written to <id>.error
    instead of being raised. *notification* is called with (0.5, id) after
    each successfully processed book.
    """
    from calibre.customize.ui import run_plugins_on_import
    for x in task:
        try:
            id_, formats = x
        except Exception:
            # Malformed task entry; skip it. (Was a bare except, which
            # also swallowed KeyboardInterrupt/SystemExit.)
            continue
        try:
            if isinstance(formats, basestring): formats = [formats]
            import_map = {}
            fmts, metadata_fmts = [], []
            for format in formats:
                mfmt = format
                name, ext = os.path.splitext(os.path.basename(format))
                nfp = run_plugins_on_import(format)
                if not nfp or nfp == format or not os.access(nfp, os.R_OK):
                    nfp = None
                else:
                    # Ensure that the filename is preserved so that
                    # reading metadata from filename is not broken
                    nfp = os.path.abspath(nfp)
                    nfext = os.path.splitext(nfp)[1]
                    mfmt = os.path.join(mdir, name + nfext)
                    shutil.copyfile(nfp, mfmt)
                metadata_fmts.append(mfmt)
                fmts.append(nfp)
            serialize_metadata_for(metadata_fmts, tdir, id_)
            for format, nfp in zip(formats, fmts):
                if not nfp:
                    continue
                if isinstance(nfp, unicode):
                    # NOTE(review): the encoded result is discarded; this
                    # can only serve to raise if the path is not encodable
                    # in the filesystem encoding -- confirm intent.
                    nfp.encode(filesystem_encoding)
                # Renamed from 'x', which shadowed the outer loop variable
                norm = lambda j : os.path.abspath(os.path.normpath(os.path.normcase(j)))
                if norm(nfp) != norm(format) and os.access(nfp, os.R_OK|os.W_OK):
                    fmt = os.path.splitext(format)[1].replace('.', '').lower()
                    nfmt = os.path.splitext(nfp)[1].replace('.', '').lower()
                    dest = os.path.join(tdir, '%s.%s'%(id_, nfmt))
                    shutil.copyfile(nfp, dest)
                    import_map[fmt] = dest
            if import_map:
                with open(os.path.join(tdir, str(id_)+'.import'), 'wb') as f:
                    for fmt, nfp in import_map.items():
                        f.write(fmt+':'+nfp+'\n')
            notification(0.5, id_)
        except Exception:
            # Record the failure for this book and keep going with the rest
            import traceback
            with open(os.path.join(tdir, '%s.error'%id_), 'wb') as f:
                f.write(traceback.format_exc())
class Progress(object):
    """Callable that reports one finished book: puts a tuple of
    (id, opf-or-error path, cover path or None) on the result queue."""
    def __init__(self, result_queue, tdir):
        self.result_queue = result_queue
        self.tdir = tdir
    def __call__(self, id):
        cover_path = os.path.join(self.tdir, str(id))
        cover = cover_path if os.path.exists(cover_path) else None
        res = os.path.join(self.tdir, '%s.error' % id)
        if not os.path.exists(res):
            # No error was recorded, so the metadata opf must be reported
            res = res.replace('.error', '.opf')
        self.result_queue.put((id, res, cover))
class ReadMetadata(Thread):
# Daemon thread that farms metadata-reading tasks out to a parallel job
# Server and forwards per-book results through a Progress instance as job
# notifications arrive.  Setting self.canceled stops the polling loop.
def __init__(self, tasks, result_queue, spare_server=None):
self.tasks, self.result_queue = tasks, result_queue
self.spare_server = spare_server
self.canceled = False
Thread.__init__(self)
self.daemon = True
self.failure_details = {}
# Persistent scratch dir shared with the worker jobs for results.
self.tdir = PersistentTemporaryDirectory('_rm_worker')
def run(self):
jobs, ids = set([]), set([])
# Collect every book id so stragglers can be reported at the end.
for t in self.tasks:
for b in t:
ids.add(b[0])
progress = Progress(self.result_queue, self.tdir)
# Reuse a caller-supplied server when given, else spin up our own.
server = Server() if self.spare_server is None else self.spare_server
try:
for i, task in enumerate(self.tasks):
job = ParallelJob('read_metadata',
'Read metadata (%d of %d)'%(i, len(self.tasks)),
lambda x,y:x, args=[task, self.tdir])
jobs.add(job)
server.add_job(job)
# Poll jobs until all finish or we are canceled, draining each
# job's notification queue and reporting newly finished ids.
while not self.canceled:
time.sleep(0.2)
running = False
for job in jobs:
while True:
try:
id = job.notifications.get_nowait()[-1]
if id in ids:
progress(id)
ids.remove(id)
except Empty:
break
job.update(consume_notifications=False)
if not job.is_finished:
running = True
if not running:
break
finally:
server.close()
time.sleep(1)
if self.canceled:
return
# Report any ids that never produced a notification (error cases).
for id in ids:
progress(id)
for job in jobs:
if job.failed:
prints(job.details)
# Best-effort cleanup of per-job log files.
if os.path.exists(job.log_path):
try:
os.remove(job.log_path)
except:
pass
def read_metadata(paths, result_queue, chunk=50, spare_server=None):
    """Split *paths* into chunk-sized batches and start a ReadMetadata thread.

    Results are delivered asynchronously on *result_queue*; the started
    thread object is returned so the caller can monitor or cancel it.
    """
    tasks = [paths[start:start + chunk]
             for start in range(0, len(paths), chunk)]
    worker = ReadMetadata(tasks, result_queue, spare_server=spare_server)
    worker.start()
    return worker
###########################################################################
############ Saving #####################
###########################################################################
class SaveWorker(Thread):
# Daemon thread that exports books from the library database to disk via
# parallel 'save_book' jobs.  It serializes each book's metadata, cover and
# format files into a temp dir, hands per-chunk JSON descriptors to worker
# jobs, and forwards (id, title, ok, traceback) results onto result_queue.
# Note: the thread starts itself in __init__.
def __init__(self, result_queue, db, ids, path, opts, spare_server=None):
Thread.__init__(self)
self.daemon = True
self.path, self.opts = path, opts
self.ids = ids
self.db = db
self.canceled = False
self.result_queue = result_queue
self.error = None
self.spare_server = spare_server
self.start()
def collect_data(self, ids, tdir):
# Dump OPF metadata, cover JPEG and every available format file for each
# book id into tdir; returns {id: [opf_xml, cover_path, {fmt: path}, mtime]}.
from calibre.ebooks.metadata.opf2 import metadata_to_opf
data = {}
for i in set(ids):
mi = self.db.get_metadata(i, index_is_id=True, get_cover=True,
cover_as_data=True)
opf = metadata_to_opf(mi)
if isbytestring(opf):
opf = opf.decode('utf-8')
cpath = None
if mi.cover_data and mi.cover_data[1]:
cpath = os.path.join(tdir, 'cover_%s.jpg'%i)
with lopen(cpath, 'wb') as f:
f.write(mi.cover_data[1])
if isbytestring(cpath):
cpath = cpath.decode(filesystem_encoding)
formats = {}
if mi.formats:
for fmt in mi.formats:
fpath = os.path.join(tdir, 'fmt_%s.%s'%(i, fmt.lower()))
with lopen(fpath, 'wb') as f:
try:
self.db.copy_format_to(i, fmt, f, index_is_id=True)
except NoSuchFormat:
# Listed format has no file on disk; skip it.
continue
else:
if isbytestring(fpath):
fpath = fpath.decode(filesystem_encoding)
formats[fmt.lower()] = fpath
data[i] = [opf, cpath, formats, mi.last_modified.isoformat()]
return data
def run(self):
# All intermediate files live in one temp dir, removed on exit.
with TemporaryDirectory('save_to_disk_data') as tdir:
self._run(tdir)
def _run(self, tdir):
from calibre.library.save_to_disk import config
server = Server() if self.spare_server is None else self.spare_server
ids = set(self.ids)
# Let the server split the ids into per-worker chunks.
tasks = server.split(list(ids))
jobs = set([])
c = config()
# Snapshot the effective save-to-disk preferences for the workers.
recs = {}
for pref in c.preferences:
recs[pref.name] = getattr(self.opts, pref.name)
plugboards = self.db.prefs.get('plugboards', {})
template_functions = self.db.prefs.get('user_template_functions', [])
for i, task in enumerate(tasks):
tids = [x[-1] for x in task]
data = self.collect_data(tids, tdir)
dpath = os.path.join(tdir, '%d.json'%i)
with open(dpath, 'wb') as f:
f.write(json.dumps(data, ensure_ascii=False).encode('utf-8'))
job = ParallelJob('save_book',
'Save books (%d of %d)'%(i, len(tasks)),
lambda x,y:x,
args=[tids, dpath, plugboards, template_functions, self.path, recs])
jobs.add(job)
server.add_job(job)
# Poll until all jobs finish or the caller sets self.canceled.
while not self.canceled:
time.sleep(0.2)
running = False
for job in jobs:
self.get_notifications(job, ids)
if not job.is_finished:
running = True
if not running:
break
# Flush any results that arrived only in the final job.result payloads.
for job in jobs:
if not job.result:
continue
for id_, title, ok, tb in job.result:
if id_ in ids:
self.result_queue.put((id_, title, ok, tb))
ids.remove(id_)
server.close()
time.sleep(1)
if self.canceled:
return
for job in jobs:
if job.failed:
prints(job.details)
self.error = job.details
# Best-effort cleanup of per-job log files.
if os.path.exists(job.log_path):
try:
os.remove(job.log_path)
except:
pass
def get_notifications(self, job, ids):
# Drain a job's notification queue, forwarding each not-yet-seen
# (id, title, ok, traceback) tuple onto the result queue.
job.update(consume_notifications=False)
while True:
try:
id, title, ok, tb = job.notifications.get_nowait()[0]
if id in ids:
self.result_queue.put((id, title, ok, tb))
ids.remove(id)
except Empty:
break
def save_book(ids, dpath, plugboards, template_functions, path, recs,
        notification=lambda x,y:x):
    """Worker entry point: save the books described by the JSON file *dpath*.

    ids: book ids handled by this worker.
    dpath: path to a JSON file mapping book id -> [opf, cover, formats, mtime]
        (written by SaveWorker._run).
    plugboards, template_functions: library preferences forwarded to the
        save machinery.
    path: destination root directory; recs: option name -> value overrides.
    notification: called with (id, title, ok, traceback) per saved book.
    Returns the list of (id, title, ok, traceback) results.
    """
    from calibre.library.save_to_disk import config, save_serialized_to_disk
    from calibre.customize.ui import apply_null_metadata
    from calibre.utils.formatter_functions import load_user_template_functions
    load_user_template_functions('', template_functions)
    opts = config().parse()
    # Apply the preference snapshot taken in the parent process.
    for name in recs:
        setattr(opts, name, recs[name])
    results = []
    def callback(id, title, failed, tb):
        results.append((id, title, not failed, tb))
        notification((id, title, not failed, tb))
        return True
    # Fix: read via a context manager so the file handle is always closed
    # (the original open(dpath, 'rb').read() leaked the handle).
    with open(dpath, 'rb') as f:
        data_ = json.loads(f.read().decode('utf-8'))
    # JSON keys are strings; convert back to integer book ids.
    data = {}
    for k, v in data_.iteritems():
        data[int(k)] = v
    with apply_null_metadata:
        save_serialized_to_disk(ids, data, plugboards, path, opts, callback)
    return results
| gpl-3.0 |
oudalab/fajita | pythonAPI/flask/lib/python3.5/site-packages/setuptools/command/install_scripts.py | 454 | 2439 | from distutils import log
import distutils.command.install_scripts as orig
import os
import sys
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
# When True, skip generating entry-point wrapper scripts entirely.
self.no_ep = False
def run(self):
# Generate console/gui entry-point wrapper scripts after (optionally)
# performing the stock distutils script installation.
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
# Build a Distribution describing this project's egg metadata so the
# writer can locate its entry points.
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
# Windows installers embed a generic launcher name instead of the
# build-time interpreter path.
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
if exec_param == sys.executable:
# In case the path to the Python executable contains a space, wrap
# it so it's not split up.
exec_param = [exec_param]
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
# Make executable for everyone, subject to the current umask.
chmod(target, 0o777 - mask)
| mit |
tm1249wk/WASHLIGGGHTS-2.3.7 | python/examples/pizza/vmd.py | 31 | 8758 | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# vmd tool
# Minimalistic VMD embedding for Pizza.py
# (c) 2010 Axel Kohlmeyer <akohlmey@gmail.com>
# This class will replace the VMD startup script,
# open a pipe to the executable,
# and feed it Tcl command lines one at a time
oneline = "Control VMD from python"
docstr = """
v = vmd() start up VMD
v.stop() shut down VMD instance
v.clear() delete all visualizations
v.rep(style) set default representation style. One of
(Lines|VDW|Licorice|DynamicBonds|Points|CPK)
v.new(file[,type]) load new file (default file type 'lammpstrj')
v.data(file[,atomstyle]) load new data file (default atom style 'full')
v.replace(file[,type]) replace current frames with new file
v.append(file[,type]) append file to current frame(s)
v.set(snap,x,y,z,(True|False)) set coordinates from a pizza.py snapshot to new or current frame
v.frame(frame) set current frame
v.flush() flush pending input to VMD and update GUI
v.read(file) read Tcl script file (e.g. saved state)
v.enter() enter interactive shell
v.debug([True|False]) display generated VMD script commands?
"""
# History
# 11/10, Axel Kohlmeyer (Temple U): original version
# Imports and external programs
import types, os
import numpy
try: from DEFAULTS import PIZZA_VMDNAME
except: PIZZA_VMDNAME = "vmd"
try: from DEFAULTS import PIZZA_VMDDIR
except: PIZZA_VMDDIR = "/usr/local/lib/vmd"
try: from DEFAULTS import PIZZA_VMDDEV
except: PIZZA_VMDDEV = "win"
try: from DEFAULTS import PIZZA_VMDARCH
except: PIZZA_VMDARCH = "LINUX"
# try these settings for a Mac
#PIZZA_VMDNAME = "vmd"
#PIZZA_VMDDIR = "/Applications/VMD\ 1.8.7.app/Contents/vmd"
#PIZZA_VMDDEV = "win"
#PIZZA_VMDARCH = "MACOSXX86"
try: import pexpect
except:
print "pexpect from http://pypi.python.org/pypi/pexpect", \
"is required for vmd tool"
raise
# Class definition
class vmd:
# Minimal controller for an external VMD process.  A VMD executable is
# spawned through pexpect and driven by sending Tcl command lines and
# waiting for the 'vmd >' prompt to return.
# --------------------------------------------------------------------
def __init__(self):
# Reconstruct the environment the stock vmd launch script would set up,
# then spawn the executable and wait for the first prompt.
self.vmddir = PIZZA_VMDDIR
self.vmdexe = PIZZA_VMDDIR + '/' + PIZZA_VMDNAME + '_' + PIZZA_VMDARCH
# these are all defaults copied from the vmd launch script
os.environ['VMDDIR'] = PIZZA_VMDDIR
os.environ['VMDDISPLAYDEVICE'] = PIZZA_VMDDEV
os.environ['VMDSCRPOS'] = "596 190"
os.environ['VMDSCRSIZE'] = "669 834"
os.environ['VMDSCRHEIGHT'] = "6.0"
os.environ['VMDSCRDIST'] = "-2.0"
os.environ['VMDTITLE'] = "on"
os.environ['TCL_LIBRARY'] = PIZZA_VMDDIR + "/scripts/tcl"
os.environ['STRIDE_BIN'] = PIZZA_VMDDIR + "/stride_" + PIZZA_VMDARCH
os.environ['SURF_BIN'] = PIZZA_VMDDIR + "/surf_" + PIZZA_VMDARCH
os.environ['TACHYON_BIN'] = PIZZA_VMDDIR + "/tachyon_" + PIZZA_VMDARCH
ldpath = os.environ.get('LD_LIBRARY_PATH','')
if ldpath == '':
os.environ['LD_LIBRARY_PATH'] = PIZZA_VMDDIR
else:
os.environ['LD_LIBRARY_PATH'] = ldpath + ':' + PIZZA_VMDDIR
# NOTE(review): this re-reads LD_LIBRARY_PATH (now never empty, since it
# was just set above) to decide how to set PYTHONPATH -- looks like a
# copy-paste slip; confirm whether PYTHONPATH was meant to be checked.
ldpath = os.environ.get('LD_LIBRARY_PATH','')
if ldpath == '':
os.environ['PYTHONPATH'] = PIZZA_VMDDIR
else:
os.environ['PYTHONPATH'] = PIZZA_VMDDIR + "/scripts/python"
self.debugme = False
# open pipe to vmd and wait until we have a prompt
self.VMD = pexpect.spawn(self.vmdexe)
self.VMD.expect('vmd >')
# --------------------------------------------------------------------
# post command to vmd and wait until the prompt returns.
def __call__(self,command):
if self.VMD.isalive():
self.VMD.sendline(command)
self.VMD.expect('vmd >')
if self.debugme:
print "call+result:"+self.VMD.before
return
# --------------------------------------------------------------------
# exit VMD
def stop(self):
self.__call__("quit")
del self.VMD
# --------------------------------------------------------------------
# force VMD display and GUI update.
def flush(self):
self.__call__('display update ui')
# --------------------------------------------------------------------
# turn on debugging info (echo every command/result exchange)
def debug(self,status=True):
if status and not self.debugme:
print 'Turning vmd.py debugging ON.'
if not status and self.debugme:
print 'Turning vmd.py debugging OFF.'
self.debugme = status
# --------------------------------------------------------------------
# emulate a regular tcl command prompt
def enter(self,mode='tcl'):
self.__call__('menu main off')
self.__call__('menu main on')
while 1:
try:
command = raw_input("vmd > ")
except EOFError:
print "(EOF)"
self.__call__('menu main off')
return
if command == "quit" or command == "exit":
self.__call__('menu main off')
return
if command == "gopython":
print "gopython not supported here"
continue
self.__call__(command)
# --------------------------------------------------------------------
# read and execute tcl script file (e.g. a saved state)
def read(self,filename):
self.__call__('play ' + filename)
self.flush()
# --------------------------------------------------------------------
# remove all molecules, data and visualizations
def clear(self):
self.__call__("mol delete all")
# --------------------------------------------------------------------
# set the default representation style (one of the whitelisted names);
# silently ignores unknown styles
def rep(self,style='Lines'):
if style == 'Lines' or style == 'VDW' or style == 'Licorice' \
or style == 'DynamicBonds' or style == 'Points' or style == 'CPK':
self.__call__('mol default style ' + style)
# --------------------------------------------------------------------
# navigate to a given frame
def frame(self,framespec):
self.__call__('animate goto ' + str(framespec))
# --------------------------------------------------------------------
# load a new molecule from a file supported by a molfile plugin
def new(self,filename,filetype='lammpstrj'):
self.__call__('mol new ' + filename + ' type ' + filetype + ' waitfor all')
self.flush()
# --------------------------------------------------------------------
# load a new molecule from a data file via the topotools plugin
def data(self,filename,atomstyle='full'):
self.__call__('package require topotools 1.0')
self.__call__('topo readlammpsdata ' + filename + ' ' + atomstyle)
self.flush()
# --------------------------------------------------------------------
# append all frames from a given file to the current molecule,
# preserving each molecule's current viewpoint across the load
def append(self,filename,filetype='lammpstrj'):
self.__call__('set tmol [molinfo top]')
self.__call__('array set viewpoints {}')
self.__call__('foreach mol [molinfo list] { set viewpoints($mol) [molinfo $mol get { center_matrix rotate_matrix scale_matrix global_matrix}]}')
self.__call__('mol addfile ' + filename + ' mol $tmol type ' + filetype + ' waitfor all')
self.__call__('foreach mol [molinfo list] { molinfo $mol set {center_matrix rotate_matrix scale_matrix global_matrix} $viewpoints($mol)}')
self.flush()
# --------------------------------------------------------------------
# replace all frames of a molecule with those from a given file
# (deletes existing frames first, restores the saved viewpoints after)
def update(self,filename,filetype='lammpstrj'):
self.__call__('set tmol [molinfo top]')
self.__call__('array set viewpoints {}')
self.__call__('foreach mol [molinfo list] {set viewpoints($mol) [molinfo $mol get { center_matrix rotate_matrix scale_matrix global_matrix}]}')
self.__call__('animate delete all $tmol')
self.__call__('mol addfile ' + filename + ' mol $tmol type ' + filetype + ' waitfor all')
self.__call__('foreach mol [molinfo list] {molinfo $mol set {center_matrix rotate_matrix scale_matrix global_matrix} $viewpoints($mol)}')
self.flush()
# --------------------------------------------------------------------
# add or overwrite coordinates with coordinates in a snapshot;
# snap[idx,x/y/z] supplies the per-atom values, append=True duplicates
# the current frame first so the original coordinates are kept
def set(self,snap,x,y,z,append=True):
self.__call__('set vmdsel [atomselect top all]')
if append:
self.__call__('animate dup [molinfo top]')
cmd = '$vmdsel set {x y z} {'
for idx in range(0,snap.natoms):
cmd += ' {'+str(snap[idx,x])+' '+str(snap[idx,y])+' '+str(snap[idx,z])+'}'
cmd += '}'
self.__call__(cmd)
self.__call__('$vmdsel delete ; unset vmdsel')
self.flush()
| gpl-2.0 |
OpenPathView/batchPanoMaker | opv_import/helpers/udev_observer.py | 1 | 1433 | # coding: utf-8
# Copyright (C) 2017 Open Path View, Maison Du Libre
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# Contributors: Benjamin BERNARD <benjamin.bernard@openpathview.fr>
# Email: team@openpathview.fr
# Description: Simply create an udev observer.
import pyudev
def create_udev_block_observer(event_listener, observer_name: str) -> pyudev.MonitorObserver:
    """Build a pyudev observer that watches 'block' subsystem udev events.

    :param event_listener: Callable executed for each detected block device
        event, invoked with (action: str, device: pyudev.Device).
    :param observer_name: Name assigned to the observer thread.
    :return: The created pyudev.MonitorObserver (not yet started).
    """
    udev_context = pyudev.Context()
    block_monitor = pyudev.Monitor.from_netlink(udev_context)
    block_monitor.filter_by(subsystem='block')
    return pyudev.MonitorObserver(block_monitor, event_listener, name=observer_name)
Linkid/numpy | doc/f2py/collectinput.py | 111 | 2300 | #!/usr/bin/env python
"""
collectinput - Collects all files that are included to a main Latex document
with \input or \include commands. These commands must be
in separate lines.
Copyright 1999 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
Usage:
collectinput <infile> <outfile>
collectinput <infile> # <outfile>=inputless_<infile>
collectinput # in and out are stdin and stdout
"""
from __future__ import division, absolute_import, print_function
__version__ = "0.0"
# stdoutflag is set when no output filename could be derived from argv,
# in which case output goes to the real stdout.
stdoutflag=0
import sys
import fileinput
import re
if sys.version_info[0] >= 3:
from subprocess import getoutput
else:
from commands import getoutput
# Argument handling: argv[2] is the output file; if absent, derive
# 'inputless_<argv[1]>'; if argv[1] is also absent, use stdin/stdout.
try: fn=sys.argv[2]
except:
try: fn='inputless_'+sys.argv[1]
except: stdoutflag=1
try: fi=sys.argv[1]
except: fi=()
if not stdoutflag:
# Redirect print() to the output file for the rest of the script.
sys.stdout=open(fn, 'w')
# Matches a line prefix that is not inside a \verb construct, followed by
# an \input or \include command whose argument is extracted below.
nonverb=r'[\w\s\\&=\^\*\.\{\(\)\[\?\+\$/]*(?!\\verb.)'
input=re.compile(nonverb+r'\\(input|include)\*?\s*\{?.*}?')
comment=re.compile(r'[^%]*%')
for l in fileinput.input(fi):
l=l[:-1]
l1=''
# Split off a trailing LaTeX comment so it is preserved verbatim.
if comment.match(l):
m=comment.match(l)
l1=l[m.end()-1:]
l=l[:m.end()-1]
m=input.match(l)
if m:
# Extract the filename argument of \input/\include.
l=l.strip()
if l[-1]=='}': l=l[:-1]
i=m.end()-2
sys.stderr.write('>>>>>>')
while i>-1 and (l[i] not in [' ', '{']): i=i-1
if i>-1:
fn=l[i+1:]
# Try the name as-is, then with a '.tex' suffix.
try: f=open(fn, 'r'); flag=1; f.close()
except:
try: f=open(fn+'.tex', 'r'); flag=1;fn=fn+'.tex'; f.close()
except: flag=0
if flag==0:
sys.stderr.write('Could not open a file: '+fn+'\n')
print(l+l1)
continue
elif flag==1:
# Recurse by re-running this script on the included file and
# splicing its output between begin/end markers.
sys.stderr.write(fn+'\n')
print('%%%%% Begin of '+fn)
print(getoutput(sys.argv[0]+' < '+fn))
print('%%%%% End of '+fn)
else:
sys.stderr.write('Could not extract a file name from: '+l)
print(l+l1)
else:
# Ordinary line: emit unchanged (with any trailing comment restored).
print(l+l1)
sys.stdout.close()
| bsd-3-clause |
shanglt/youtube-dl | youtube_dl/extractor/bild.py | 94 | 1485 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
fix_xml_ampersands,
)
class BildIE(InfoExtractor):
# Extractor for video clips on bild.de.  Metadata comes from the site's
# XML endpoint derived from the page URL (see _real_extract).
_VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html'
IE_DESC = 'Bild.de'
_TEST = {
'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html',
'md5': 'dd495cbd99f2413502a1713a1156ac8a',
'info_dict': {
'id': '38184146',
'ext': 'mp4',
'title': 'BILD hat sie getestet',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 196,
'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
# The ',view=xml' variant of the page URL serves the clip metadata.
xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
# fix_xml_ampersands repairs unescaped '&' before XML parsing.
doc = self._download_xml(xml_url, video_id, transform_source=fix_xml_ampersands)
# 'duration' attribute is in milliseconds; scale down to seconds.
duration = int_or_none(doc.attrib.get('duration'), scale=1000)
return {
'id': video_id,
'title': doc.attrib['ueberschrift'],
'description': doc.attrib.get('text'),
'url': doc.attrib['src'],
'thumbnail': doc.attrib.get('img'),
'duration': duration,
}
| unlicense |
adam-lee/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
# Main window for the scheduler trace viewer: a scrollable canvas on which
# the sched_tracer draws one rectangle row per CPU/task, with zoom and
# keyboard/mouse navigation.  The horizontal axis is time; pixel<->time
# conversion is handled by us_to_px()/px_to_us() and scaled by self.zoom.
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
# Trace time window (microseconds) provided by the tracer.
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
# Convert microseconds to pixels at the current zoom level.
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
# Inverse of us_to_px().
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
# Current scroll origin in pixels.
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
# Current scroll origin as a time offset (microseconds).
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
# Draw one time-span rectangle in row *nr*; an optional top_color strip of
# EVENT_MARKING_WIDTH pixels marks events along the top edge.
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
# Ask the tracer to redraw the rectangles within a visible time window
# (offsets are relative to ts_start).
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
# Map a y pixel coordinate to a rectangle row index; -1 when the position
# falls outside any row (or in the spacing between rows).
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
# Replace the summary text widget below the canvas.
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
# Re-apply scrollbars after a zoom change, keeping time position *x* at
# the left edge of the view.
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
# Keyboard handling: '+'/'-' zoom, arrow keys scroll one unit.
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.