repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
FowlerLab/Enrich2 | enrich2/seqlib.py | 1 | 15885 | from __future__ import print_function
import logging
import os.path
import pandas as pd
import numpy as np
from collections import OrderedDict
from matplotlib.backends.backend_pdf import PdfPages
import sys
from .plots import counts_plot
from .storemanager import StoreManager, fix_filename, ELEMENT_LABELS
class SeqLib(StoreManager):
    """
    Abstract class for handling count data from a single sequencing library.
    """

    # Maps internal filter keys to the human-readable messages used in
    # reports and logs (see report_filter_stats / save_filter_stats).
    # Note: the following block is referenced by line number above.
    # When adding new messages, update the documentation line numbers also!
    filter_messages = OrderedDict(
        [
            ("min quality", "single-base quality"),
            ("avg quality", "average quality"),
            ("max N", "excess N bases"),
            ("chastity", "not chaste"),
            ("remove unresolvable", "unresolvable mismatch"),
            ("merge failure", "unable to merge reads"),
            ("total", "total"),
        ]
    )

    # Suffix used when naming this object's HDF5 data store file.
    store_suffix = "lib"
def __init__(self):
    """
    Initialize a SeqLib with no timepoint, no counts file, and the
    default (permissive) set of read filters.
    """
    StoreManager.__init__(self)
    # Use the class *name* so the logger name matches the format later
    # applied in configure(); the original interpolated the class object
    # itself, producing "<class '...'>" in log records.
    self.logger = logging.getLogger(
        "{}.{}".format(__name__, self.__class__.__name__)
    )
    self.timepoint = None          # integer timepoint, set by configure()
    self.counts_file = None        # optional pre-counted input file
    self.report_filtered = None    # whether to log each filtered read
    self._filters = dict()         # active filter settings (see filters property)
    self.filter_stats = dict()     # per-filter counts of rejected reads
    # Defaults are chosen so that no filter rejects anything until the
    # user overrides them via the filters setter.
    self.default_filters = dict()
    self.default_filters.update({"min quality": 0})
    self.default_filters.update({"max N": sys.maxsize})
    self.default_filters.update({"avg quality": 0})
    self.default_filters.update({"chastity": False})
@property
def filters(self):
    """Dictionary of the filtering options currently in effect."""
    return self._filters
@filters.setter
def filters(self, config_filters):
    """
    Set up the filters dictionary using the options selected in
    *config_filters*, filling in missing entries with defaults.

    Unrecognized keys are collected and reported in a single warning;
    ``None`` values leave the default in place. Also resets every entry
    of ``filter_stats`` to zero for the new filter set.
    """
    self._filters.clear()
    self._filters.update(self.default_filters)
    unused = list()
    for key in config_filters:
        if key in self._filters:
            # None means "not specified" -- keep the default value
            if config_filters[key] is not None:
                self._filters[key] = config_filters[key]
        else:
            unused.append(key)
    if len(unused) > 0:
        self.logger.warning(
            "Unused filter parameters ({})" "".format(", ".join(unused))
        )
    # restart filtering statistics from zero for the new configuration
    self.filter_stats.clear()
    for key in self._filters:
        self.filter_stats[key] = 0
    self.filter_stats["total"] = 0
def serialize_filters(self):
    """
    Return a dictionary of filtering options that have non-default values.
    """
    # Only options the user actually changed are worth persisting; the
    # defaults are re-applied by the filters setter on load.
    return {
        name: value
        for name, value in self.filters.items()
        if value != self.default_filters[name]
    }
def _children(self):
"""
These objects have no children. Returns ``None``.
"""
return None
def add_child(self, child):
    """
    No children, raises an AttributeError.

    SeqLib objects are always leaves of the analysis tree.
    """
    raise AttributeError("SeqLib objects do not support adding children")
def remove_child_id(self, tree_id):
    """
    No children, raises an AttributeError.

    Counterpart of :py:meth:`add_child`; SeqLib objects are leaves.
    """
    raise AttributeError("SeqLib objects do not support removing children")
def validate(self):
    """
    Validates paramaters for a configured SeqLib. Currently does nothing.

    Derived classes override this to check their own configuration.
    """
    pass
def has_wt_sequence(self):
    """
    Report whether this object carries a wild type sequence.

    The base implementation always answers ``False``; subclasses that
    track a wild type sequence (such as
    :py:class:`~seqlib.seqlib.VariantSeqLib`) override this method.
    """
    return False
def configure(self, cfg):
    """
    Set up the object using the config object *cfg*, usually derived from
    a ``.json`` file.

    Requires a ``"timepoint"`` entry; ``"report filtered reads"`` and
    ``"counts file"`` are optional and default to ``False`` / ``None``.

    Raises:
        KeyError: if a required configuration value is missing.
        ValueError: if ``"timepoint"`` cannot be converted to an int.
    """
    StoreManager.configure(self, cfg)
    # Re-create the logger so its name includes this object's name,
    # which is only known after StoreManager.configure() runs.
    self.logger = logging.getLogger(
        "{}.{} - {}".format(__name__, self.__class__.__name__, self.name)
    )
    try:
        self.timepoint = int(cfg["timepoint"])
        if "report filtered reads" in cfg:
            self.report_filtered = cfg["report filtered reads"]
        else:
            self.report_filtered = False
        if "counts file" in cfg:
            self.counts_file = cfg["counts file"]
        else:
            self.counts_file = None
    except KeyError as key:
        raise KeyError(
            "Missing required config value {key}" "".format(key=key), self.name
        )
    except ValueError as value:
        raise ValueError(
            "Invalid parameter value {value}" "".format(value=value), self.name
        )
def serialize(self):
    """
    Format this object (and its children) as a config object suitable for
    dumping to a config file.

    Inverse of :py:meth:`configure`; ``"counts file"`` is only included
    when one was set.
    """
    cfg = StoreManager.serialize(self)
    cfg["timepoint"] = self.timepoint
    cfg["report filtered reads"] = self.report_filtered
    if self.counts_file is not None:
        cfg["counts file"] = self.counts_file
    return cfg
def calculate(self):
    """
    Pure virtual method that defines how the data are counted.

    Every concrete SeqLib subclass must override this.
    """
    raise NotImplementedError("must be implemented by subclass")
def report_filtered_read(self, fq, filter_flags):
    """
    Write the :py:class:`~fqread.FQRead` object *fq* to the ``DEBUG``
    log. The dictionary *filter_flags* contains ``True``
    values for each filtering option that applies to *fq*. Keys in
    *filter_flags* are converted to messages using the
    ``SeqLib.filter_messages`` dictionary.
    """
    # The original also passed name=self.name, but the format string has
    # no {name} placeholder, so the argument was dead and has been removed.
    self.logger.debug(
        "Filtered read ({messages})\n{read!s}".format(
            messages=", ".join(
                SeqLib.filter_messages[x] for x in filter_flags if filter_flags[x]
            ),
            read=fq,
        )
    )
def save_counts(self, label, df_dict, raw):
    """
    Convert the counts in the dictionary *df_dict* into a DataFrame object
    and save it to the data store.

    If *raw* is ``True``, the counts are stored under
    ``"/raw/label/counts"``; else ``"/main/label/counts"``.

    Raises:
        ValueError: if *df_dict* is empty (nothing was counted).
    """
    # Idiomatic emptiness test; the original built a keys view just to len() it.
    if not df_dict:
        raise ValueError("Failed to count {} [{}]".format(label, self.name))
    df = pd.DataFrame.from_dict(df_dict, orient="index", dtype=np.int32)
    df.columns = ["count"]
    # Most-abundant first, so the top of the table is the most informative.
    df.sort_values("count", ascending=False, inplace=True)
    self.logger.info(
        "Counted {n} {label} ({u} unique)".format(
            n=df["count"].sum(), u=len(df.index), label=label
        )
    )
    if raw:
        key = "/raw/{}/counts".format(label)
    else:
        key = "/main/{}/counts".format(label)
    self.store.put(key, df, format="table", data_columns=df.columns)
    # (the original ended with a superfluous `del df`; the local is
    # released on return anyway)
def save_filtered_counts(self, label, query):
    """
    Filter the counts in ``"/raw/label/counts"`` using the *query* string
    and store the result in ``"/main/label/counts"``

    For more information on building query strings, see
    http://pandas.pydata.org/pandas-docs/stable/io.html#querying-a-table
    """
    self.logger.info("Converting raw {} counts to main counts".format(label))
    raw_table = "/raw/{}/counts".format(label)
    main_table = "/main/{}/counts".format(label)
    # map_table (from StoreManager) performs the store-to-store copy with
    # the query applied.
    self.map_table(source=raw_table, destination=main_table, source_query=query)
    self.logger.info(
        "Counted {n} {label} ({u} unique) after query".format(
            n=self.store[main_table]["count"].sum(),
            u=len(self.store[main_table].index),
            label=label,
        )
    )
def report_filter_stats(self):
    """
    Create report file for the number of filtered reads.

    The report file is located in the output directory, named
    ``SeqLibName.filter.txt``.
    It contains the number of reads filtered for each category, plus the
    total number filtered.

    .. note:: Reads are checked for all quality-based criteria before \
    filtering.
    """
    with open(
        os.path.join(self.output_dir, fix_filename(self.name) + ".filter.txt"), "w"
    ) as handle:
        # Report categories from most to least frequent; skip empty ones.
        for key in sorted(
            self.filter_stats, key=self.filter_stats.__getitem__, reverse=True
        ):
            if key != "total" and self.filter_stats[key] > 0:
                print(
                    SeqLib.filter_messages[key],
                    self.filter_stats[key],
                    sep="\t",
                    file=handle,
                )
        # "total" is always written last, even when zero.
        print("total", self.filter_stats["total"], sep="\t", file=handle)
    self.logger.info("Wrote filtering statistics")
def save_filter_stats(self):
    """
    Save a DataFrame containing the number of filtered reads under
    ``'/raw/filter'``.

    This DataFrame contains the same information as ``report_filter_stats``
    """
    # Materialize the OrderedDict values view into a list; under Python 3
    # a dict view is not a sequence, and an explicit list is safe with
    # every pandas version.
    df = pd.DataFrame(
        index=list(SeqLib.filter_messages.values()), columns=["count"]
    )
    for key in self.filter_stats.keys():
        # keep non-zero categories, and always keep "total"
        if self.filter_stats[key] > 0 or key == "total":
            df.loc[SeqLib.filter_messages[key], "count"] = self.filter_stats[key]
    # rows never assigned above remain NaN; drop them
    df.dropna(inplace=True)
    self.store.put(
        "/raw/filter", df.astype(int), format="table", data_columns=df.columns
    )
def read_quality_filter(self, fq):
    """
    Check the quality of the FQRead object *fq*.

    Checks ``'chastity'``, ``'min quality'``, ``'avg quality'``,
    ``'max N'``, and ``'remove unresolvable'``.
    Counts failed reads for later output and reports the filtered read if
    desired.
    Returns ``True`` if the read passes all filters, else ``False``.

    Every applicable filter is evaluated (so per-filter statistics are
    accurate) before the single "total" counter is updated.
    """
    filter_flags = dict()
    for key in self.filters:
        filter_flags[key] = False
    if self.filters["chastity"]:
        if not fq.is_chaste():
            self.filter_stats["chastity"] += 1
            filter_flags["chastity"] = True
    # value 0 disables the quality filters entirely
    if self.filters["min quality"] > 0:
        if fq.min_quality() < self.filters["min quality"]:
            self.filter_stats["min quality"] += 1
            filter_flags["min quality"] = True
    if self.filters["avg quality"] > 0:
        if fq.mean_quality() < self.filters["avg quality"]:
            self.filter_stats["avg quality"] += 1
            filter_flags["avg quality"] = True
    if self.filters["max N"] >= 0:
        if fq.sequence.upper().count("N") > self.filters["max N"]:
            self.filter_stats["max N"] += 1
            filter_flags["max N"] = True
    # "X" marks a base that could not be resolved during read merging
    if "remove unresolvable" in self.filters:  # OverlapSeqLib only
        if self.filters["remove unresolvable"]:
            if "X" in fq.sequence:
                self.filter_stats["remove unresolvable"] += 1
                filter_flags["remove unresolvable"] = True
    # update total and report if failed
    if any(filter_flags.values()):
        self.filter_stats["total"] += 1
        if self.report_filtered:
            self.report_filtered_read(fq, filter_flags)
        return False
    else:
        return True
def make_plots(self):
    """
    Make plots that are shared by all :py:class:`~seqlib.seqlib.SeqLib`
    objects.

    Creates counts histograms for all labels.
    """
    if self.plots_requested:
        self.logger.info("Creating plots")
        # Context manager guarantees the PDF is finalized/closed even if
        # a counts_plot() call raises; the original leaked the open
        # PdfPages object in that case.
        with PdfPages(os.path.join(self.plot_dir, "counts.pdf")) as pdf:
            for label in self.labels:
                counts_plot(self, label, pdf, log=True)
                counts_plot(self, label, pdf, log=False)
def write_tsv(self):
    """
    Write each table from the store to its own tab-separated file.

    Files are written to a ``tsv`` directory in the default output
    location.
    File names are the HDF5 key with ``'_'`` substituted for ``'/'``.
    """
    if self.tsv_requested:
        self.logger.info("Generating tab-separated output files")
        # write_table_tsv (from StoreManager) handles one key at a time
        for k in self.store.keys():
            self.write_table_tsv(k)
def counts_from_file_h5(self, fname):
    """
    If an HDF store containing raw counts has been specified, open the
    store, copy those counts into this store, and close the counts store.

    Copies all tables in the ``'/raw'`` group along with their metadata.

    Raises:
        ValueError: if the store contains no ``'/raw/'`` tables.
    """
    store = pd.HDFStore(fname)
    self.logger.info(
        "Using existing HDF5 data store '{}' for raw data".format(fname)
    )
    # try/finally guarantees the external store is closed even when the
    # ValueError below is raised (the original left it open in that case)
    try:
        # this could probably be much more efficient, but the PyTables docs
        # don't explain copying subsets of files adequately
        raw_keys = [key for key in store.keys() if key.startswith("/raw/")]
        if len(raw_keys) == 0:
            raise ValueError(
                "No raw counts found in '{}' [{}]".format(fname, self.name)
            )
        for k in raw_keys:
            # copy the data table
            raw = store[k]
            self.store.put(k, raw, format="table", data_columns=raw.columns)
            # copy the metadata
            self.set_metadata(k, self.get_metadata(k, store=store), update=False)
            self.logger.info("Copied raw data '{}'".format(k))
    finally:
        store.close()
def counts_from_file_tsv(self, fname):
    """
    If a counts file in tsv format has been specified, read the counts into
    a new dataframe and save as raw counts.

    Raises:
        ValueError: if the file does not have exactly one ``count``
            column, is empty, or no element label applies.
    """
    df = pd.read_table(fname, sep="\t", header=0, index_col=0)
    # Compare as a plain list: ``df.columns != [...]`` is an elementwise
    # comparison whose truth value is ambiguous for files with more than
    # one column, raising the wrong error instead of the message below.
    if list(df.columns) != ["count"]:
        raise ValueError(
            "Invalid column names for counts file [{}]" "".format(self.name)
        )
    if len(df) == 0:
        raise ValueError("Empty counts file [{}]".format(self.name))
    # use the first element label (in canonical order) this object supports
    label = None
    for elem in ELEMENT_LABELS:
        if elem in self.labels:
            label = elem
            break
    if label is None:
        raise ValueError("No valid element labels [{}]".format(self.name))
    key = "/raw/{}/counts".format(label)
    # HDFStore.put() has no ``dtype`` keyword; cast the frame explicitly
    # to get the int32 storage the original intended.
    self.store.put(
        key, df.astype(np.int32), format="table", data_columns=df.columns
    )
def counts_from_file(self, fname):
    """Get raw counts from a counts file instead of FASTQ_ file.

    The ``'/raw/<element>/counts'`` table will be populated using the given
    input file. The input file should be a two-column file readable by
    ``pandas`` as a series or two-column dataframe or an Enrich2 HDF5 file.

    If the input file is a two-column file, the index will be checked using
    the SeqLib's ``validate_index()`` method.

    If the input file is an HDF5 file, the entire set of ``'/raw'`` tables
    will be copied over, with the metadata intact.

    Raises:
        IOError: if *fname* does not exist.
        ValueError: if the file extension is not recognized.
    """
    ext = os.path.splitext(fname)[-1].lower()
    if not os.path.exists(fname):
        raise IOError("Counts file '{}' not found [{}]" "".format(fname, self.name))
    elif ext in (".h5",):
        # The original tested ``ext in (".h5")`` -- parentheses around a
        # lone string are not a tuple, so that was a substring test
        # (e.g. ".h" would match). A one-element tuple fixes it.
        # Also dispatch on *fname*, not self.counts_file, so the argument
        # actually passed in is the file that gets read.
        self.counts_from_file_h5(fname)
    elif ext in (".txt", ".tsv", ".csv"):
        self.counts_from_file_tsv(fname)
    else:
        raise ValueError(
            "Unrecognized counts file extension for '{}' "
            "[{}]".format(fname, self.name)
        )
| bsd-3-clause |
MikeAmy/django | tests/admin_changelist/models.py | 276 | 2890 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Event(models.Model):
    """Model with a single date field stored in a custom column."""
    # Oracle can have problems with a column named "date"
    date = models.DateField(db_column="event_date")
class Parent(models.Model):
    """Target of the Child.parent foreign key below."""
    name = models.CharField(max_length=128)
class Child(models.Model):
    """Model with a nullable, non-editable FK to Parent."""
    parent = models.ForeignKey(Parent, models.SET_NULL, editable=False, null=True)
    name = models.CharField(max_length=30, blank=True)
    age = models.IntegerField(null=True, blank=True)
class Genre(models.Model):
    """Target of the Band.genres many-to-many below."""
    name = models.CharField(max_length=20)
class Band(models.Model):
    """Model with a plain (no ``through``) many-to-many to Genre."""
    name = models.CharField(max_length=20)
    nr_of_members = models.PositiveIntegerField()
    genres = models.ManyToManyField(Genre)
@python_2_unicode_compatible
class Musician(models.Model):
    """Model whose string representation is its name."""
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Group(models.Model):
    """Model with a many-to-many to Musician through Membership."""
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(Musician, through='Membership')

    def __str__(self):
        return self.name
class Concert(models.Model):
    """Model with a cascading FK to Group."""
    name = models.CharField(max_length=30)
    group = models.ForeignKey(Group, models.CASCADE)
class Membership(models.Model):
    """Intermediate (``through``) model for Group.members."""
    music = models.ForeignKey(Musician, models.CASCADE)
    group = models.ForeignKey(Group, models.CASCADE)
    role = models.CharField(max_length=15)
class Quartet(Group):
    """Multi-table-inheritance child of Group; adds no fields."""
    pass
class ChordsMusician(Musician):
    """Multi-table-inheritance child of Musician; adds no fields."""
    pass
class ChordsBand(models.Model):
    """Model with a many-to-many to an MTI child model, through Invitation."""
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(ChordsMusician, through='Invitation')
class Invitation(models.Model):
    """Intermediate (``through``) model for ChordsBand.members."""
    player = models.ForeignKey(ChordsMusician, models.CASCADE)
    band = models.ForeignKey(ChordsBand, models.CASCADE)
    instrument = models.CharField(max_length=15)
class Swallow(models.Model):
    """Model with a multi-field default ordering declared in Meta."""
    origin = models.CharField(max_length=255)
    load = models.FloatField()
    speed = models.FloatField()

    class Meta:
        ordering = ('speed', 'load')
class SwallowOneToOne(models.Model):
    """Model with a cascading one-to-one to Swallow."""
    swallow = models.OneToOneField(Swallow, models.CASCADE)
class UnorderedObject(models.Model):
    """
    Model without any defined `Meta.ordering`.
    Refs #17198.
    """
    # NOTE(review): field name shadows the ``bool`` builtin inside this
    # class body; kept as-is since renaming would change the schema.
    bool = models.BooleanField(default=True)
class OrderedObjectManager(models.Manager):
    """Manager imposing a default ``number`` ordering on its queryset."""
    def get_queryset(self):
        return super(OrderedObjectManager, self).get_queryset().order_by('number')
class OrderedObject(models.Model):
    """
    Model with Manager that defines a default order.
    Refs #17198.
    """
    name = models.CharField(max_length=255)
    # NOTE(review): field name shadows the ``bool`` builtin; kept for schema
    # compatibility.
    bool = models.BooleanField(default=True)
    number = models.IntegerField(default=0, db_column='number_val')

    objects = OrderedObjectManager()
class CustomIdUser(models.Model):
    """Model with an explicitly declared auto primary key named ``uuid``."""
    uuid = models.AutoField(primary_key=True)
| bsd-3-clause |
MRCIEU/melodi | melodi/settings.py | 1 | 8804 | """
Django settings for melodi project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from datetime import timedelta
from celery.schedules import crontab,timedelta
from django.core.urlresolvers import reverse_lazy
import config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: keep the secret key used in production secret!
# Loaded from the local ``config`` module rather than hard-coded here.
SECRET_KEY = config.secret_key

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

#ALLOWED_HOSTS = []
#Add this for public
ALLOWED_HOSTS = ['melodi.biocompute.org.uk','www.melodi.biocompute.org.uk','melodi.mrcieu.ac.uk']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'browser',
'social_auth',
'django.contrib.humanize'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
#'django.middleware.cache.UpdateCacheMiddleware', #need this for cache
'django.middleware.common.CommonMiddleware',
#'django.middleware.cache.FetchFromCacheMiddleware', #need this for cache
)
AUTHENTICATION_BACKENDS = (
'social_auth.backends.google.GoogleOAuth2Backend',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_ENABLED_BACKENDS = ('google')
LOGIN_URL = '/login/'
LOGIN_ERROR_URL = '/login-error/'
LOGIN_REDIRECT_URL = reverse_lazy('home')
# NOTE(review): OAuth client credentials are hard-coded in a settings file
# that appears to be under version control. The client secret should be
# moved into the local ``config`` module (as is already done for
# SECRET_KEY) and the committed secret rotated.
GOOGLE_OAUTH2_CLIENT_ID = '744265706742-h9l3etr7pdboc8d0h0b14biiemtfsbvb.apps.googleusercontent.com'
GOOGLE_OAUTH2_CLIENT_SECRET = 'BsQyz4BxaC82kYD_O5UHcgaF'
#GOOGLE_WHITE_LISTED_DOMAINS = ['bristol.ac.uk']
SOCIAL_AUTH_USER_MODEL = 'auth.User'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'social_auth.context_processors.social_auth_by_type_backends'
)
ROOT_URLCONF = 'melodi.urls'
APPEND_SLASH = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'browser/templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'melodi.wsgi.application'
SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
#'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#}
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': '/var/django/melodi/mysql.cnf',
},
}
}
# NEO4J_DATABASES = {
# 'default' : {
# 'HOST':'10.0.2.2',
# 'PORT':7474,
# 'ENDPOINT':'/db/data'
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
#STATIC_ROOT = '/var/django/melodi/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
STATIC_URL = '/static/'
MEDIA_ROOT = '/var/django/melodi/'
DATA_FOLDER = os.path.join(BASE_DIR,"data/")
# CELERY SETTINGS
BROKER_URL = 'redis://localhost:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_ACKS_LATE = True
#restart the worker process after every task to avoid memory leaks
CELERYD_MAX_TASKS_PER_CHILD = 1
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
#'level': 'WARNING',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'debug.log'),
#'filename': '/tmp/debug.log',
'formatter': 'verbose'
},
'console': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
},
},
'loggers': {
#'django': {
# 'handlers':['file'],
# 'propagate': True,
# 'level':'INFO',
#},
'celery': {
'handlers': ['console'],
'propagate': False,
'level': 'WARNING',
},
'browser': {
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
#CACHE_MIDDLEWARE_ALIAS = 'default'
#CACHE_MIDDLEWARE_SECONDS = 60480000
#CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
#"SOCKET_TIMEOUT": 50,
},
"KEY_PREFIX": "melodi",
'TIMEOUT': None
}
}
#CACHES = {
# 'default': {
# #'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
# 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
# 'LOCATION': 'melodi_cache',
# 'TIMEOUT': None
# }
#}
CELERYBEAT_SCHEDULE = {
#'t1': {
# 'task': 'tasks.test_scheduler',
# 'schedule': timedelta(seconds=10),
#},
#update pubmed-mesh relationships every dat at 3am
'dm': {
'task': 'tasks.daily_mesh',
#'schedule': timedelta(hours=1),
'schedule': crontab(hour=3, minute=0),#
},
#'neo': {
# 'task': 'tasks.neo4j_check',
# #'schedule': timedelta(hours=1),
# 'schedule': timedelta(minutes=30),#
#},
}
# Logging
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse'
# }
# },
# 'formatters': {
# 'verbose': {
# 'format': '[%(asctime)s] %(levelname)-8s %(process)d %(thread)d %(name)s:%(message)s',
# 'datefmt': '%Y-%m-%d %a %H:%M:%S'
# },
# },
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'django.utils.log.NullHandler',
# },
# 'console': {
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# 'formatter': 'verbose'
# },
# 'local_file': {
# 'level': 'DEBUG',
# 'class': 'logging.handlers.RotatingFileHandler',
# 'formatter': 'verbose',
# #'filename': '%s/debug.log' % APP_ROOT,
# 'filename': os.path.join(BASE_DIR, 'debug2.log'),
# 'maxBytes': 1024 * 1024 * 10,
# },
# 'syslog': {
# 'level': 'INFO',
# 'class': 'logging.handlers.SysLogHandler',
# },
# 'mail_admins': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'django.utils.log.AdminEmailHandler',
# 'include_html': True,
# }
# },
# 'loggers': {
# 'django': {
# 'handlers': ['null'],
# 'propagate': True,
# 'level': 'INFO',
# },
# 'django.request': {
# 'handlers': ['mail_admins', 'console', 'local_file'],
# 'level': 'ERROR',
# 'propagate': False,
# },
# },
# 'root': {
# 'handlers': ['console', 'local_file'],
# 'level': 'DEBUG',
# }
# }
| mit |
RossBrunton/django | django/conf/locale/nl/formats.py | 504 | 4472 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '20 januari 2009'
TIME_FORMAT = 'H:i' # '15:23'
DATETIME_FORMAT = 'j F Y H:i' # '20 januari 2009 15:23'
YEAR_MONTH_FORMAT = 'F Y' # 'januari 2009'
MONTH_DAY_FORMAT = 'j F' # '20 januari'
SHORT_DATE_FORMAT = 'j-n-Y' # '20-1-2009'
SHORT_DATETIME_FORMAT = 'j-n-Y H:i' # '20-1-2009 15:23'
FIRST_DAY_OF_WEEK = 1 # Monday (in Dutch 'maandag')
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d-%m-%Y', '%d-%m-%y', # '20-01-2009', '20-01-09'
'%d/%m/%Y', '%d/%m/%y', # '20/01/2009', '20/01/09'
# '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
# '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 09'
]
# Kept ISO formats as one is in first position
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '15:23:35'
'%H:%M:%S.%f', # '15:23:35.000200'
'%H.%M:%S', # '15.23:35'
'%H.%M:%S.%f', # '15.23:35.000200'
'%H.%M', # '15.23'
'%H:%M', # '15:23'
]
DATETIME_INPUT_FORMATS = [
# With time in %H:%M:%S :
'%d-%m-%Y %H:%M:%S', '%d-%m-%y %H:%M:%S', '%Y-%m-%d %H:%M:%S',
# '20-01-2009 15:23:35', '20-01-09 15:23:35', '2009-01-20 15:23:35'
'%d/%m/%Y %H:%M:%S', '%d/%m/%y %H:%M:%S', '%Y/%m/%d %H:%M:%S',
# '20/01/2009 15:23:35', '20/01/09 15:23:35', '2009/01/20 15:23:35'
# '%d %b %Y %H:%M:%S', '%d %b %y %H:%M:%S', # '20 jan 2009 15:23:35', '20 jan 09 15:23:35'
# '%d %B %Y %H:%M:%S', '%d %B %y %H:%M:%S', # '20 januari 2009 15:23:35', '20 januari 2009 15:23:35'
# With time in %H:%M:%S.%f :
'%d-%m-%Y %H:%M:%S.%f', '%d-%m-%y %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S.%f',
# '20-01-2009 15:23:35.000200', '20-01-09 15:23:35.000200', '2009-01-20 15:23:35.000200'
'%d/%m/%Y %H:%M:%S.%f', '%d/%m/%y %H:%M:%S.%f', '%Y/%m/%d %H:%M:%S.%f',
# '20/01/2009 15:23:35.000200', '20/01/09 15:23:35.000200', '2009/01/20 15:23:35.000200'
# With time in %H.%M:%S :
'%d-%m-%Y %H.%M:%S', '%d-%m-%y %H.%M:%S', # '20-01-2009 15.23:35', '20-01-09 15.23:35'
'%d/%m/%Y %H.%M:%S', '%d/%m/%y %H.%M:%S', # '20/01/2009 15.23:35', '20/01/09 15.23:35'
# '%d %b %Y %H.%M:%S', '%d %b %y %H.%M:%S', # '20 jan 2009 15.23:35', '20 jan 09 15.23:35'
# '%d %B %Y %H.%M:%S', '%d %B %y %H.%M:%S', # '20 januari 2009 15.23:35', '20 januari 2009 15.23:35'
# With time in %H.%M:%S.%f :
'%d-%m-%Y %H.%M:%S.%f', '%d-%m-%y %H.%M:%S.%f', # '20-01-2009 15.23:35.000200', '20-01-09 15.23:35.000200'
'%d/%m/%Y %H.%M:%S.%f', '%d/%m/%y %H.%M:%S.%f', # '20/01/2009 15.23:35.000200', '20/01/09 15.23:35.000200'
# With time in %H:%M :
'%d-%m-%Y %H:%M', '%d-%m-%y %H:%M', '%Y-%m-%d %H:%M', # '20-01-2009 15:23', '20-01-09 15:23', '2009-01-20 15:23'
'%d/%m/%Y %H:%M', '%d/%m/%y %H:%M', '%Y/%m/%d %H:%M', # '20/01/2009 15:23', '20/01/09 15:23', '2009/01/20 15:23'
# '%d %b %Y %H:%M', '%d %b %y %H:%M', # '20 jan 2009 15:23', '20 jan 09 15:23'
# '%d %B %Y %H:%M', '%d %B %y %H:%M', # '20 januari 2009 15:23', '20 januari 2009 15:23'
# With time in %H.%M :
'%d-%m-%Y %H.%M', '%d-%m-%y %H.%M', # '20-01-2009 15.23', '20-01-09 15.23'
'%d/%m/%Y %H.%M', '%d/%m/%y %H.%M', # '20/01/2009 15.23', '20/01/09 15.23'
# '%d %b %Y %H.%M', '%d %b %y %H.%M', # '20 jan 2009 15.23', '20 jan 09 15.23'
# '%d %B %Y %H.%M', '%d %B %y %H.%M', # '20 januari 2009 15.23', '20 januari 2009 15.23'
# Without time :
'%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20'
'%d/%m/%Y', '%d/%m/%y', '%Y/%m/%d', # '20/01/2009', '20/01/09', '2009/01/20'
# '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
# '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 2009'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
JonathanSeguin/Mariana | Mariana/regularizations.py | 1 | 1924 | from Mariana.abstraction import Abstraction_ABC
__all__ = ["SingleLayerRegularizer_ABC", "L1", "L2", "ActivationL1"]
class SingleLayerRegularizer_ABC(Abstraction_ABC):
    """An abstract regularization to be applied to a layer."""

    def apply(self, layer):
        """Apply to a layer, update the network's log, and return the
        symbolic penalty term produced by :py:meth:`getFormula`."""
        hyps = {}
        for k in self.hyperParameters:
            hyps[k] = getattr(self, k)
        message = "%s uses %s regularization" % (layer.name, self.__class__.__name__)
        layer.network.logLayerEvent(layer, message, hyps)
        return self.getFormula(layer)

    def getFormula(self, layer):
        """Returns the expression to be added to the cost.

        Must be overridden by concrete regularizers.
        """
        # Bug fix: the original did ``raise NotImplemented(...)`` --
        # NotImplemented is a constant, not an exception class, so calling
        # it raises a confusing TypeError instead of the intended error.
        raise NotImplementedError("Must be implemented in child")
class L1(SingleLayerRegularizer_ABC):
    """
    Will add this to the cost. Weights will tend towards 0,
    resulting in sparser weight matrices.

    .. math::

            factor * abs(Weights)
    """

    def __init__(self, factor):
        SingleLayerRegularizer_ABC.__init__(self)
        # strength of the penalty term
        self.factor = factor
        # recorded in the network log by apply()
        self.hyperParameters = ["factor"]

    def getFormula(self, layer):
        """Return the symbolic L1 penalty on the layer's weight matrix W."""
        return self.factor * ( abs(layer.parameters["W"]).sum() )
class L2(SingleLayerRegularizer_ABC):
    """
    Will add this to the cost. Causes the weights to stay small.

    .. math::

            factor * (Weights)^2
    """

    def __init__(self, factor):
        SingleLayerRegularizer_ABC.__init__(self)
        # strength of the penalty term
        self.factor = factor
        # recorded in the network log by apply()
        self.hyperParameters = ["factor"]

    def getFormula(self, layer):
        """Return the symbolic L2 penalty on the layer's weight matrix W."""
        return self.factor * ( (layer.parameters["W"] ** 2).sum() )
class ActivationL1(SingleLayerRegularizer_ABC):
    """
    L1 on the activations. Neurone activations will tend towards
    0, resulting in sparser representations.

    Will add this to the cost

    .. math::

            factor * abs(activations)
    """

    def __init__(self, factor):
        SingleLayerRegularizer_ABC.__init__(self)
        # strength of the penalty term
        self.factor = factor
        # recorded in the network log by apply()
        self.hyperParameters = ["factor"]
def getFormula(self, layer) :
return self.factor * ( abs(layer.outputs).sum() ) | apache-2.0 |
ruuk/script.evernote | lib/evernote/edam/userstore/ttypes.py | 2 | 37784 | #
# Autogenerated by Thrift Compiler (0.5.0-en-262021)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import evernote.edam.type.ttypes
import evernote.edam.error.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class SponsoredGroupRole(object):
    """
    Enumeration of Sponsored Group Roles

    Thrift-generated enum: plain integer constants plus lookup tables.
    """
    GROUP_MEMBER = 1
    GROUP_ADMIN = 2
    GROUP_OWNER = 3

    # reverse lookup: numeric value -> symbolic name
    _VALUES_TO_NAMES = {
        1: "GROUP_MEMBER",
        2: "GROUP_ADMIN",
        3: "GROUP_OWNER",
    }

    # forward lookup: symbolic name -> numeric value
    _NAMES_TO_VALUES = {
        "GROUP_MEMBER": 1,
        "GROUP_ADMIN": 2,
        "GROUP_OWNER": 3,
    }
class PublicUserInfo(object):
"""
This structure is used to provide publicly-available user information
about a particular account.
<dl>
<dt>userId:</dt>
<dd>
The unique numeric user identifier for the user account.
</dd>
<dt>shardId:</dt>
<dd>
The name of the virtual server that manages the state of
this user. This value is used internally to determine which system should
service requests about this user's data.
</dd>
<dt>privilege:</dt>
<dd>
The privilege level of the account, to determine whether
this is a Premium or Free account.
</dd>
<dt>noteStoreUrl:</dt>
<dd>
This field will contain the full URL that clients should use to make
NoteStore requests to the server shard that contains that user's data.
I.e. this is the URL that should be used to create the Thrift HTTP client
transport to send messages to the NoteStore service for the account.
</dd>
</dl>
Attributes:
- userId
- shardId
- privilege
- username
- noteStoreUrl
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'userId', None, None, ), # 1
(2, TType.STRING, 'shardId', None, None, ), # 2
(3, TType.I32, 'privilege', None, None, ), # 3
(4, TType.STRING, 'username', None, None, ), # 4
(5, TType.STRING, 'noteStoreUrl', None, None, ), # 5
)
def __init__(self, userId=None, shardId=None, privilege=None, username=None, noteStoreUrl=None,):
    # Plain attribute assignment; field semantics are documented in the
    # class docstring of this thrift-generated structure.
    self.userId = userId
    self.shardId = shardId
    self.privilege = privilege
    self.username = username
    self.noteStoreUrl = noteStoreUrl
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.userId = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.shardId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.privilege = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.username = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.noteStoreUrl = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PublicUserInfo')
if self.userId is not None:
oprot.writeFieldBegin('userId', TType.I32, 1)
oprot.writeI32(self.userId)
oprot.writeFieldEnd()
if self.shardId is not None:
oprot.writeFieldBegin('shardId', TType.STRING, 2)
oprot.writeString(self.shardId)
oprot.writeFieldEnd()
if self.privilege is not None:
oprot.writeFieldBegin('privilege', TType.I32, 3)
oprot.writeI32(self.privilege)
oprot.writeFieldEnd()
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 4)
oprot.writeString(self.username)
oprot.writeFieldEnd()
if self.noteStoreUrl is not None:
oprot.writeFieldBegin('noteStoreUrl', TType.STRING, 5)
oprot.writeString(self.noteStoreUrl)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.userId is None:
raise TProtocol.TProtocolException(message='Required field userId is unset!')
if self.shardId is None:
raise TProtocol.TProtocolException(message='Required field shardId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PremiumInfo(object):
"""
This structure is used to provide information about a user's Premium account.
<dl>
<dt>currentTime:</dt>
<dd>
The server-side date and time when this data was generated.
</dd>
<dt>premium:</dt>
<dd>
True if the user's account is Premium.
</dd>
<dt>premiumRecurring</dt>
<dd>
True if the user's account is Premium and has a recurring payment method.
</dd>
<dt>premiumExpirationDate:</dt>
<dd>
The date when the user's Premium account expires, or the date when the
user's account will be charged if it has a recurring payment method.
</dd>
<dt>premiumExtendable:</dt>
<dd>
True if the user is eligible for purchasing Premium account extensions.
</dd>
<dt>premiumPending:</dt>
<dd>
True if the user's Premium account is pending payment confirmation
</dd>
<dt>premiumCancellationPending:</dt>
<dd>
True if the user has requested that no further charges to be made; the
Premium account will remain active until it expires.
</dd>
<dt>canPurchaseUploadAllowance:</dt>
<dd>
True if the user is eligible for purchasing additional upload allowance.
</dd>
<dt>sponsoredGroupName:</dt>
<dd>
The name of the sponsored group that the user is part of.
</dd>
<dt>sponsoredGroupRole:</dt>
<dd>
The role of the user within a sponsored group.
</dd>
<dt>businessName:</dt>
<dd>
The name of the business that the user is associated with.
</dd>
<dt>businessAdmin:</dt>
<dd>
True if the user is the administrator of the business.
</dd>
</dl>
Attributes:
- currentTime
- premium
- premiumRecurring
- premiumExpirationDate
- premiumExtendable
- premiumPending
- premiumCancellationPending
- canPurchaseUploadAllowance
- sponsoredGroupName
- sponsoredGroupRole
- businessName
- businessAdmin
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'currentTime', None, None, ), # 1
(2, TType.BOOL, 'premium', None, None, ), # 2
(3, TType.BOOL, 'premiumRecurring', None, None, ), # 3
(4, TType.I64, 'premiumExpirationDate', None, None, ), # 4
(5, TType.BOOL, 'premiumExtendable', None, None, ), # 5
(6, TType.BOOL, 'premiumPending', None, None, ), # 6
(7, TType.BOOL, 'premiumCancellationPending', None, None, ), # 7
(8, TType.BOOL, 'canPurchaseUploadAllowance', None, None, ), # 8
(9, TType.STRING, 'sponsoredGroupName', None, None, ), # 9
(10, TType.I32, 'sponsoredGroupRole', None, None, ), # 10
(11, TType.STRING, 'businessName', None, None, ), # 11
(12, TType.BOOL, 'businessAdmin', None, None, ), # 12
)
def __init__(self, currentTime=None, premium=None, premiumRecurring=None, premiumExpirationDate=None, premiumExtendable=None, premiumPending=None, premiumCancellationPending=None, canPurchaseUploadAllowance=None, sponsoredGroupName=None, sponsoredGroupRole=None, businessName=None, businessAdmin=None,):
self.currentTime = currentTime
self.premium = premium
self.premiumRecurring = premiumRecurring
self.premiumExpirationDate = premiumExpirationDate
self.premiumExtendable = premiumExtendable
self.premiumPending = premiumPending
self.premiumCancellationPending = premiumCancellationPending
self.canPurchaseUploadAllowance = canPurchaseUploadAllowance
self.sponsoredGroupName = sponsoredGroupName
self.sponsoredGroupRole = sponsoredGroupRole
self.businessName = businessName
self.businessAdmin = businessAdmin
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.currentTime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.premium = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.premiumRecurring = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.premiumExpirationDate = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.premiumExtendable = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.premiumPending = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.premiumCancellationPending = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.canPurchaseUploadAllowance = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRING:
self.sponsoredGroupName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.I32:
self.sponsoredGroupRole = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRING:
self.businessName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.BOOL:
self.businessAdmin = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PremiumInfo')
if self.currentTime is not None:
oprot.writeFieldBegin('currentTime', TType.I64, 1)
oprot.writeI64(self.currentTime)
oprot.writeFieldEnd()
if self.premium is not None:
oprot.writeFieldBegin('premium', TType.BOOL, 2)
oprot.writeBool(self.premium)
oprot.writeFieldEnd()
if self.premiumRecurring is not None:
oprot.writeFieldBegin('premiumRecurring', TType.BOOL, 3)
oprot.writeBool(self.premiumRecurring)
oprot.writeFieldEnd()
if self.premiumExpirationDate is not None:
oprot.writeFieldBegin('premiumExpirationDate', TType.I64, 4)
oprot.writeI64(self.premiumExpirationDate)
oprot.writeFieldEnd()
if self.premiumExtendable is not None:
oprot.writeFieldBegin('premiumExtendable', TType.BOOL, 5)
oprot.writeBool(self.premiumExtendable)
oprot.writeFieldEnd()
if self.premiumPending is not None:
oprot.writeFieldBegin('premiumPending', TType.BOOL, 6)
oprot.writeBool(self.premiumPending)
oprot.writeFieldEnd()
if self.premiumCancellationPending is not None:
oprot.writeFieldBegin('premiumCancellationPending', TType.BOOL, 7)
oprot.writeBool(self.premiumCancellationPending)
oprot.writeFieldEnd()
if self.canPurchaseUploadAllowance is not None:
oprot.writeFieldBegin('canPurchaseUploadAllowance', TType.BOOL, 8)
oprot.writeBool(self.canPurchaseUploadAllowance)
oprot.writeFieldEnd()
if self.sponsoredGroupName is not None:
oprot.writeFieldBegin('sponsoredGroupName', TType.STRING, 9)
oprot.writeString(self.sponsoredGroupName)
oprot.writeFieldEnd()
if self.sponsoredGroupRole is not None:
oprot.writeFieldBegin('sponsoredGroupRole', TType.I32, 10)
oprot.writeI32(self.sponsoredGroupRole)
oprot.writeFieldEnd()
if self.businessName is not None:
oprot.writeFieldBegin('businessName', TType.STRING, 11)
oprot.writeString(self.businessName)
oprot.writeFieldEnd()
if self.businessAdmin is not None:
oprot.writeFieldBegin('businessAdmin', TType.BOOL, 12)
oprot.writeBool(self.businessAdmin)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.currentTime is None:
raise TProtocol.TProtocolException(message='Required field currentTime is unset!')
if self.premium is None:
raise TProtocol.TProtocolException(message='Required field premium is unset!')
if self.premiumRecurring is None:
raise TProtocol.TProtocolException(message='Required field premiumRecurring is unset!')
if self.premiumExtendable is None:
raise TProtocol.TProtocolException(message='Required field premiumExtendable is unset!')
if self.premiumPending is None:
raise TProtocol.TProtocolException(message='Required field premiumPending is unset!')
if self.premiumCancellationPending is None:
raise TProtocol.TProtocolException(message='Required field premiumCancellationPending is unset!')
if self.canPurchaseUploadAllowance is None:
raise TProtocol.TProtocolException(message='Required field canPurchaseUploadAllowance is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AuthenticationResult(object):
"""
When an authentication (or re-authentication) is performed, this structure
provides the result to the client.
<dl>
<dt>currentTime:</dt>
<dd>
The server-side date and time when this result was
generated.
</dd>
<dt>authenticationToken:</dt>
<dd>
Holds an opaque, ASCII-encoded token that can be
used by the client to perform actions on a NoteStore.
</dd>
<dt>expiration:</dt>
<dd>
Holds the server-side date and time when the
authentication token will expire.
This time can be compared to "currentTime" to produce an expiration
time that can be reconciled with the client's local clock.
</dd>
<dt>user:</dt>
<dd>
Holds the information about the account which was
authenticated if this was a full authentication. May be absent if this
particular authentication did not require user information.
</dd>
<dt>publicUserInfo:</dt>
<dd>
If this authentication result was achieved without full permissions to
access the full User structure, this field may be set to give back
a more limited public set of data.
</dd>
<dt>noteStoreUrl:</dt>
<dd>
This field will contain the full URL that clients should use to make
NoteStore requests to the server shard that contains that user's data.
I.e. this is the URL that should be used to create the Thrift HTTP client
transport to send messages to the NoteStore service for the account.
</dd>
<dt>webApiUrlPrefix:</dt>
<dd>
This field will contain the initial part of the URLs that should be used
to make requests to Evernote's thin client "web API", which provide
optimized operations for clients that aren't capable of manipulating
the full contents of accounts via the full Thrift data model. Clients
should concatenate the relative path for the various servlets onto the
end of this string to construct the full URL, as documented on our
developer web site.
</dd>
</dl>
Attributes:
- currentTime
- authenticationToken
- expiration
- user
- publicUserInfo
- noteStoreUrl
- webApiUrlPrefix
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'currentTime', None, None, ), # 1
(2, TType.STRING, 'authenticationToken', None, None, ), # 2
(3, TType.I64, 'expiration', None, None, ), # 3
(4, TType.STRUCT, 'user', (evernote.edam.type.ttypes.User, evernote.edam.type.ttypes.User.thrift_spec), None, ), # 4
(5, TType.STRUCT, 'publicUserInfo', (PublicUserInfo, PublicUserInfo.thrift_spec), None, ), # 5
(6, TType.STRING, 'noteStoreUrl', None, None, ), # 6
(7, TType.STRING, 'webApiUrlPrefix', None, None, ), # 7
)
def __init__(self, currentTime=None, authenticationToken=None, expiration=None, user=None, publicUserInfo=None, noteStoreUrl=None, webApiUrlPrefix=None,):
self.currentTime = currentTime
self.authenticationToken = authenticationToken
self.expiration = expiration
self.user = user
self.publicUserInfo = publicUserInfo
self.noteStoreUrl = noteStoreUrl
self.webApiUrlPrefix = webApiUrlPrefix
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.currentTime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.authenticationToken = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.expiration = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.user = evernote.edam.type.ttypes.User()
self.user.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.publicUserInfo = PublicUserInfo()
self.publicUserInfo.read(iprot)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.noteStoreUrl = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.webApiUrlPrefix = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AuthenticationResult')
if self.currentTime is not None:
oprot.writeFieldBegin('currentTime', TType.I64, 1)
oprot.writeI64(self.currentTime)
oprot.writeFieldEnd()
if self.authenticationToken is not None:
oprot.writeFieldBegin('authenticationToken', TType.STRING, 2)
oprot.writeString(self.authenticationToken)
oprot.writeFieldEnd()
if self.expiration is not None:
oprot.writeFieldBegin('expiration', TType.I64, 3)
oprot.writeI64(self.expiration)
oprot.writeFieldEnd()
if self.user is not None:
oprot.writeFieldBegin('user', TType.STRUCT, 4)
self.user.write(oprot)
oprot.writeFieldEnd()
if self.publicUserInfo is not None:
oprot.writeFieldBegin('publicUserInfo', TType.STRUCT, 5)
self.publicUserInfo.write(oprot)
oprot.writeFieldEnd()
if self.noteStoreUrl is not None:
oprot.writeFieldBegin('noteStoreUrl', TType.STRING, 6)
oprot.writeString(self.noteStoreUrl)
oprot.writeFieldEnd()
if self.webApiUrlPrefix is not None:
oprot.writeFieldBegin('webApiUrlPrefix', TType.STRING, 7)
oprot.writeString(self.webApiUrlPrefix)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.currentTime is None:
raise TProtocol.TProtocolException(message='Required field currentTime is unset!')
if self.authenticationToken is None:
raise TProtocol.TProtocolException(message='Required field authenticationToken is unset!')
if self.expiration is None:
raise TProtocol.TProtocolException(message='Required field expiration is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class BootstrapSettings(object):
"""
This structure describes a collection of bootstrap settings.
<dl>
<dt>serviceHost:</dt>
<dd>
The hostname and optional port for composing Evernote web service URLs.
This URL can be used to access the UserStore and related services,
but must not be used to compose the NoteStore URL. Client applications
must handle serviceHost values that include only the hostname
(e.g. www.evernote.com) or both the hostname and port (e.g. www.evernote.com:8080).
If no port is specified, or if port 443 is specified, client applications must
use the scheme "https" when composing URLs. Otherwise, a client must use the
scheme "http".
</dd>
<dt>marketingUrl:</dt>
<dd>
The URL stem for the Evernote corporate marketing website, e.g. http://www.evernote.com.
This stem can be used to compose website URLs. For example, the URL of the Evernote
Trunk is composed by appending "/about/trunk/" to the value of marketingUrl.
</dd>
<dt>supportUrl:</dt>
<dd>
The full URL for the Evernote customer support website, e.g. https://support.evernote.com.
</dd>
<dt>accountEmailDomain:</dt>
<dd>
The domain used for an Evernote user's incoming email address, which allows notes to
be emailed into an account. E.g. m.evernote.com.
</dd>
<dt>enableFacebookSharing:</dt>
<dd>
Whether the client application should enable sharing of notes on Facebook.
</dd>
<dt>enableGiftSubscriptions:</dt>
<dd>
Whether the client application should enable gift subscriptions.
</dd>
<dt>enableSupportTickets:</dt>
<dd>
Whether the client application should enable in-client creation of support tickets.
</dd>
<dt>enableSharedNotebooks:</dt>
<dd>
Whether the client application should enable shared notebooks.
</dd>
<dt>enableSingleNoteSharing:</dt>
<dd>
Whether the client application should enable single note sharing.
</dd>
<dt>enableSponsoredAccounts:</dt>
<dd>
Whether the client application should enable sponsored accounts.
</dd>
<dt>enableTwitterSharing:</dt>
<dd>
Whether the client application should enable sharing of notes on Twitter.
</dd>
</dl>
Attributes:
- serviceHost
- marketingUrl
- supportUrl
- accountEmailDomain
- enableFacebookSharing
- enableGiftSubscriptions
- enableSupportTickets
- enableSharedNotebooks
- enableSingleNoteSharing
- enableSponsoredAccounts
- enableTwitterSharing
- enableLinkedInSharing
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'serviceHost', None, None, ), # 1
(2, TType.STRING, 'marketingUrl', None, None, ), # 2
(3, TType.STRING, 'supportUrl', None, None, ), # 3
(4, TType.STRING, 'accountEmailDomain', None, None, ), # 4
(5, TType.BOOL, 'enableFacebookSharing', None, None, ), # 5
(6, TType.BOOL, 'enableGiftSubscriptions', None, None, ), # 6
(7, TType.BOOL, 'enableSupportTickets', None, None, ), # 7
(8, TType.BOOL, 'enableSharedNotebooks', None, None, ), # 8
(9, TType.BOOL, 'enableSingleNoteSharing', None, None, ), # 9
(10, TType.BOOL, 'enableSponsoredAccounts', None, None, ), # 10
(11, TType.BOOL, 'enableTwitterSharing', None, None, ), # 11
(12, TType.BOOL, 'enableLinkedInSharing', None, None, ), # 12
)
def __init__(self, serviceHost=None, marketingUrl=None, supportUrl=None, accountEmailDomain=None, enableFacebookSharing=None, enableGiftSubscriptions=None, enableSupportTickets=None, enableSharedNotebooks=None, enableSingleNoteSharing=None, enableSponsoredAccounts=None, enableTwitterSharing=None, enableLinkedInSharing=None,):
self.serviceHost = serviceHost
self.marketingUrl = marketingUrl
self.supportUrl = supportUrl
self.accountEmailDomain = accountEmailDomain
self.enableFacebookSharing = enableFacebookSharing
self.enableGiftSubscriptions = enableGiftSubscriptions
self.enableSupportTickets = enableSupportTickets
self.enableSharedNotebooks = enableSharedNotebooks
self.enableSingleNoteSharing = enableSingleNoteSharing
self.enableSponsoredAccounts = enableSponsoredAccounts
self.enableTwitterSharing = enableTwitterSharing
self.enableLinkedInSharing = enableLinkedInSharing
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.serviceHost = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.marketingUrl = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.supportUrl = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.accountEmailDomain = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.enableFacebookSharing = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.enableGiftSubscriptions = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.enableSupportTickets = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.enableSharedNotebooks = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.BOOL:
self.enableSingleNoteSharing = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.BOOL:
self.enableSponsoredAccounts = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.BOOL:
self.enableTwitterSharing = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.BOOL:
self.enableLinkedInSharing = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('BootstrapSettings')
if self.serviceHost is not None:
oprot.writeFieldBegin('serviceHost', TType.STRING, 1)
oprot.writeString(self.serviceHost)
oprot.writeFieldEnd()
if self.marketingUrl is not None:
oprot.writeFieldBegin('marketingUrl', TType.STRING, 2)
oprot.writeString(self.marketingUrl)
oprot.writeFieldEnd()
if self.supportUrl is not None:
oprot.writeFieldBegin('supportUrl', TType.STRING, 3)
oprot.writeString(self.supportUrl)
oprot.writeFieldEnd()
if self.accountEmailDomain is not None:
oprot.writeFieldBegin('accountEmailDomain', TType.STRING, 4)
oprot.writeString(self.accountEmailDomain)
oprot.writeFieldEnd()
if self.enableFacebookSharing is not None:
oprot.writeFieldBegin('enableFacebookSharing', TType.BOOL, 5)
oprot.writeBool(self.enableFacebookSharing)
oprot.writeFieldEnd()
if self.enableGiftSubscriptions is not None:
oprot.writeFieldBegin('enableGiftSubscriptions', TType.BOOL, 6)
oprot.writeBool(self.enableGiftSubscriptions)
oprot.writeFieldEnd()
if self.enableSupportTickets is not None:
oprot.writeFieldBegin('enableSupportTickets', TType.BOOL, 7)
oprot.writeBool(self.enableSupportTickets)
oprot.writeFieldEnd()
if self.enableSharedNotebooks is not None:
oprot.writeFieldBegin('enableSharedNotebooks', TType.BOOL, 8)
oprot.writeBool(self.enableSharedNotebooks)
oprot.writeFieldEnd()
if self.enableSingleNoteSharing is not None:
oprot.writeFieldBegin('enableSingleNoteSharing', TType.BOOL, 9)
oprot.writeBool(self.enableSingleNoteSharing)
oprot.writeFieldEnd()
if self.enableSponsoredAccounts is not None:
oprot.writeFieldBegin('enableSponsoredAccounts', TType.BOOL, 10)
oprot.writeBool(self.enableSponsoredAccounts)
oprot.writeFieldEnd()
if self.enableTwitterSharing is not None:
oprot.writeFieldBegin('enableTwitterSharing', TType.BOOL, 11)
oprot.writeBool(self.enableTwitterSharing)
oprot.writeFieldEnd()
if self.enableLinkedInSharing is not None:
oprot.writeFieldBegin('enableLinkedInSharing', TType.BOOL, 12)
oprot.writeBool(self.enableLinkedInSharing)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.serviceHost is None:
raise TProtocol.TProtocolException(message='Required field serviceHost is unset!')
if self.marketingUrl is None:
raise TProtocol.TProtocolException(message='Required field marketingUrl is unset!')
if self.supportUrl is None:
raise TProtocol.TProtocolException(message='Required field supportUrl is unset!')
if self.accountEmailDomain is None:
raise TProtocol.TProtocolException(message='Required field accountEmailDomain is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class BootstrapProfile(object):
"""
This structure describes a collection of bootstrap settings.
<dl>
<dt>name:</dt>
<dd>
The unique name of the profile, which is guaranteed to remain consistent across
calls to getBootstrapInfo.
</dd>
<dt>settings:</dt>
<dd>
The settings for this profile.
</dd>
</dl>
Attributes:
- name
- settings
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRUCT, 'settings', (BootstrapSettings, BootstrapSettings.thrift_spec), None, ), # 2
)
def __init__(self, name=None, settings=None,):
self.name = name
self.settings = settings
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.settings = BootstrapSettings()
self.settings.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('BootstrapProfile')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.settings is not None:
oprot.writeFieldBegin('settings', TType.STRUCT, 2)
self.settings.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.name is None:
raise TProtocol.TProtocolException(message='Required field name is unset!')
if self.settings is None:
raise TProtocol.TProtocolException(message='Required field settings is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class BootstrapInfo(object):
"""
This structure describes a collection of bootstrap profiles.
<dl>
<dt>profiles:</dt>
<dd>
List of one or more bootstrap profiles, in descending
preference order.
</dd>
</dl>
Attributes:
- profiles
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'profiles', (TType.STRUCT,(BootstrapProfile, BootstrapProfile.thrift_spec)), None, ), # 1
)
def __init__(self, profiles=None,):
self.profiles = profiles
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.profiles = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = BootstrapProfile()
_elem5.read(iprot)
self.profiles.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('BootstrapInfo')
if self.profiles is not None:
oprot.writeFieldBegin('profiles', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.profiles))
for iter6 in self.profiles:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.profiles is None:
raise TProtocol.TProtocolException(message='Required field profiles is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| gpl-2.0 |
wwfifi/uliweb | uliweb/contrib/staticfiles/wsgi_staticfiles.py | 2 | 3843 | import os
from werkzeug.wsgi import SharedDataMiddleware
from uliweb import settings
from uliweb.utils.filedown import filedown
class StaticFilesMiddleware(SharedDataMiddleware):
    """
    This WSGI middleware is changed from werkzeug SharedDataMiddleware, but
    made Uliweb compatible.

    It serves files found under the project's static folder, falling back to
    the bundled ``static`` directory of each installed Uliweb app, and
    delegates every non-static URL to the wrapped application.
    """
    def __init__(self, app, STATIC_URL, disallow=None, cache=True,
                 cache_timeout=60 * 60 * 12):
        # NOTE(review): SharedDataMiddleware.__init__ is intentionally not
        # called; this class builds its own exports table below - confirm
        # no base-class state is required.
        self.app = app
        # Full URL prefix for static files, guaranteed to end with '/'.
        # (The original assigned self.app twice; the duplicate is removed.)
        self.url_suffix = settings.DOMAINS.static.get('url_prefix', '')+STATIC_URL.rstrip('/') + '/'
        self.exports = {}
        self.cache = cache
        self.cache_timeout = cache_timeout
        path = os.path.normpath(settings.STATICFILES.get('STATIC_FOLDER', ''))
        if path == '.':
            path = ''
        self.exports[self.url_suffix] = self.loader(path)
        if disallow is not None:
            from fnmatch import fnmatch
            # Shadow the is_allowed method with a pattern-based check.
            self.is_allowed = lambda x: not fnmatch(x, disallow)
    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files.  However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True
    def loader(self, dir):
        def _loader(filename):
            from werkzeug.exceptions import Forbidden, NotFound
            from uliweb.utils.common import pkg
            app = self.app
            if dir:
                fname = os.path.normpath(os.path.join(dir, filename)).replace('\\', '/')
                # Reject path traversal escaping the static directory.
                if not fname.startswith(dir):
                    return Forbidden("You can only visit the files under static directory."), None
                if os.path.exists(fname):
                    return fname, self._opener(fname)
            # Not in the project static folder: search every installed
            # app's bundled 'static' directory (later apps first).
            for p in reversed(app.apps):
                fname = os.path.normpath(os.path.join('static', filename)).replace('\\', '/')
                if not fname.startswith('static/'):
                    return Forbidden("You can only visit the files under static directory."), None
                f = pkg.resource_filename(p, fname)
                if os.path.exists(f):
                    return f, self._opener(f)
            return NotFound("Can't found the file %s" % filename), None
        return _loader
    def __call__(self, environ, start_response):
        from werkzeug.exceptions import Forbidden
        # sanitize the path for non unix systems
        cleaned_path = environ.get('PATH_INFO', '').strip('/')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                cleaned_path = cleaned_path.replace(sep, '/')
        path = '/'.join([''] + [x for x in cleaned_path.split('/')
                                if x and x != '..'])
        file_loader = None
        flag = False
        for search_path, loader in self.exports.iteritems():
            if search_path == path:
                flag = True
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith('/'):
                search_path += '/'
            if path.startswith(search_path):
                flag = True
                real_filename, file_loader = loader(path[len(search_path):])
                if file_loader is not None:
                    break
        if file_loader is None:
            if flag:
                # A static prefix matched but no file resolved: real_filename
                # holds a Forbidden/NotFound response object here.
                return real_filename(environ, start_response)
            else:
                return self.app(environ, start_response)
        if not self.is_allowed(real_filename):
            return Forbidden("You can not visit the file %s." % real_filename)(environ, start_response)
        res = filedown(environ, real_filename, self.cache, self.cache_timeout)
        return res(environ, start_response)
| bsd-2-clause |
carschar/xhtml2pdf | xhtml2pdf/turbogears.py | 99 | 1449 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from turbogears.decorator import weak_signature_decorator
import xhtml2pdf.pisa as pisa
import StringIO
import cherrypy
def to_pdf(filename=None, content_type="application/pdf"):
    """Decorator factory: render a controller's HTML output as a PDF.

    On successful conversion the CherryPy response headers are set to the
    given content type (plus a Content-Disposition attachment header when
    *filename* is provided) and the PDF bytes are returned; on conversion
    failure the original HTML output is returned untouched.
    """
    def entangle(func):
        def decorated(func, *args, **kw):
            html = func(*args, **kw)
            pdf_buffer = StringIO.StringIO()
            result = pisa.CreatePDF(StringIO.StringIO(html), pdf_buffer)
            if result.err:
                # Conversion failed: fall back to the raw controller output.
                return html
            cherrypy.response.headers["Content-Type"] = content_type
            if filename:
                cherrypy.response.headers["Content-Disposition"] = "attachment; filename=" + filename
            return pdf_buffer.getvalue()
        return decorated
    return weak_signature_decorator(entangle)

# Convenience alias.
topdf = to_pdf
| apache-2.0 |
googlefonts/pyfontaine | fontaine/charsets/noto_chars/notosansmandaic_regular.py | 2 | 1761 | # -*- coding: utf-8 -*-
class Charset(object):
    """Static glyph coverage description for NotoSansMandaic-Regular."""

    common_name = 'NotoSansMandaic-Regular'
    native_name = ''

    # Code points covered by the font, in the original registration order:
    # a few control/space characters, the Arabic tatweel, and the Mandaic
    # block (U+0840..U+085E).
    _CODEPOINTS = (
        0x0000,  # .null
        0x0640,  # ARABIC TATWEEL
        0x000D,  # CARRIAGE RETURN
        0x084B,  # MANDAIC LETTER AB
        0x0020,  # SPACE
        0x0840, 0x0841, 0x0842, 0x0843, 0x0844, 0x0845, 0x0846, 0x0847,
        0x0848, 0x0849, 0x084A,
        0x00A0,  # NO-BREAK SPACE
        0x084C, 0x084D, 0x084E, 0x084F, 0x0850, 0x0851, 0x0852, 0x0853,
        0x0854, 0x0855, 0x0856, 0x0857, 0x0858, 0x0859, 0x085A, 0x085B,
        0x085E,  # MANDAIC PUNCTUATION
        0xFEFF,  # ZERO WIDTH NO-BREAK SPACE
    )

    def glyphs(self):
        """Return a fresh list of the supported code points (ints)."""
        return list(self._CODEPOINTS)
| gpl-3.0 |
cicatiello/PokemonGo-Bot | pokemongo_bot/cell_workers/spin_fort.py | 3 | 6272 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from pgoapi.utilities import f2i
from pokemongo_bot.constants import Constants
from pokemongo_bot.human_behaviour import sleep
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.cell_workers.base_task import BaseTask
from utils import distance, format_time, fort_details
class SpinFort(BaseTask):
    """Bot task that spins the nearest in-range pokestop.

    Handles every FORT_SEARCH result code this code recognizes:
    1 success, 2 out of range, 3 on cooldown, 4 inventory full.
    """

    def should_run(self):
        # Only spin when there is room for loot; otherwise warn and skip.
        if not self.bot.has_space_for_loot():
            self.emit_event(
                'inventory_full',
                formatted="Not moving to any forts as there aren't enough space. You might want to change your config to recycle more items if this message appears consistently."
            )
            return False
        return True

    def work(self):
        """Spin one fort; returns WorkerResult.SUCCESS, a chain-hack
        sequence number, 11 (rate-limited/softban path) or 0."""
        fort = self.get_fort_in_range()
        if not self.should_run() or fort is None:
            return WorkerResult.SUCCESS
        lat = fort['latitude']
        lng = fort['longitude']
        details = fort_details(self.bot, fort['id'], lat, lng)
        fort_name = details.get('name', 'Unknown')
        response_dict = self.bot.api.fort_search(
            fort_id=fort['id'],
            fort_latitude=lat,
            fort_longitude=lng,
            player_latitude=f2i(self.bot.position[0]),
            player_longitude=f2i(self.bot.position[1])
        )
        if 'responses' in response_dict and \
           'FORT_SEARCH' in response_dict['responses']:
            spin_details = response_dict['responses']['FORT_SEARCH']
            spin_result = spin_details.get('result', -1)
            if spin_result == 1:
                # A successful spin clears any softban assumption.
                self.bot.softban = False
                experience_awarded = spin_details.get('experience_awarded', 0)
                items_awarded = spin_details.get('items_awarded', {})
                if items_awarded:
                    # Loot changed the bag; invalidate the cached inventory.
                    self.bot.latest_inventory = None
                    # Aggregate per-item-name counts for the log event.
                    tmp_count_items = {}
                    for item in items_awarded:
                        item_id = item['item_id']
                        item_name = self.bot.item_list[str(item_id)]
                        if not item_name in tmp_count_items:
                            tmp_count_items[item_name] = item['item_count']
                        else:
                            tmp_count_items[item_name] += item['item_count']
                # NOTE(review): tmp_count_items is only bound when
                # items_awarded is non-empty; an XP-only spin would raise
                # NameError in the event below - confirm whether the server
                # can return experience without items.
                if experience_awarded or items_awarded:
                    self.emit_event(
                        'spun_pokestop',
                        formatted="Spun pokestop {pokestop}. Experience awarded: {exp}. Items awarded: {items}",
                        data={
                            'pokestop': fort_name,
                            'exp': experience_awarded,
                            'items': tmp_count_items
                        }
                    )
                else:
                    self.emit_event(
                        'pokestop_empty',
                        formatted='Found nothing in pokestop {pokestop}.',
                        data={'pokestop': fort_name}
                    )
                pokestop_cooldown = spin_details.get(
                    'cooldown_complete_timestamp_ms')
                self.bot.fort_timeouts.update({fort["id"]: pokestop_cooldown})
                # Remember this fort so it is not immediately revisited.
                self.bot.recent_forts = self.bot.recent_forts[1:] + [fort['id']]
            elif spin_result == 2:
                self.emit_event(
                    'pokestop_out_of_range',
                    formatted="Pokestop {pokestop} out of range.",
                    data={'pokestop': fort_name}
                )
            elif spin_result == 3:
                pokestop_cooldown = spin_details.get(
                    'cooldown_complete_timestamp_ms')
                if pokestop_cooldown:
                    self.bot.fort_timeouts.update({fort["id"]: pokestop_cooldown})
                    seconds_since_epoch = time.time()
                    minutes_left = format_time(
                        (pokestop_cooldown / 1000) - seconds_since_epoch
                    )
                    self.emit_event(
                        'pokestop_on_cooldown',
                        formatted="Pokestop {pokestop} on cooldown. Time left: {minutes_left}.",
                        data={'pokestop': fort_name, 'minutes_left': minutes_left}
                    )
            elif spin_result == 4:
                self.emit_event(
                    'inventory_full',
                    formatted="Inventory is full!"
                )
            else:
                self.emit_event(
                    'unknown_spin_result',
                    formatted="Unknown spint result {status_code}",
                    data={'status_code': str(spin_result)}
                )
            if 'chain_hack_sequence_number' in response_dict['responses'][
                    'FORT_SEARCH']:
                time.sleep(2)
                return response_dict['responses']['FORT_SEARCH'][
                    'chain_hack_sequence_number']
            else:
                self.emit_event(
                    'pokestop_searching_too_often',
                    formatted="Possibly searching too often, take a rest."
                )
                # items_awarded/experience_awarded/pokestop_cooldown are
                # only bound when spin_result == 1; the short-circuiting
                # `and` keeps this reference safe for other result codes.
                if spin_result == 1 and not items_awarded and not experience_awarded and not pokestop_cooldown:
                    self.bot.softban = True
                    self.emit_event(
                        'softban',
                        formatted='Probably got softban.'
                    )
                else:
                    self.bot.fort_timeouts[fort["id"]] = (time.time() + 300) * 1000  # Don't spin for 5m
                return 11
        sleep(2)
        return 0

    def get_fort_in_range(self):
        """Return the nearest reachable fort not on timeout, or None."""
        forts = self.bot.get_forts(order_by_distance=True)
        # Skip forts we recently spun or that are cooling down.
        forts = filter(lambda x: x["id"] not in self.bot.fort_timeouts, forts)
        if len(forts) == 0:
            return None
        fort = forts[0]
        distance_to_fort = distance(
            self.bot.position[0],
            self.bot.position[1],
            fort['latitude'],
            fort['longitude']
        )
        if distance_to_fort <= Constants.MAX_DISTANCE_FORT_IS_REACHABLE:
            return fort
        return None
| gpl-3.0 |
yoer/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/messages/storage/fallback.py | 627 | 2171 | from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
    """
    Tries to store all messages in the first backend, storing any unstored
    messages in each subsequent backend.
    """
    # Ordered by priority: the cookie backend is tried first, then session.
    storage_classes = (CookieStorage, SessionStorage)

    def __init__(self, *args, **kwargs):
        super(FallbackStorage, self).__init__(*args, **kwargs)
        # One instance of each backend class, in priority order.
        self.storages = [storage_class(*args, **kwargs)
                         for storage_class in self.storage_classes]
        # Backends that actually yielded messages on retrieval; they must
        # still be flushed on store even when nothing remains to save.
        self._used_storages = set()

    def _get(self, *args, **kwargs):
        """
        Gets a single list of messages from all storage backends.
        """
        all_messages = []
        for storage in self.storages:
            messages, all_retrieved = storage._get()
            # If the backend hasn't been used, no more retrieval is necessary.
            if messages is None:
                break
            if messages:
                self._used_storages.add(storage)
            all_messages.extend(messages)
            # If this storage class contained all the messages, no further
            # retrieval is necessary
            if all_retrieved:
                break
        return all_messages, all_retrieved

    def _store(self, messages, response, *args, **kwargs):
        """
        Stores the messages, returning any unstored messages after trying all
        backends.

        For each storage backend, any messages not stored are passed on to the
        next backend.
        """
        for storage in self.storages:
            if messages:
                messages = storage._store(messages, response,
                                          remove_oldest=False)
            # Even if there are no more messages, continue iterating to ensure
            # storages which contained messages are flushed.
            elif storage in self._used_storages:
                storage._store([], response)
                self._used_storages.remove(storage)
        return messages
| apache-2.0 |
crentagon/chess-with-benefits | game/chess/show_piece_stats.py | 1 | 2090 |
def run(self, board_input, i, j):
    """Classify the piece standing on tile (i, j) and publish its stats.

    Sets piece.status to one of 'Threatened', 'Warrior', 'Defender' or
    'Healthy' (checked in that order) and stores a summary dict on
    self.piece_stats.
    """
    piece = board_input[i][j].piece

    # Upper bound on the number of tiles each piece type can ever control.
    max_control = {
        1: 2,
        3: 8,
        4: 13,
        5: 14,
        9: 27,
        0: 8
    }

    piece.status = 'Healthy'

    outnumbered = len(piece.attackers) > len(piece.defenders)
    attacked_by_cheaper = any(a < piece.piece_type for a in piece.attackers)
    very_active = piece.tiles_controlled > 0.6 * max_control[piece.piece_type]
    attack_count = len(piece.offensive_power)
    defense_count = len(piece.defensive_power)

    if attacked_by_cheaper or outnumbered:
        # Attacked by a cheaper piece, or attackers outnumber defenders.
        piece.status = 'Threatened'
    elif attack_count >= 2 or very_active:
        # Attacking several pieces, or covering most reachable tiles.
        piece.status = 'Warrior'
    elif defense_count >= 2:
        # Supporting at least two friendly pieces.
        piece.status = 'Defender'

    self.piece_stats = {
        'is_piece_white': piece.is_white,
        'piece_type': piece.piece_type,
        'tile_control_count': piece.tiles_controlled,
        'defenders': piece.defenders,
        'attackers': piece.attackers,
        'defensive_power': piece.defensive_power,
        'offensive_power': piece.offensive_power,
        'status': piece.status
    }
# "Status":
# Defender/Royal Defender (defending at least two pieces/Defending the King)
# Warrior (attacking at least one piece OR in a valuable position OR at 60% maximum activity)
# Healthy (default)
# Threatened (being attacked by a piece without being defended OR being attacked by a piece of lower rank)
# Note: place its value right next to it
# Number of tiles controlled: "Tile Control Count: " // add counter at the bottom
# Number of pieces attacking it: "Attackers: "
# Number of pieces defending it: "Supporters: "
# Number of pieces it is attacking: "Offensive power: "
# Number of pieces it is defending: "Defensive power: " | gpl-3.0 |
ODM2/YODA-Tools | tests/test_converter/test_output/test_dbOutput.py | 2 | 1029 | import os
from tests.test_util import build_ts_session, build_ts_specimen_session
from yodatools.converter.Outputs.dbOutput import dbOutput
from odm2api.ODMconnection import dbconnection
from odm2api.models import People, SamplingFeatures, MeasurementResultValues, TimeSeriesResultValues
curr_folder = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# curr_folder = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# file_path = os.path.join(curr_folder, 'test_files', 'YODA_TimeSeriesSpecimen_RB_2014-15_pub.xlsx')
class TestDb:
    """Smoke tests: persist parsed sessions through dbOutput into sqlite."""

    def setup(self):
        # In-memory sqlite keeps the tests self-contained.
        # (A MySQL URL such as 'mysql+pymysql://ODM:odm@localhost/odm2'
        # can be substituted for integration runs.)
        self.connection_string = 'sqlite://'
        self.do = dbOutput(self.connection_string)

    def test_create_specimen(self):
        specimen_session = build_ts_specimen_session()
        self.do.save(specimen_session, self.connection_string)

    def test_create_ts(self):
        ts_session = build_ts_session()
        self.do.save(ts_session, self.connection_string)
crimsonthunder/TRLTE_AOSP_Kernel | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}  # str(drop location) -> number of packets dropped there
kallsyms = []  # sorted list of (address, symbol name) from /proc/kallsyms
def get_kallsyms_table():
	"""Load /proc/kallsyms into the global, sorted `kallsyms` list.

	Silently does nothing when the file cannot be opened (non-Linux
	system or insufficient permissions).
	"""
	global kallsyms
	try:
		f = open("/proc/kallsyms", "r")
	except IOError:
		# Narrowed from a bare `except:` so real errors are not masked.
		return
	for line in f:
		fields = line.split()
		loc = int(fields[0], 16)
		name = fields[2]
		kallsyms.append((loc, name))
	f.close()
	kallsyms.sort()
def get_sym(sloc):
	"""Resolve an address (given as a string) to (symbol_name, offset).

	Binary-searches the global `kallsyms` table; returns (None, 0) when
	the address precedes every known symbol or the table is empty.
	"""
	loc = int(sloc)

	# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
	#            kallsyms[i][0] >  loc for all end <= i < len(kallsyms)
	start, end = -1, len(kallsyms)
	while end != start + 1:
		pivot = (start + end) // 2

		if loc < kallsyms[pivot][0]:
			end = pivot
		else:
			start = pivot

	# Now (start == -1 or kallsyms[start][0] <= loc)
	# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
	if start >= 0:
		symloc, name = kallsyms[start]
		return (name, loc - symloc)
	else:
		return (None, 0)
def print_drop_table():
	# Print one row per drop location: resolved symbol, offset, count.
	# (Python 2 print statements: perf embeds a python2 interpreter.)
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			# Unresolvable address: fall back to the raw location string.
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf hook: called once before event processing starts.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf hook: called after the last event. The symbol table is loaded
	# lazily here so trace startup stays cheap.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# Called from perf when it finds a corresponding skb:kfree_skb event.
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		skbaddr, location, protocol):
	"""Count one dropped packet against its kernel code location."""
	slocation = str(location)
	# dict.get replaces the old try/bare-except counter, which could
	# silently swallow unrelated errors (including KeyboardInterrupt).
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
ravi-sharma/python-api-library | src/kayako/exception.py | 3 | 1207 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2011, Evan Leis
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on May 5, 2011
@author: evan
'''
class KayakoError(StandardError):
    # Root of the Kayako exception hierarchy (Python 2: StandardError base).

    @property
    def read(self):
        '''
        Returns the read function of the first readable argument in this
        exception.
        '''
        if self.args:
            for arg in self.args:
                if hasattr(arg, 'read'):
                    if callable(arg.read):
                        return arg.read
                    else:
                        # Non-callable `read` attribute: wrap it so this
                        # property always yields something callable.
                        return lambda: arg.read
class KayakoInitializationError(KayakoError):
    '''Error category for failures while initializing the API library.'''
    pass

# COMM ERRORS

class KayakoIOError(KayakoError):
    '''Base category for communication (request/response) errors.'''
    pass

# REQUEST ERRORS

class KayakoRequestError(KayakoIOError):
    '''Error category for the request side of communication.'''
    pass

class KayakoMethodNotImplementedError(KayakoRequestError):
    '''
    An exception for when an HTTP request method is not implemented for an
    object.
    '''
    pass

# RESPONSE ERROR

class KayakoResponseError(KayakoIOError):
    '''Error category for the response side of communication.'''
    pass
| bsd-2-clause |
q1ang/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
    """
    This dataset is two lines with a slope ~ 1, where one has
    a y offset of ~100
    """
    # First 1000 points: line shifted up by ~100 (label 0).
    shifted = np.vstack((
        _generate_vector(),
        _generate_vector() + 100,
    )).T
    # Last 1000 points: unshifted line (label 1).
    unshifted = np.vstack((
        _generate_vector(),
        _generate_vector(),
    )).T
    features = np.vstack((shifted, unshifted))
    labels = np.hstack((np.zeros(1000), np.ones(1000)))
    return features, labels
def all_but_first_column(X):
    """Return ``X`` without its first column (drops the first component)."""
    return X[:, 1:]
def drop_first_component(X, y):
    """
    Create a pipeline with PCA and the column selector and use it to
    transform the dataset.

    Returns the transformed test split and its labels.
    """
    # NOTE(review): train_test_split comes from sklearn.cross_validation
    # here (see imports); newer scikit-learn moved it to
    # sklearn.model_selection.
    pipeline = make_pipeline(
        PCA(), FunctionTransformer(all_but_first_column),
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    pipeline.fit(X_train, y_train)
    return pipeline.transform(X_test), y_test
if __name__ == '__main__':
    # Show the raw two-line dataset first...
    X, y = generate_dataset()
    plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
    plt.show()
    # ...then the PCA-transformed data with the first component dropped:
    # the remaining component is plotted on the x axis against zeros.
    X_transformed, y_transformed = drop_first_component(*generate_dataset())
    plt.scatter(
        X_transformed[:, 0],
        np.zeros(len(X_transformed)),
        c=y_transformed,
        s=50,
    )
    plt.show()
| bsd-3-clause |
sinkuri256/python-for-android | python3-alpha/extra_modules/gdata/test_config.py | 46 | 17822 | #!/usr/bin/env python
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import getpass
import inspect
import atom.mock_http_core
import gdata.gauth
"""Loads configuration for tests which connect to Google servers.
Settings used in tests are stored in a ConfigCollection instance in this
module called options. If your test needs to get a test related setting,
use
import gdata.test_config
option_value = gdata.test_config.options.get_value('x')
The above will check the command line for an '--x' argument, and if not
found will either use the default value for 'x' or prompt the user to enter
one.
Your test can override the value specified by the user by performing:
gdata.test_config.options.set_value('x', 'y')
If your test uses a new option which you would like to allow the user to
specify on the command line or via a prompt, you can use the register_option
method as follows:
gdata.test_config.options.register(
'option_name', 'Prompt shown to the user', secret=False #As for password.
'This is the description of the option, shown when help is requested.',
'default value, provide only if you do not want the user to be prompted')
"""
class Option(object):
  """A single configurable test setting.

  The value is resolved in this order: a command line flag
  (``--name=value`` or ``--name value``), the registered default, then an
  interactive prompt (hidden via getpass when `secret` is set).
  """

  def __init__(self, name, prompt, secret=False, description=None, default=None):
    self.name = name
    self.prompt = prompt
    self.secret = secret  # When True, prompt without echoing input.
    self.description = description
    self.default = default

  def get(self):
    """Return this option's value, prompting the user if necessary."""
    value = self.default
    # Check for a command line parameter.
    for i, arg in enumerate(sys.argv):
      if arg.startswith('--%s=' % self.name):
        # maxsplit=1 keeps values which themselves contain '=' intact
        # (the old split('=')[1] truncated them).
        value = arg.split('=', 1)[1]
      elif arg == '--%s' % self.name and i + 1 < len(sys.argv):
        # The bounds check guards against '--name' being the final
        # argument, which previously raised IndexError.
        value = sys.argv[i + 1]
    # If the param was not on the command line, ask the user to input the
    # value.
    # In order for this to prompt the user, the default value for the option
    # must be None.
    if value is None:
      prompt = '%s: ' % self.prompt
      if self.secret:
        value = getpass.getpass(prompt)
      else:
        print('You can specify this on the command line using --%s' % self.name)
        value = input(prompt)
    return value
class ConfigCollection(object):
  """Registry of Option objects plus a cache of resolved values."""

  def __init__(self, options=None):
    self.options = options or {}
    self.values = {}

  def register_option(self, option):
    """Add an already-constructed Option, keyed by its name."""
    self.options[option.name] = option

  def register(self, *args, **kwargs):
    """Construct an Option from the arguments and register it."""
    self.register_option(Option(*args, **kwargs))

  def get_value(self, option_name):
    """Return the option's value, resolving and caching it on first use."""
    try:
      return self.values[option_name]
    except KeyError:
      pass
    resolved = self.options[option_name].get()
    if resolved is not None:
      self.values[option_name] = resolved
    return resolved

  def set_value(self, option_name, value):
    """Override (or pre-seed) the cached value for an option."""
    self.values[option_name] = value

  def render_usage(self):
    """Build the --flag help text, one line per registered option."""
    usage_lines = ['--%s: %s' % (flag, option.description)
                   for flag, option in self.options.items()]
    return '\n'.join(usage_lines)
# The shared, module-wide option collection used by all gdata tests.
options = ConfigCollection()

# Register the default options.
# Values are resolved lazily: from the command line, the default, or an
# interactive prompt the first time a test asks for them.
options.register(
    'username',
    'Please enter the email address of your test account',
    description=('The email address you want to sign in with. '
                 'Make sure this is a test account as these tests may edit'
                 ' or delete data.'))
options.register(
    'password',
    'Please enter the password for your test account',
    secret=True, description='The test account password.')
options.register(
    'clearcache',
    'Delete cached data? (enter true or false)',
    description=('If set to true, any temporary files which cache test'
                 ' requests and responses will be deleted.'),
    default='true')
options.register(
    'savecache',
    'Save requests and responses in a temporary file? (enter true or false)',
    description=('If set to true, requests to the server and responses will'
                 ' be saved in temporary files.'),
    default='false')
options.register(
    'runlive',
    'Run the live tests which contact the server? (enter true or false)',
    description=('If set to true, the tests will make real HTTP requests to'
                 ' the servers. This slows down test execution and may'
                 ' modify the users data, be sure to use a test account.'),
    default='true')
options.register(
    'host',
    'Run the live tests against the given host',
    description='Examples: docs.google.com, spreadsheets.google.com, etc.',
    default='')
options.register(
    'ssl',
    'Run the live tests over SSL (enter true or false)',
    description='If set to true, all tests will be performed over HTTPS (SSL)',
    default='false')
options.register(
    'clean',
    'Clean ALL data first before and after each test (enter true or false)',
    description='If set to true, all tests will remove all data (DANGEROUS)',
    default='false')
options.register(
    'appsusername',
    'Please enter the email address of your test Apps domain account',
    description=('The email address you want to sign in with. '
                 'Make sure this is a test account on your Apps domain as '
                 'these tests may edit or delete data.'))
options.register(
    'appspassword',
    'Please enter the password for your test Apps domain account',
    secret=True, description='The test Apps account password.')
# Other options which may be used if needed.
# These Option instances are deliberately NOT registered in `options`;
# individual test suites register just the ones they require.
BLOG_ID_OPTION = Option(
    'blogid',
    'Please enter the ID of your test blog',
    description=('The blog ID for the blog which should have test posts added'
                 ' to it. Example 7682659670455539811'))
TEST_IMAGE_LOCATION_OPTION = Option(
    'imgpath',
    'Please enter the full path to a test image to upload',
    description=('This test image will be uploaded to a service which'
                 ' accepts a media file, it must be a jpeg.'))
SPREADSHEET_ID_OPTION = Option(
    'spreadsheetid',
    'Please enter the ID of a spreadsheet to use in these tests',
    description=('The spreadsheet ID for the spreadsheet which should be'
                 ' modified by theses tests.'))
APPS_DOMAIN_OPTION = Option(
    'appsdomain',
    'Please enter your Google Apps domain',
    description=('The domain the Google Apps is hosted on or leave blank'
                 ' if n/a'))
SITES_NAME_OPTION = Option(
    'sitename',
    'Please enter name of your Google Site',
    description='The webspace name of the Site found in its URL.')
PROJECT_NAME_OPTION = Option(
    'project_name',
    'Please enter the name of your project hosting project',
    description=('The name of the project which should have test issues added'
                 ' to it. Example gdata-python-client'))
ISSUE_ASSIGNEE_OPTION = Option(
    'issue_assignee',
    'Enter the email address of the target owner of the updated issue.',
    description=('The email address of the user a created issue\'s owner will '
                 ' become. Example testuser2@gmail.com'))
GA_TABLE_ID = Option(
    'table_id',
    'Enter the Table ID of the Google Analytics profile to test',
    description=('The Table ID of the Google Analytics profile to test.'
                 ' Example ga:1174'))
TARGET_USERNAME_OPTION = Option(
    'targetusername',
    'Please enter the username (without domain) of the user which will be'
    ' affected by the tests',
    description=('The username of the user to be tested'))
YT_DEVELOPER_KEY_OPTION = Option(
    'developerkey',
    'Please enter your YouTube developer key',
    description=('The YouTube developer key for your account'))
YT_CLIENT_ID_OPTION = Option(
    'clientid',
    'Please enter your YouTube client ID',
    description=('The YouTube client ID for your account'))
YT_VIDEO_ID_OPTION = Option(
    'videoid',
    'Please enter the ID of a YouTube video you uploaded',
    description=('The video ID of a YouTube video uploaded to your account'))
# Functions to inject a cachable HTTP client into a service client.
def configure_client(client, case_name, service_name, use_apps_auth=False):
  """Sets up a mock client which will reuse a saved session.

  Should be called during setUp of each unit test.

  Handles authentication to allow the GDClient to make requests which
  require an auth header.

  Args:
    client: a gdata.GDClient whose http_client member should be replaced
            with a atom.mock_http_core.MockHttpClient so that repeated
            executions can used cached responses instead of contacting
            the server.
    case_name: str The name of the test case class. Examples: 'BloggerTest',
               'ContactsTest'. Used to save a session
               for the ClientLogin auth token request, so the case_name
               should be reused if and only if the same username, password,
               and service are being used.
    service_name: str The service name as used for ClientLogin to identify
                  the Google Data API being accessed. Example: 'blogger',
                  'wise', etc.
    use_apps_auth: bool (optional) If set to True, use appsusername and
                   appspassword command-line args instead of username and
                   password respectively.
  """
  # Use a mock HTTP client which will record and replay the HTTP traffic
  # from these tests.
  client.http_client = atom.mock_http_core.MockHttpClient()
  client.http_client.cache_case_name = case_name
  # Getting the auth token only needs to be done once in the course of test
  # runs.
  auth_token_key = '%s_auth_token' % service_name
  if (auth_token_key not in options.values
      and options.get_value('runlive') == 'true'):
    # Record (or replay) the ClientLogin exchange under its own test name.
    client.http_client.cache_test_name = 'client_login'
    cache_name = client.http_client.get_cache_file_name()
    if options.get_value('clearcache') == 'true':
      client.http_client.delete_session(cache_name)
    client.http_client.use_cached_session(cache_name)
    if not use_apps_auth:
      username = options.get_value('username')
      password = options.get_value('password')
    else:
      username = options.get_value('appsusername')
      password = options.get_value('appspassword')
    auth_token = client.client_login(username, password, case_name,
                                     service=service_name)
    # Serialize the token so every later test in this run can reuse it.
    options.values[auth_token_key] = gdata.gauth.token_to_blob(auth_token)
    if client.alt_auth_service is not None:
      options.values[client.alt_auth_service] = gdata.gauth.token_to_blob(
          client.alt_auth_token)
    client.http_client.close_session()
  # Allow a config auth_token of False to prevent the client's auth header
  # from being modified.
  if auth_token_key in options.values:
    client.auth_token = gdata.gauth.token_from_blob(
        options.values[auth_token_key])
    if client.alt_auth_service is not None:
      client.alt_auth_token = gdata.gauth.token_from_blob(
          options.values[client.alt_auth_service])
  if options.get_value('host'):
    client.host = options.get_value('host')
def configure_cache(client, test_name):
  """Loads or begins a cached session to record HTTP traffic.

  Should be called at the beginning of each test method.

  Args:
    client: a gdata.GDClient whose http_client member has been replaced
            with a atom.mock_http_core.MockHttpClient so that repeated
            executions can used cached responses instead of contacting
            the server.
    test_name: str The name of this test method. Examples:
               'TestClass.test_x_works', 'TestClass.test_crud_operations'.
               This is used to name the recording of the HTTP requests and
               responses, so it should be unique to each test method in the
               test case.
  """
  # Auth token is obtained in configure_client which is called as part of
  # setUp.
  http = client.http_client
  http.cache_test_name = test_name
  session_file = http.get_cache_file_name()
  if options.get_value('clearcache') == 'true':
    http.delete_session(session_file)
  http.use_cached_session(session_file)
def close_client(client):
  """Saves the recorded responses to a temp file if the config allows.

  This should be called in the unit test's tearDown method. Only saves
  sessions for replay when the 'savecache' option is set to 'true'.
  """
  if not client:
    return
  if options.get_value('savecache') == 'true':
    # If this was a live request, save the recording.
    client.http_client.close_session()
def configure_service(service, case_name, service_name):
  """Sets up a mock GDataService v1 client to reuse recorded sessions.

  Should be called during setUp of each unit test. This is a duplicate of
  configure_client, modified to handle old v1 service classes.
  """
  service.http_client.v2_http_client = atom.mock_http_core.MockHttpClient()
  service.http_client.v2_http_client.cache_case_name = case_name
  # Getting the auth token only needs to be done once in the course of test
  # runs.
  auth_token_key = 'service_%s_auth_token' % service_name
  if (auth_token_key not in options.values
      and options.get_value('runlive') == 'true'):
    # Record (or replay) the ClientLogin exchange under its own test name.
    service.http_client.v2_http_client.cache_test_name = 'client_login'
    cache_name = service.http_client.v2_http_client.get_cache_file_name()
    if options.get_value('clearcache') == 'true':
      service.http_client.v2_http_client.delete_session(cache_name)
    service.http_client.v2_http_client.use_cached_session(cache_name)
    service.ClientLogin(options.get_value('username'),
                        options.get_value('password'),
                        service=service_name, source=case_name)
    # Cache the token string so later tests in this run can reuse it.
    options.values[auth_token_key] = service.GetClientLoginToken()
    service.http_client.v2_http_client.close_session()
  if auth_token_key in options.values:
    service.SetClientLoginToken(options.values[auth_token_key])
def configure_service_cache(service, test_name):
  """Loads or starts a session recording for a v1 Service object.

  Mirrors configure_cache, but targets the v2 HTTP client embedded in a
  v1 Service's http_client.
  """
  v2_client = service.http_client.v2_http_client
  v2_client.cache_test_name = test_name
  session_file = v2_client.get_cache_file_name()
  if options.get_value('clearcache') == 'true':
    v2_client.delete_session(session_file)
  v2_client.use_cached_session(session_file)
def close_service(service):
  """Persist the recorded session for a v1 Service when 'savecache' is on."""
  if not service:
    return
  if options.get_value('savecache') == 'true':
    # If this was a live request, save the recording.
    service.http_client.v2_http_client.close_session()
def build_suite(classes):
  """Creates a TestSuite for all unit test classes in the list.

  Assumes that each of the classes in the list has unit test methods which
  begin with 'test'. Uses unittest.TestLoader.loadTestsFromTestCase, which
  replaces the deprecated unittest.makeSuite (removed in Python 3.13).

  Returns:
    A new unittest.TestSuite containing a test suite for all classes.
  """
  loader = unittest.TestLoader()
  suites = [loader.loadTestsFromTestCase(a_class) for a_class in classes]
  return unittest.TestSuite(suites)
def check_data_classes(test, classes):
    """Sanity-check conventions on XmlElement data classes.

    For each class: require a docstring; require any _qname to be one or
    more fully-qualified strings (not just a '{namespace}' prefix); and
    require every public class attribute to be a string, function/method,
    property, XmlElement subclass, or list of XmlElement subclasses.

    Args:
      test: a unittest.TestCase-like object whose assert_/fail methods
          report failures.
      classes: iterable of atom.core.XmlElement subclasses to inspect.
    """
    import inspect
    for data_class in classes:
        test.assert_(data_class.__doc__ is not None,
                     'The class %s should have a docstring' % data_class)
        if hasattr(data_class, '_qname'):
            qname_versions = None
            # _qname may be a single string, or a tuple with one entry per
            # wire-protocol version; normalize to a tuple for the checks.
            if isinstance(data_class._qname, tuple):
                qname_versions = data_class._qname
            else:
                qname_versions = (data_class._qname,)
            for versioned_qname in qname_versions:
                test.assert_(isinstance(versioned_qname, str),
                             'The class %s has a non-string _qname' % data_class)
                # A qname ending in '}' is only a '{namespace}' with no tag.
                test.assert_(not versioned_qname.endswith('}'),
                             'The _qname for class %s is only a namespace' % (
                                 data_class))
        for attribute_name, value in data_class.__dict__.items():
            # Ignore all elements that start with _ (private members)
            if not attribute_name.startswith('_'):
                try:
                    if not (isinstance(value, str) or inspect.isfunction(value)
                            or (isinstance(value, list)
                                and issubclass(value[0], atom.core.XmlElement))
                            or type(value) == property  # Allow properties.
                            or inspect.ismethod(value)  # Allow methods.
                            or inspect.ismethoddescriptor(value)  # Allow method descriptors.
                            # staticmethod et al.
                            or issubclass(value, atom.core.XmlElement)):
                        test.fail(
                            'XmlElement member should have an attribute, XML class,'
                            ' or list of XML classes as attributes.')
                except TypeError:
                    # issubclass raises TypeError when value is not a class at
                    # all (e.g. an int); report it as a bad member type.
                    test.fail('Element %s in %s was of type %s' % (
                        attribute_name, data_class._qname, type(value)))
def check_clients_with_auth(test, classes):
    """Verify that each client class declares the required auth metadata.

    Every class must define api_version, auth_service (a string service
    name) and auth_scopes (a list or tuple of scope URLs).

    Args:
      test: a unittest.TestCase-like object whose assert_ reports failures.
      classes: iterable of client classes to inspect.
    """
    for client_class in classes:
        test.assert_(hasattr(client_class, 'api_version'))
        # Check existence before type so a missing attribute surfaces as a
        # test failure instead of an AttributeError.  (The original also
        # had a redundant weaker isinstance (str, int) check before the
        # hasattr assertion; the strict str check below subsumes it.)
        test.assert_(hasattr(client_class, 'auth_service'))
        test.assert_(isinstance(client_class.auth_service, str))
        test.assert_(hasattr(client_class, 'auth_scopes'))
        test.assert_(isinstance(client_class.auth_scopes, (list, tuple)))
| apache-2.0 |
Silviumik/Silviu_Kernel_I9195_LTE_KitKat | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None            # only count syscalls made by this command, if set
default_interval = 3       # seconds between screen refreshes
interval = default_interval

# Argument handling: at most two positional args, [comm] [interval].
if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        # a single numeric argument is the refresh interval...
        interval = int(sys.argv[1])
    except ValueError:
        # ...anything else is a command name to filter on
        for_comm = sys.argv[1]
        interval = default_interval

# Per-syscall-id hit counters; autodict comes from perf's Util helpers.
syscalls = autodict()
def trace_begin():
    """perf hook: invoked once before event processing begins.

    Spawns the background thread that periodically prints the running
    syscall totals every `interval` seconds.
    """
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    """perf hook: tally one raw sys_enter event, honoring the comm filter."""
    # Skip events from other commands when a [comm] filter is active.
    if for_comm is not None and common_comm != for_comm:
        return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this syscall id: the autodict slot is not an int yet.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs forever on a background thread: every `interval` seconds, clear
    # the terminal, print the syscall counters accumulated since the last
    # refresh (highest counts first), then reset them for the next window.
    # Python-2-only syntax throughout (print statement, iteritems, lambda
    # tuple unpacking) -- this script targets perf's embedded Python 2.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
            "----------"),
        # Sort by (count, id) descending so the busiest syscalls come first.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # syscall_name/val may not format for exotic ids; skip them.
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
leahrnh/ticktock_text_api | breakdown_detector.py | 1 | 1533 | import readall
import gensim
import nltk
import numpy as np
import pickle
# we need to extract some features, now we make it easy now to just use the word2vec, one turn previous turn.
#
# Load the pre-trained word2vec model (50 dimensions, per the file name).
model = gensim.models.Word2Vec.load('/tmp/word2vec_50_break')

# Read every rating log from all three interface versions and merge them
# into one {conversation: log} dict (Python 2: dict.items() returns lists,
# so '+' concatenates them).
all_v1 = readall.readall('/home/ubuntu/zhou/Backend/rating_log/v1')
all_v2 = readall.readall('/home/ubuntu/zhou/Backend/rating_log/v2')
all_v3 = readall.readall('/home/ubuntu/zhou/Backend/rating_log/v3')
all_logs = dict(all_v1.items() + all_v2.items() + all_v3.items())

# Feature matrix built row by row: one row per turn, formed by stacking the
# user-side and TickTock-side turn vectors; `target` holds the labels.
sent_vec = None
for item in all_logs:
    print item
    conv = all_logs[item]["Turns"]
    for turn in conv:
        # Sum the word vectors of the user's utterance into one turn vector.
        # NOTE(review): assumes every token is in the word2vec vocabulary;
        # an out-of-vocabulary token would raise KeyError -- confirm upstream.
        turn_vec_1 = sum(model[nltk.word_tokenize(conv[turn]["You"])])
        # Skip turns where TickTock produced no tokens (empty reply).
        if len(nltk.word_tokenize(conv[turn]["TickTock"])) ==0:
            continue
        #print 'TickTock'
        #print conv[turn]["TickTock"]
        turn_vec_2 = sum(model[nltk.word_tokenize(conv[turn]["TickTock"])])
        #print turn_vec_1
        #print turn_vec_2
        if sent_vec is None:
            # First row: start the matrix and the label vector.
            sent_vec = np.hstack((turn_vec_1,turn_vec_2))
            target = np.array(int(conv[turn]["Appropriateness"]))
        else:
            sent_vec = np.vstack((sent_vec,np.hstack((turn_vec_1,turn_vec_2))))
            target = np.hstack((target,int(conv[turn]["Appropriateness"])))

# Persist the dataset for the downstream classifier.
sent = {'data':sent_vec,'target':target}
print sent
with open('sent.pkl','w') as f:
    pickle.dump(sent,f)
| gpl-2.0 |
polimediaupv/edx-platform | common/djangoapps/student/migrations/0027_add_active_flag_and_mode_to_courseware_enrollment.py | 114 | 15224 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``is_active`` and ``mode`` columns
    to the ``student_courseenrollment`` table.

    The ``models`` dict below is South's frozen ORM snapshot of the app's
    models at the time this migration was written; it is consumed by South
    and is not a live model definition.
    """

    def forwards(self, orm):
        # Adding field 'CourseEnrollment.is_active'
        db.add_column('student_courseenrollment', 'is_active',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

        # Adding field 'CourseEnrollment.mode'
        db.add_column('student_courseenrollment', 'mode',
                      self.gf('django.db.models.fields.CharField')(default='honor', max_length=100),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'CourseEnrollment.is_active'
        db.delete_column('student_courseenrollment', 'is_active')

        # Deleting field 'CourseEnrollment.mode'
        db.delete_column('student_courseenrollment', 'mode')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'student.courseenrollment': {
            'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.courseenrollmentallowed': {
            'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
            'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'student.pendingemailchange': {
            'Meta': {'object_name': 'PendingEmailChange'},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.pendingnamechange': {
            'Meta': {'object_name': 'PendingNameChange'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.registration': {
            'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.testcenterregistration': {
            'Meta': {'object_name': 'TestCenterRegistration'},
            'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'accommodation_request': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        'student.testcenteruser': {
            'Meta': {'object_name': 'TestCenterUser'},
            'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
            'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
            'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
            'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
            'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}),
            'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        'student.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
            'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'student.usertestgroup': {
            'Meta': {'object_name': 'UserTestGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
        }
    }

    complete_apps = ['student']
| agpl-3.0 |
Dziolas/invenio | modules/bibknowledge/lib/bibknowledgeadmin.py | 18 | 31208 | ## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibKnowledge Administrator Interface."""
import os
import cgi
import sys
from invenio import bibknowledge, bibknowledgeadminlib
from invenio.bibrankadminlib import check_user
from invenio.webpage import page, error_page
from invenio.webuser import getUid, page_not_authorized
from invenio.messages import wash_language, gettext_set_language
from invenio.urlutils import wash_url_argument, redirect_to_url
from invenio.config import CFG_SITE_LANG, CFG_SITE_SECURE_URL, \
CFG_SITE_NAME, CFG_WEBDIR
__lastupdated__ = """$Date$"""  # CVS keyword; passed to page() as the last-updated stamp
def index(req, ln=CFG_SITE_LANG, search="", descriptiontoo=""):
    """
    Entry point for bibknowledgeadmin.py: delegates to kb_manage.
    @param search search for a substring in kb names
    @param descriptiontoo .. and descriptions
    """
    return kb_manage(req, ln=ln, search=search, descriptiontoo=descriptiontoo)
def kb_manage(req, ln=CFG_SITE_LANG, search="", descriptiontoo=""):
    """
    Main BibKnowledge administration page.

    @param req the request object
    @param ln language
    @param search search for a substring in kb names
    @param descriptiontoo .. and descriptions
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    warnings = []
    # Check if user is authorized to administer
    # If not, still display page but offer to log in
    try:
        uid = getUid(req)
    except Exception:
        # narrowed from a bare except: do not swallow SystemExit and
        # KeyboardInterrupt; any other failure still yields the error page
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    # auth_code == 0 means the user is authorized
    is_admin = not auth_code
    navtrail = '''<a class="navtrail" href="%s/help/admin">%s</a>''' % \
               (CFG_SITE_SECURE_URL, _("Admin Area"))
    if is_admin:
        return page(title=_("BibKnowledge Admin"),
                    body=bibknowledgeadminlib.perform_request_knowledge_bases_management(ln=ln, search=search, descriptiontoo=descriptiontoo),
                    language=ln,
                    uid=uid,
                    navtrail=navtrail,
                    lastupdated=__lastupdated__,
                    req=req,
                    warnings=warnings)
    else:
        # redirect to login
        return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail)
def kb_upload(req, kb, ln=CFG_SITE_LANG):
    """
    Store the uploaded RDF file as <kb_id>.rdf under CFG_WEBDIR/kbfiles.

    @param req the request object; the file arrives as req.form['file']
    @param kb the knowledge base id the file belongs to
    @param ln language
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail = '''<a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % \
               (CFG_SITE_SECURE_URL, ln, _("Knowledge Bases"))
    try:
        dummy = getUid(req)
    except Exception:
        # narrowed from a bare except: do not swallow SystemExit and friends
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if auth_code:
        # not authorized: offer login
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail)
    kb_id = wash_url_argument(kb, 'int')
    # get the form and the uploaded file out of it
    form = req.form
    if not form.has_key('file') or not form['file'].filename:
        return page(title=_("Cannot upload file"),
                    body=_("You have not selected a file to upload"),
                    language=ln,
                    navtrail=navtrail,
                    lastupdated=__lastupdated__,
                    req=req)
    fileitem = form['file']
    uploaddir = CFG_WEBDIR + "/kbfiles"
    # create the upload directory unless it already exists
    if os.path.isfile(uploaddir):
        return page(title=_("Cannot upload file"),
                    body="Cannot create directory " +
                         uploaddir + " since it already" +
                         " exists and it is a file",
                    language=ln,
                    navtrail=navtrail,
                    lastupdated=__lastupdated__,
                    req=req)
    if not os.path.isdir(uploaddir):
        try:
            os.mkdir(uploaddir)
        except OSError:
            # narrowed from a bare except: directory-creation problems
            # (permissions, races) surface as OSError
            return page(title=_("Cannot upload file"),
                        body="Cannot create directory " + uploaddir +
                             " maybe no access rights",
                        language=ln,
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req)
    # if we are here we can try to write
    fn = str(kb_id) + ".rdf"
    # context manager guarantees the destination file is closed even if the
    # copy fails midway (the original leaked the file handle)
    with open(uploaddir + "/" + fn, 'w') as rdffile:
        rdffile.write(fileitem.file.read())
    body = (_("File %s uploaded.") % ('kbfiles/' + cgi.escape(fn)))
    body += " <a href='" + CFG_SITE_SECURE_URL + "/kb'>%s</a>" % _("Back")
    return page(title=_("File uploaded"),
                body=body,
                language=ln,
                navtrail=navtrail,
                lastupdated=__lastupdated__,
                req=req)
def kb_show(req, kb, sortby="to", ln=CFG_SITE_LANG, startat=0, search=""):
    """
    Shows the content of the given knowledge base id. Check for authentication and kb existence.
    Before displaying the content of the knowledge base, check if a form was submitted asking for
    adding, editing or removing a value.

    @param req the request object
    @param ln language
    @param kb the kb id to show
    @param sortby the sorting criteria ('from' or 'to')
    @param startat the number from which start showing mapping rules in kb
    @param search search for this string in the kb
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = '''
 > <a class="navtrail"
 href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL,
                                 ln, _("Manage Knowledge Bases"))
    try:
        uid = getUid(req)
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # consider narrowing to Exception.
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        # authorized: resolve the id to a kb name; fail with an error page
        # when no such knowledge base exists
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        # NOTE(review): _("Knowledge Base %s" % kb_name) translates the already
        # interpolated string, so the catalog lookup will miss; the usual i18n
        # form is _("Knowledge Base %s") % kb_name -- confirm intent.
        return page(title=_("Knowledge Base %s" % kb_name),
                    body=bibknowledgeadminlib.perform_request_knowledge_base_show(ln=ln,
                        kb_id=kb_id, sortby=sortby, startat=startat,
                        search_term=search),
                    uid=uid,
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    else:
        # not authorized: offer login
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_show_attributes(req, kb, ln=CFG_SITE_LANG, sortby="to"):
    """
    Shows the attributes (name, description) of a given kb

    @param req the request object
    @param ln language
    @param kb the kb id to show
    @param sortby the sorting criteria ('from' or 'to')
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        uid = getUid(req)
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # consider narrowing to Exception.
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        # authorized: resolve the id to a kb name; fail with an error page
        # when no such knowledge base exists
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        return page(title=_("Knowledge Base %s Attributes" % kb_name),
                    body=bibknowledgeadminlib.perform_request_knowledge_base_show_attributes(ln=ln,
                        kb_id=kb_id,
                        sortby=sortby),
                    uid=uid,
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    else:
        # not authorized: offer login
        return page_not_authorized(req=req, text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_dynamic_update(req, kb_id, field, expression, collection,
                      ln=CFG_SITE_LANG):
    """
    Updates the configuration of a collection based KB by checking user
    rights and calling bibknowledgeadminlib.

    @param req request
    @param kb_id knowledge base id
    @param field configured field for this dynamic kb
    @param expression search expression
    @param collection search in this collection
    @param ln language
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # consider narrowing to Exception.
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        # actual config call; returns an error string on failure
        err = bibknowledgeadminlib.perform_update_kb_config(kb_id, field,
                                                            expression,
                                                            collection)
        if err:
            return page(title=_("Error"),
                        body = err,
                        language=ln,
                        navtrail = navtrail_previous_links,
                        lastupdated=__lastupdated__,
                        req=req)
        else:
            # success: bounce back to the kb display page
            redirect_to_url(req, "kb?ln=%(ln)s&kb=%(kb_id)s" % {'ln':ln, 'kb_id': kb_id })
    else:
        # not authorized: offer login
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_show_dependencies(req, kb, ln=CFG_SITE_LANG, sortby="to"):
    """
    Shows the dependencies of a given kb

    @param req the request object
    @param kb the kb id to show
    @param ln language
    @param sortby the sorting criteria ('from' or 'to')
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        uid = getUid(req)
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # consider narrowing to Exception.
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        # authorized: resolve the id to a kb name; fail with an error page
        # when no such knowledge base exists
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        return page(title=_("Knowledge Base %s Dependencies" % kb_name),
                    body=bibknowledgeadminlib.perform_request_knowledge_base_show_dependencies(ln=ln,
                        kb_id=kb_id,
                        sortby=sortby),
                    uid=uid,
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    else:
        # not authorized: offer login
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_add_mapping(req, kb, mapFrom, mapTo, sortby="to", ln=CFG_SITE_LANG,
                   forcetype=None, replacements=None, kb_type=None):
    """
    Adds a new mapping to a kb.

    @param ln language
    @param kb the kb id to show
    @param sortby the sorting criteria ('from' or 'to')
    @param forcetype indicates if this function should ask about replacing left/right sides (None or 'no')
        replace in current kb ('curr') or in all ('all')
    @param replacements an object containing kbname++++left++++right strings
        (the '++++' separator matches the split below).
        Can be a string or an array of strings
    @param kb_type None for normal from-to kb's, 't' for taxonomies
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # consider narrowing to Exception.
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        key = wash_url_argument(mapFrom, 'str')
        value = wash_url_argument(mapTo, 'str')
        # check if key or value already exists in some KB;
        # get_kb_mappings does substring matching, so filter for exact hits
        left_sides_match = bibknowledge.get_kb_mappings("", key, "")
        # check that the match is exact
        left_sides = []
        for m in left_sides_match:
            if m['key'] == key:
                left_sides.append(m)
        right_sides_match = bibknowledge.get_kb_mappings("", "", value)
        right_sides = []
        for m in right_sides_match:
            if m['value'] == value:
                right_sides.append(m)
        if (len(right_sides) == 0) and (len(left_sides) == 0):
            # no problems, just add in current
            forcetype = "curr"
        # likewise, if this is a taxonomy, just pass on
        if kb_type == 't':
            forcetype = "curr"
        if forcetype and not forcetype == "no":
            pass
        else:
            # no force decision yet: ask the user to confirm the duplicate
            # left/right side before adding anything
            if len(left_sides) > 0:
                return page(title=_("Left side exists"),
                            body = bibknowledgeadminlib.perform_request_verify_rule(ln, kb_id, key, value, "left", kb_name, left_sides),
                            language=ln,
                            navtrail = navtrail_previous_links,
                            lastupdated=__lastupdated__,
                            req=req)
            if len(right_sides) > 0:
                return page(title=_("Right side exists"),
                            body = bibknowledgeadminlib.perform_request_verify_rule(ln, kb_id, key, value, "right", kb_name, right_sides),
                            language=ln,
                            navtrail = navtrail_previous_links,
                            lastupdated=__lastupdated__,
                            req=req)
        if forcetype == "curr":
            bibknowledge.add_kb_mapping(kb_name, key, value)
        if forcetype == "all":
            # a bit tricky.. remove the rules given in param replacement and add the current
            # rule in the same kb's
            if replacements:
                # "replacements" can be either a string or an array. Let's make it always an array
                if type(replacements) == type("this is a string"):
                    mystr = replacements
                    replacements = []
                    replacements.append(mystr)
                for r in replacements:
                    if r.find("++++") > 0:
                        # each entry is kbname++++left++++right; drop the old
                        # rule and re-add the new key/value pair in that kb
                        (rkbname, rleft, dummy) = r.split('++++')
                        bibknowledge.remove_kb_mapping(rkbname, rleft)
                        # add only if this is not yet there..
                        if not bibknowledge.kb_mapping_exists(rkbname, key):
                            bibknowledge.add_kb_mapping(rkbname, key, value)
        # done: bounce back to the kb display page
        redirect_to_url(req, "kb?ln=%(ln)s&kb=%(kb)s&sortby=%(sortby)s&kb_type=%(kb_type)s" % {'ln':ln,
                                                                                               'kb':kb_id,
                                                                                               'sortby':sortby,
                                                                                               'kb_type':kb_type})
    else:
        # not authorized: offer login
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_edit_mapping(req, kb, key, mapFrom, mapTo,
                    update="", delete="", sortby="to", ln=CFG_SITE_LANG):
    """
    Edit a mapping to in kb. Edit can be "update old value" or "delete existing value"

    @param kb the knowledge base id to edit
    @param key the key of the mapping that will be modified
    @param mapFrom the new key of the mapping
    @param mapTo the new value of the mapping
    @param update contains a value if the mapping is to be updated
    @param delete contains a value if the mapping is to be deleted
    @param sortby the sorting criteria ('from' or 'to')
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # consider narrowing to Exception.
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        key = wash_url_argument(key, 'str')
        # note: both branches may run; a non-empty `delete` removes the old
        # mapping and a non-empty `update` writes the new key/value
        if delete != "":
            # Delete
            bibknowledge.remove_kb_mapping(kb_name, key)
        if update != "":
            # Update
            new_key = wash_url_argument(mapFrom, 'str')
            new_value = wash_url_argument(mapTo, 'str')
            bibknowledge.update_kb_mapping(kb_name, key, new_key, new_value)
        # done: bounce back to the kb display page
        redirect_to_url(req, "kb?ln=%(ln)s&kb=%(kb)s&sortby=%(sortby)s" % {'ln':ln, 'kb':kb_id, 'sortby':sortby})
    else:
        # not authorized: offer login
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def uniq(alist):
    """Return the unique members of *alist*, preserving first-seen order.

    Elements must be hashable.  Replaces an obscure one-liner that abused
    ``dict.setdefault`` inside a list comprehension with an explicit,
    behaviourally identical seen-set loop.
    """
    seen = set()
    result = []
    for element in alist:
        if element not in seen:
            seen.add(element)
            result.append(element)
    return result
def kb_update_attributes(req, kb="", name="", description="", sortby="to",
                         ln=CFG_SITE_LANG, chosen_option=None, kb_type=None):
    """
    Update the attributes of the kb.

    @param ln language
    @param kb the kb id to update
    @param sortby the sorting criteria ('from' or 'to')
    @param name the new name of the kb
    @param description the new description of the kb
    @param chosen_option set to dialog box value
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except:
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        if chosen_option is not None:
            # Update could not be performed.
            # Redirect to kb attributes page.
            # NOTE(review): presumably redirect_to_url raises/aborts the
            # request, otherwise execution would fall through -- confirm.
            redirect_to_url(req, "kb?ln=%(ln)s&action=attributes&kb=%(kb)s&sortby=%(sortby)s&kb_type=%(kb_type)s" % {'ln':ln, 'kb':kb_id, 'sortby':sortby, 'kb_type':kb_type})
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        new_name = wash_url_argument(name, 'str')
        if kb_name != new_name and bibknowledge.kb_exists(new_name):
            #A knowledge base with that name already exists.
            #Do not update; ask the user to pick another name instead.
            return dialog_box(req=req,
                              ln=ln,
                              title="Name already in use",
                              message="""<i>%s</i> cannot be renamed to %s:
                              Another knowledge base already has that name.
                              <br/>Please choose another name.""" % (kb_name,
                                                                     new_name),
                              navtrail=navtrail_previous_links,
                              options=[ _("Ok")])
        new_desc = wash_url_argument(description, 'str')
        bibknowledge.update_kb_attributes(kb_name, new_name, new_desc)
        redirect_to_url(req, "kb?ln=%(ln)s&kb=%(kb)s&sortby=%(sortby)s" % {'ln':ln, 'kb':kb_id, 'sortby':sortby})
    else:
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_export(req, kbname="", format="kbr", searchkey="", searchvalue="", searchtype="s", limit=None, ln=CFG_SITE_LANG):
    """
    Exports the given kb so that it is listed in stdout (the browser).

    @param req the request
    @param kbname knowledge base name
    @param format 'kba' for authority file, 'kbr' for leftside-rightside, json
        for json-formatted dictionaries
    @param searchkey include only lines that match this on the left side
    @param searchvalue include only lines that match this on the right side
    @param searchtype s = substring match, e = exact match
    @param limit how many results to return. None means all
    @param ln language
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    if not kbname:
        return page(title=_("Knowledge base name missing"),
                    body = """Required parameter kbname
                    is missing.""",
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    #in order to make 'wget' downloads easy we do not require authorization
    #first check the type of the KB
    kbtype = None
    kbinfo = None
    kbid = None
    kbinfos = bibknowledge.get_kbs_info("", kbname)
    if kbinfos:
        kbinfo = kbinfos[0]
        kbtype = kbinfo['kbtype']
        kbid = kbinfo['id']
    else:
        return page(title=_("Unknown knowledge base"),
                    body = _("There is no knowledge base with that name."),
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    if not kbtype or kbtype == 'w':
        # 'w' (or unset) is a plain left/right word mapping.
        if format and format == "ejson":
            req.content_type = 'application/json'
            return bibknowledge.get_kb_mappings_embedded_json(kbname, searchkey, \
                searchvalue, searchtype, limit)
        elif format and format[0] == 'j':
            # as JSON formatted string
            req.content_type = 'application/json'
            return bibknowledge.get_kb_mappings_json(kbname, searchkey, \
                searchvalue, searchtype, limit)
        # left side / right side KB
        mappings = bibknowledge.get_kb_mappings(kbname, searchkey, \
            searchvalue, searchtype)
        if format == 'right' or format == 'kba':
            # as authority sequence: sorted, de-duplicated right-hand values
            seq = [m['value'] for m in mappings]
            seq = uniq(sorted(seq))
            for s in seq:
                req.write(s+"\n");
            return
        else:
            # as regularly formatted left-right mapping
            for m in mappings:
                req.write(m['key'] + '---' + m['value'] + '\n')
            return
    elif kbtype == 'd':
        # dynamic kb, another interface for perform_request_search
        if format and format[0] == 'j':
            req.content_type = "application/json"
            return bibknowledge.get_kbd_values_json(kbname, searchvalue)
        else:
            # print it as a list of values
            for hit in bibknowledge.get_kbd_values(kbname, searchvalue):
                req.write(hit + '\n')
            req.write('\n')
            return
    elif kbtype == 't': #taxonomy: output the file
        kbfilename = CFG_WEBDIR+"/kbfiles/"+str(kbid)+".rdf"
        try:
            f = open(kbfilename, 'r')
            for line in f:
                req.write(line)
            f.close()
        except:
            # Any I/O error is reported inline instead of raising.
            req.write("Reading the file "+kbfilename+" failed.")
    else:
        # This situation should never happen
        raise ValueError, "Unsupported KB Type: %s" % kbtype
def kb_add(req, ln=CFG_SITE_LANG, sortby="to", kbtype=""):
    """
    Adds a new kb.

    @param req the request
    @param ln language
    @param sortby to or from
    @param kbtype type of knowledge base. one of: "", taxonomy, dynamic
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except:
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if auth_code:
        # Guard clause: a non-zero auth code means the user is not authorised.
        navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
    # Pick a default title matching the requested knowledge-base flavour.
    default_titles = {"taxonomy": "Untitled Taxonomy",
                      "dynamic": "Untitled dynamic"}
    name = default_titles.get(kbtype, "Untitled")
    kb_id = bibknowledge.add_kb(kb_name=name, kb_type=kbtype)
    # Drop the user straight into the attributes editor of the new kb.
    redirect_to_url(req, "kb?ln=%(ln)s&action=attributes&kb=%(kb)s" % {'ln':ln, 'kb':kb_id, 'sortby':sortby})
def kb_delete(req, kb, ln=CFG_SITE_LANG, chosen_option=""):
    """
    Deletes an existing kb, asking the user for confirmation first.

    @param kb the kb id to delete
    @param chosen_option the dialog-box answer ('' means not asked yet)
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a> > %s''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"), _("Delete Knowledge Base"))
    try:
        dummy = getUid(req)
    except:
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        #Ask confirmation to user if not already done
        chosen_option = wash_url_argument(chosen_option, 'str')
        if chosen_option == "":
            return dialog_box(req=req,
                              ln=ln,
                              title="Delete %s" % kb_name,
                              message="""Are you sure you want to
                              delete knowledge base <i>%s</i>?""" % kb_name,
                              navtrail=navtrail_previous_links,
                              options=[_("Cancel"), _("Delete")])
        elif chosen_option==_("Delete"):
            # Only delete on explicit confirmation; "Cancel" falls through
            # to the redirect below without deleting anything.
            bibknowledge.delete_kb(kb_name)
        redirect_to_url(req, "kb?ln=%(ln)s" % {'ln':ln})
    else:
        navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb">%s</a>''' % (CFG_SITE_SECURE_URL, _("Manage Knowledge Bases"))
        return page_not_authorized(req=req, text=auth_msg,
                                   navtrail=navtrail_previous_links)
def dialog_box(req, url="", ln=CFG_SITE_LANG, navtrail="",
               title="", message="", options=None):
    """
    Returns a dialog box with a given title, message and options.
    Used for asking confirmation on actions.
    The page that will receive the result must take 'chosen_option' as parameter.

    @param url the url used to submit the options chosen by the user
    @param options the list of labels for the buttons given as choice to user
    """
    import invenio
    templates = invenio.template.load('bibformat')
    # Render the dialog via the bibformat admin template; an empty option
    # list is substituted when the caller passes None.
    box_html = templates.tmpl_admin_dialog_box(url,
                                               title,
                                               message,
                                               options if options else [])
    return page(title="",
                body=box_html,
                language=ln,
                lastupdated=__lastupdated__,
                navtrail=navtrail,
                req=req)
| gpl-2.0 |
gltn/stdm | stdm/third_party/FontTools/fontTools/pens/momentsPen.py | 1 | 9410 | """Pen calculating 0th, 1st, and 2nd moments of area of glyph shapes.
This is low-level, autogenerated pen. Use statisticsPen instead."""
from fontTools.misc.py23 import *
from fontTools.pens.basePen import BasePen
__all__ = ["MomentsPen"]
class MomentsPen(BasePen):
    # NOTE: this class is autogenerated (see the __main__ stanza below that
    # regenerates it via fontTools.misc.symfont.printGreenPen); the formula
    # bodies must not be hand-edited.  It accumulates the signed area and the
    # raw moments of the outline via Green's theorem, one segment at a time.

    def __init__(self, glyphset=None):
        BasePen.__init__(self, glyphset)
        # Accumulators: 0th (area), 1st (momentX/Y) and 2nd
        # (momentXX/XY/YY) moments of area.
        self.area = 0
        self.momentX = 0
        self.momentY = 0
        self.momentXX = 0
        self.momentXY = 0
        self.momentYY = 0

    def _moveTo(self, p0):
        # Remember the contour start so _closePath/_endPath can check closure.
        self.__startPoint = p0

    def _closePath(self):
        p0 = self._getCurrentPoint()
        if p0 != self.__startPoint:
            # Implicitly close the contour with a straight segment.
            self._lineTo(self.__startPoint)

    def _endPath(self):
        p0 = self._getCurrentPoint()
        if p0 != self.__startPoint:
            # Green theorem is not defined on open contours.
            raise NotImplementedError

    def _lineTo(self, p1):
        # Closed-form moment contributions of a straight segment (autogenerated).
        x0,y0 = self._getCurrentPoint()
        x1,y1 = p1

        r0 = x1*y0
        r1 = x1*y1
        r2 = x1**2
        r3 = x0**2
        r4 = 2*y0
        r5 = y0 - y1
        r6 = r5*x0
        r7 = y0**2
        r8 = y1**2
        r9 = x1**3
        r10 = r4*y1
        r11 = y0**3
        r12 = y1**3

        self.area += -r0/2 - r1/2 + x0*(y0 + y1)/2
        self.momentX += -r2*y0/6 - r2*y1/3 + r3*(r4 + y1)/6 - r6*x1/6
        self.momentY += -r0*y1/6 - r7*x1/6 - r8*x1/6 + x0*(r7 + r8 + y0*y1)/6
        self.momentXX += -r2*r6/12 - r3*r5*x1/12 - r9*y0/12 - r9*y1/4 + x0**3*(3*y0 + y1)/12
        self.momentXY += -r10*r2/24 - r2*r7/24 - r2*r8/8 + r3*(r10 + 3*r7 + r8)/24 - x0*x1*(r7 - r8)/12
        self.momentYY += -r0*r8/12 - r1*r7/12 - r11*x1/12 - r12*x1/12 + x0*(r11 + r12 + r7*y1 + r8*y0)/12

    def _qCurveToOne(self, p1, p2):
        # Closed-form moment contributions of one quadratic Bezier segment
        # (autogenerated common-subexpression form).
        x0,y0 = self._getCurrentPoint()
        x1,y1 = p1
        x2,y2 = p2

        r0 = 2*x1
        r1 = r0*y2
        r2 = 2*y1
        r3 = r2*x2
        r4 = 3*y2
        r5 = r4*x2
        r6 = 3*y0
        r7 = x1**2
        r8 = 2*y2
        r9 = x2**2
        r10 = 4*y1
        r11 = 10*y2
        r12 = r0*x2
        r13 = x0**2
        r14 = 10*y0
        r15 = x2*y2
        r16 = r0*y1 + r15
        r17 = 4*x1
        r18 = x2*y0
        r19 = r10*r15
        r20 = y1**2
        r21 = 2*r20
        r22 = y2**2
        r23 = r22*x2
        r24 = 5*r23
        r25 = y0**2
        r26 = y0*y2
        r27 = 5*r25
        r28 = 8*x1**3
        r29 = x2**3
        r30 = 30*y1
        r31 = 6*y1
        r32 = 10*r9*x1
        r33 = 4*r7
        r34 = 5*y2
        r35 = 12*r7
        r36 = r5 + 20*x1*y1
        r37 = 30*x1
        r38 = 12*x1
        r39 = 20*r7
        r40 = 8*r7*y1
        r41 = r34*r9
        r42 = 60*y1
        r43 = 20*r20
        r44 = 4*r20
        r45 = 15*r22
        r46 = r38*x2
        r47 = y1*y2
        r48 = 8*r20*x1 + r24
        r49 = 6*x1
        r50 = 8*y1**3
        r51 = y2**3
        r52 = y0**3
        r53 = 10*y1
        r54 = 12*y1
        r55 = 12*r20

        self.area += r1/6 - r3/6 - r5/6 + x0*(r2 + r6 + y2)/6 - y0*(r0 + x2)/6
        self.momentX += -r10*r9/30 - r11*r9/30 - r12*(-r8 + y1)/30 + r13*(r10 + r14 + y2)/30 + r7*r8/30 + x0*(r1 + r16 - r17*y0 - r18)/30 - y0*(r12 + 2*r7 + r9)/30
        self.momentY += r1*(r8 + y1)/30 - r19/30 - r21*x2/30 - r24/30 - r25*(r17 + x2)/30 + x0*(r10*y0 + r2*y2 + r21 + r22 + r26 + r27)/30 - y0*(r16 + r3)/30
        self.momentXX += r13*(r11*x1 - 5*r18 + r3 + r36 - r37*y0)/420 + r28*y2/420 - r29*r30/420 - r29*y2/4 - r32*(r2 - r4)/420 - r33*x2*(r2 - r34)/420 + x0**3*(r31 + 21*y0 + y2)/84 - x0*(-r15*r38 + r18*r38 + r2*r9 - r35*y2 + r39*y0 - r40 - r41 + r6*r9)/420 - y0*(r28 + 5*r29 + r32 + r35*x2)/420
        self.momentXY += r13*(r14*y2 + 3*r22 + 105*r25 + r42*y0 + r43 + 12*r47)/840 - r17*x2*(r44 - r45)/840 - r22*r9/8 - r25*(r39 + r46 + 3*r9)/840 + r33*y2*(r10 + r34)/840 - r42*r9*y2/840 - r43*r9/840 + x0*(-r10*r18 + r17*r26 + r19 + r22*r49 - r25*r37 - r27*x2 + r38*r47 + r48)/420 - y0*(r15*r17 + r31*r9 + r40 + r41 + r46*y1)/420
        self.momentYY += r1*(r11*y1 + r44 + r45)/420 - r15*r43/420 - r23*r30/420 - r25*(r1 + r36 + r53*x2)/420 - r50*x2/420 - r51*x2/12 - r52*(r49 + x2)/84 + x0*(r22*r53 + r22*r6 + r25*r30 + r25*r34 + r26*r54 + r43*y0 + r50 + 5*r51 + 35*r52 + r55*y2)/420 - y0*(-r0*r22 + r15*r54 + r48 + r55*x2)/420

    def _curveToOne(self, p1, p2, p3):
        # Closed-form moment contributions of one cubic Bezier segment
        # (autogenerated common-subexpression form).
        x0,y0 = self._getCurrentPoint()
        x1,y1 = p1
        x2,y2 = p2
        x3,y3 = p3

        r0 = 6*x2
        r1 = r0*y3
        r2 = 6*y2
        r3 = 10*y3
        r4 = r3*x3
        r5 = 3*x1
        r6 = 3*y1
        r7 = 6*x1
        r8 = 3*x2
        r9 = 6*y1
        r10 = 3*y2
        r11 = x2**2
        r12 = r11*y3
        r13 = 45*r12
        r14 = x3**2
        r15 = r14*y2
        r16 = r14*y3
        r17 = x2*x3
        r18 = 15*r17
        r19 = 7*y3
        r20 = x1**2
        r21 = 9*r20
        r22 = x0**2
        r23 = 21*y1
        r24 = 9*r11
        r25 = 9*x2
        r26 = x2*y3
        r27 = 15*r26
        r28 = -r25*y1 + r27
        r29 = r25*y2
        r30 = r9*x3
        r31 = 45*x1
        r32 = x1*x3
        r33 = 45*r20
        r34 = 5*r14
        r35 = x2*y2
        r36 = 18*r35
        r37 = 5*x3
        r38 = r37*y3
        r39 = r31*y1 + r36 + r38
        r40 = x1*y0
        r41 = x1*y3
        r42 = x2*y0
        r43 = x3*y1
        r44 = r10*x3
        r45 = x3*y2*y3
        r46 = y2**2
        r47 = 45*r46
        r48 = r47*x3
        r49 = y3**2
        r50 = r49*x3
        r51 = y1**2
        r52 = 9*r51
        r53 = y0**2
        r54 = 21*x1
        r55 = x3*y2
        r56 = 15*r55
        r57 = 9*y2
        r58 = y2*y3
        r59 = 15*r58
        r60 = 9*r46
        r61 = 3*y3
        r62 = 45*y1
        r63 = r8*y3
        r64 = y0*y1
        r65 = y0*y2
        r66 = 30*r65
        r67 = 5*y3
        r68 = y1*y3
        r69 = 45*r51
        r70 = 5*r49
        r71 = x2**3
        r72 = x3**3
        r73 = 126*x3
        r74 = x1**3
        r75 = r14*x2
        r76 = 63*r11
        r77 = r76*x3
        r78 = 15*r35
        r79 = r19*x3
        r80 = x1*y1
        r81 = 63*r35
        r82 = r38 + 378*r80 + r81
        r83 = x1*y2
        r84 = x2*y1
        r85 = x3*y0
        r86 = x2*x3*y1
        r87 = x2*x3*y3
        r88 = r11*y2
        r89 = 27*r88
        r90 = 42*y3
        r91 = r14*r90
        r92 = 90*x1*x2
        r93 = 189*x2
        r94 = 30*x1*x3
        r95 = 14*r16 + 126*r20*y1 + 45*r88 + r94*y2
        r96 = x1*x2
        r97 = 252*r96
        r98 = x1*x2*y2
        r99 = 42*r32
        r100 = x1*x3*y1
        r101 = 30*r17
        r102 = 18*r17
        r103 = 378*r20
        r104 = 189*y2
        r105 = r20*y3
        r106 = r11*y1
        r107 = r14*y1
        r108 = 378*r46
        r109 = 252*y2
        r110 = y1*y2
        r111 = x2*x3*y2
        r112 = y0*y3
        r113 = 378*r51
        r114 = 63*r46
        r115 = 27*x2
        r116 = r115*r46 + 42*r50
        r117 = x2*y1*y3
        r118 = x3*y1*y2
        r119 = r49*x2
        r120 = r51*x3
        r121 = x3*y3
        r122 = 14*x3
        r123 = 30*r117 + r122*r49 + r47*x2 + 126*r51*x1
        r124 = x1*y1*y3
        r125 = x1*y2*y3
        r126 = x2*y1*y2
        r127 = 54*y3
        r128 = 21*r55
        r129 = 630*r53
        r130 = r46*x1
        r131 = r49*x1
        r132 = 126*r53
        r133 = y2**3
        r134 = y3**3
        r135 = 630*r49
        r136 = y1**3
        r137 = y0**3
        r138 = r114*y3 + r23*r49
        r139 = r49*y2

        self.area += r1/20 - r2*x3/20 - r4/20 + r5*(y2 + y3)/20 - r6*(x2 + x3)/20 + x0*(r10 + r9 + 10*y0 + y3)/20 - y0*(r7 + r8 + x3)/20
        self.momentX += r13/840 - r15/8 - r16/3 - r18*(r10 - r19)/840 + r21*(r10 + 2*y3)/840 + r22*(r2 + r23 + 56*y0 + y3)/168 + r5*(r28 + r29 - r30 + r4)/840 - r6*(10*r14 + r18 + r24)/840 + x0*(12*r26 + r31*y2 - r37*y0 + r39 - 105*r40 + 15*r41 - 30*r42 - 3*r43 + r44)/840 - y0*(18*r11 + r18 + r31*x2 + 12*r32 + r33 + r34)/840
        self.momentY += r27*(r10 + r19)/840 - r45/8 - r48/840 + r5*(10*r49 + r57*y1 + r59 + r60 + r9*y3)/840 - r50/6 - r52*(r8 + 2*x3)/840 - r53*(r0 + r54 + x3)/168 - r6*(r29 + r4 + r56)/840 + x0*(18*r46 + 140*r53 + r59 + r62*y2 + 105*r64 + r66 + r67*y0 + 12*r68 + r69 + r70)/840 - y0*(r39 + 15*r43 + 12*r55 - r61*x1 + r62*x2 + r63)/840
        self.momentXX += -r11*r73*(-r61 + y2)/9240 + r21*(r28 - r37*y1 + r44 + r78 + r79)/9240 + r22*(21*r26 - 630*r40 + 42*r41 - 126*r42 + r57*x3 + r82 + 210*r83 + 42*r84 - 14*r85)/9240 - r5*(r11*r62 + r14*r23 + 14*r15 - r76*y3 + 54*r86 - 84*r87 - r89 - r91)/9240 - r6*(27*r71 + 42*r72 + 70*r75 + r77)/9240 + 3*r71*y3/220 - 3*r72*y2/44 - r72*y3/4 + 3*r74*(r57 + r67)/3080 - r75*(378*y2 - 630*y3)/9240 + x0**3*(r57 + r62 + 165*y0 + y3)/660 + x0*(-18*r100 - r101*y0 - r101*y1 + r102*y2 - r103*y0 + r104*r20 + 63*r105 - 27*r106 - 9*r107 + r13 - r34*y0 - r76*y0 + 42*r87 + r92*y3 + r94*y3 + r95 - r97*y0 + 162*r98 - r99*y0)/9240 - y0*(135*r11*x1 + r14*r54 + r20*r93 + r33*x3 + 45*r71 + 14*r72 + 126*r74 + 42*r75 + r77 + r92*x3)/9240
        self.momentXY += -r108*r14/18480 + r12*(r109 + 378*y3)/18480 - r14*r49/8 - 3*r14*r58/44 - r17*(252*r46 - 1260*r49)/18480 + r21*(18*r110 + r3*y1 + 15*r46 + 7*r49 + 18*r58)/18480 + r22*(252*r110 + 28*r112 + r113 + r114 + 2310*r53 + 30*r58 + 1260*r64 + 252*r65 + 42*r68 + r70)/18480 - r52*(r102 + 15*r11 + 7*r14)/18480 - r53*(r101 + r103 + r34 + r76 + r97 + r99)/18480 + r7*(-r115*r51 + r116 + 18*r117 - 18*r118 + 42*r119 - 15*r120 + 28*r45 + r81*y3)/18480 - r9*(63*r111 + 42*r15 + 28*r87 + r89 + r91)/18480 + x0*(r1*y0 + r104*r80 + r112*r54 + 21*r119 - 9*r120 - r122*r53 + r123 + 54*r124 + 60*r125 + 54*r126 + r127*r35 + r128*y3 - r129*x1 + 81*r130 + 15*r131 - r132*x2 - r2*r85 - r23*r85 + r30*y3 + 84*r40*y2 - 84*r42*y1 + r60*x3)/9240 - y0*(54*r100 - 9*r105 + 81*r106 + 15*r107 + 54*r111 + r121*r7 + 21*r15 + r24*y3 + 60*r86 + 21*r87 + r95 + 189*r96*y1 + 54*r98)/9240
        self.momentYY += -r108*r121/9240 - r133*r73/9240 - r134*x3/12 - r135*r55/9240 - 3*r136*(r25 + r37)/3080 - r137*(r25 + r31 + x3)/660 + r26*(r135 + 126*r46 + 378*y2*y3)/9240 + r5*(r110*r127 + 27*r133 + 42*r134 + r138 + 70*r139 + r46*r62 + 27*r51*y2 + 15*r51*y3)/9240 - r52*(r56 + r63 + r78 + r79)/9240 - r53*(r128 + r25*y3 + 42*r43 + r82 + 42*r83 + 210*r84)/9240 - r6*(r114*x3 + r116 - 14*r119 + 84*r45)/9240 + x0*(r104*r51 + r109*r64 + 90*r110*y3 + r113*y0 + r114*y0 + r129*y1 + r132*y2 + 45*r133 + 14*r134 + 126*r136 + 770*r137 + r138 + 42*r139 + 135*r46*y1 + 14*r53*y3 + r64*r90 + r66*y3 + r69*y3 + r70*y0)/9240 - y0*(90*r118 + 63*r120 + r123 - 18*r124 - 30*r125 + 162*r126 - 27*r130 - 9*r131 + r36*y3 + 30*r43*y3 + 42*r45 + r48 + r51*r93)/9240
if __name__ == '__main__':
    # Regenerate this module: symbolically integrate the listed Green-theorem
    # functionals and print the resulting MomentsPen source code.
    from fontTools.misc.symfont import x, y, printGreenPen
    printGreenPen('MomentsPen', [
        ('area', 1),
        ('momentX', x),
        ('momentY', y),
        ('momentXX', x**2),
        ('momentXY', x*y),
        ('momentYY', y**2),
    ])
| gpl-2.0 |
PercyODI/PythonCSharpOOComparison | Utilities/checkLinks.py | 1 | 1091 | import sys, os, re
pattern = re.compile('\[.+\]\((?P<file>.+?)\)', re.MULTILINE) # Matches [text](directory/file.md)
folderDict = {}
numBadLinks = 0;
os.chdir("..") # Assumes this utility is one directory deep.
startDirectory = os.path.abspath(".")
mdFiles = []
for root, subFolders, files in os.walk("."):
if("\." in root):
continue
for f in files:
if ".md" in f: # Only modify MarkDown files
mdFiles.append(os.path.abspath(os.path.join(root, f)))
for mdFile in mdFiles:
os.chdir(os.path.dirname(mdFile))
fileContent = open(mdFile, 'r')
for lineNum, line in enumerate(fileContent, start=1):
matches = pattern.findall(line)
for match in matches:
if not os.path.isfile(match):
numBadLinks += 1
print("\n")
print(os.path.relpath(mdFile, startDirectory) + ", line " + str(lineNum))
print("\t" + match + " is a bad link.")
print("\n")
if numBadLinks < 1:
print("No Bad Links Found!")
else:
print("Found " + str(numBadLinks) + " bad links.")
| mit |
ActiveState/code | recipes/Python/577746_Inherit_Method_Docstrings_Using_Only/recipe-577746.py | 1 | 2640 | """docfunc module"""
from deferred_binder import DeferredBinder
class DocFunc(DeferredBinder):
    """Deferred binder that lets a method inherit its docstring from the
    nearest base-class implementation at first attribute access."""

    # Docstring value that marks a function whose docstring should be
    # inherited (i.e. the function has no docstring of its own).
    TRIGGER = None

    def __init__(self, f, default=TRIGGER):
        # `default` is the fallback docstring when no base class provides
        # one.  Accepting it here fixes inherits_docstring(), which
        # constructs DocFunc(f, default) and previously raised TypeError
        # because __init__ only took a single argument.
        super().__init__(f.__name__, f)
        self.f = self.target
        self.default = default

    @staticmethod
    def transform(name, context, target, obj=None):
        """The DeferredBinder transform for this subclass.

        name - the attribute name to which the function will be bound.
        context - the class/namespace to which the function will be bound.
        target - the function that will be bound.
        obj - ignored.

        The DeferredBinder descriptor class will replace itself with the
        result of this method, when the name to which the descriptor is
        requested for the first time.  This can be on the class or an
        instance of the class.  This way the class to which the method is
        bound is available so that the inherited docstring can be
        identified and set.
        """
        namespace, cls = context
        doc = target.__doc__
        if doc == DocFunc.TRIGGER:
            doc = DocFunc.get_doc(cls, name, DocFunc.TRIGGER)
        target.__doc__ = doc
        return target

    @staticmethod
    def get_doc(cls, fname, default=TRIGGER, member=True):
        """Returns the function docstring the method should inherit.

        cls - the class from which to start looking for the method.
        fname - the method name on that class.
        default - the docstring to return if none is found.
        member - is the target function already bound to cls?
        """
        # Walk the MRO; skip cls itself when the function is already a member,
        # since its own (missing) docstring is what we are trying to fill in.
        # (The original left two debugging print() calls here; removed.)
        bases = cls.__mro__[:]
        if member:
            bases = bases[1:]
        for base in bases:
            func = getattr(base, fname, None)
            if not func:
                continue
            doc = getattr(func, '__doc__', default)
            if doc == default:
                continue
            return doc
        return default

    @staticmethod
    def inherits_docstring(f, context=None, fname=None, default=TRIGGER):
        """A decorator that returns a new DocFunc object.

        f - the function to decorate.
        context - the class/namespace where the function is bound, if known.
        fname - the function name in that context, if known.
        default - the docstring to return if none is found.
        """
        if context is not None:
            cls, namespace = context
            fname = fname or f.__name__
            f.__doc__ = DocFunc.get_doc(cls, fname, default, False)
            return f
        return DocFunc(f, default)
| mit |
google/cauliflowervest | cauliflowervest/client/base_client.py | 1 | 9496 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base CauliflowerVestClient class."""
import httplib
import json
import logging
import ssl
import time
import urllib
import urllib2
import webbrowser
import httplib2
import oauth2client.client
import oauth2client.tools
from cauliflowervest import settings as base_settings
from cauliflowervest.client import settings
from cauliflowervest.client import util
# Prefix to prevent Cross Site Script Inclusion.
JSON_PREFIX = ")]}',\n"
class Error(Exception):
  """Base class for domain specific exceptions."""


class UserAbort(Error):
  """User aborted process."""


class AuthenticationError(Error):
  """There was an error with authentication."""


class RequestError(Error):
  """There was an error interacting with the server."""


class NotFoundError(RequestError):
  """No passphrase was found."""


class MetadataError(Error):
  """There was an error with machine metadata."""
class CauliflowerVestClient(object):
  """Client to interact with the CauliflowerVest service."""

  ESCROW_PATH = None  # String path to escrow to, set by subclasses.

  # Sequence of key names of metadata to require; see GetAndValidateMetadata().
  REQUIRED_METADATA = []

  # The metadata key under which the passphrase is stored.
  PASSPHRASE_KEY = 'passphrase'

  MAX_TRIES = 5  # Number of times to try an escrow upload.
  TRY_DELAY_FACTOR = 5  # Number of seconds, (* try_num), to wait between tries.

  XSRF_PATH = '/xsrf-token/%s'

  def __init__(self, base_url, opener, headers=None):
    # opener: a urllib2 OpenerDirector used for all HTTP(S) requests.
    # headers: optional dict of extra headers added to retried requests.
    self._metadata = None
    self.base_url = base_url
    self.xsrf_url = util.JoinURL(base_url, self.XSRF_PATH)
    if self.ESCROW_PATH is None:
      raise ValueError('ESCROW_PATH must be set by CauliflowerVestClient subclasses.')
    self.escrow_url = util.JoinURL(base_url, self.ESCROW_PATH)
    self.opener = opener
    self.headers = headers or {}

  def _GetMetadata(self):
    """Returns a dict of key/value metadata pairs."""
    raise NotImplementedError

  def RetrieveSecret(self, target_id):
    """Fetches and returns the passphrase.

    Args:
      target_id: str, Target ID to fetch the passphrase for.
    Returns:
      str: passphrase.
    Raises:
      RequestError: there was an error downloading the passphrase.
      NotFoundError: no passphrase was found for the given target_id.
    """
    xsrf_token = self._FetchXsrfToken(base_settings.GET_PASSPHRASE_ACTION)
    url = '%s?%s' % (util.JoinURL(self.escrow_url, urllib.quote(target_id)),
                     urllib.urlencode({'xsrf-token': xsrf_token}))
    request = urllib2.Request(url)
    try:
      response = self.opener.open(request)
    except urllib2.URLError as e:  # Parent of urllib2.HTTPError.
      if isinstance(e, urllib2.HTTPError):
        e.msg += ': ' + e.read()
        # NOTE(review): the e.code check only makes sense for HTTPError
        # (plain URLError has no .code); nesting reconstructed accordingly.
        if e.code == httplib.NOT_FOUND:
          raise NotFoundError('Failed to retrieve passphrase. %s' % e)
      raise RequestError('Failed to retrieve passphrase. %s' % e)
    content = response.read()
    # Strip the anti-XSSI prefix before parsing the JSON body.
    if not content.startswith(JSON_PREFIX):
      raise RequestError('Expected JSON prefix missing.')
    data = json.loads(content[len(JSON_PREFIX):])
    return data[self.PASSPHRASE_KEY]

  def GetAndValidateMetadata(self):
    """Retrieves and validates machine metadata.

    Raises:
      MetadataError: one or more of the REQUIRED_METADATA were not found.
    """
    if not self._metadata:
      self._metadata = self._GetMetadata()
    for key in self.REQUIRED_METADATA:
      if not self._metadata.get(key, None):
        raise MetadataError('Required metadata is not found: %s' % key)

  def SetOwner(self, owner):
    # Stamps the owner into the cached metadata, fetching it first if needed.
    if not self._metadata:
      self.GetAndValidateMetadata()
    self._metadata['owner'] = owner

  def _FetchXsrfToken(self, action):
    # Fetches a fresh XSRF token scoped to the given server action.
    request = urllib2.Request(self.xsrf_url % action)
    response = self._RetryRequest(request, 'Fetching XSRF token')
    return response.read()

  def _RetryRequest(self, request, description, retry_4xx=False):
    """Make the given HTTP request, retrying upon failure."""
    for k, v in self.headers.iteritems():
      request.add_header(k, v)

    for try_num in range(self.MAX_TRIES):
      try:
        return self.opener.open(request)
      except urllib2.URLError as e:  # Parent of urllib2.HTTPError.
        if isinstance(e, urllib2.HTTPError):
          e.msg += ': ' + e.read()
          # Reraise if HTTP 4xx and retry_4xx is False
          if 400 <= e.code < 500 and not retry_4xx:
            raise RequestError('%s failed: %s' % (description, e))
        # Otherwise retry other HTTPError and URLError failures.
        if try_num == self.MAX_TRIES - 1:
          logging.exception('%s failed permanently.', description)
          raise RequestError(
              '%s failed permanently: %s' % (description, e))
        logging.warning(
            '%s failed with (%s). Retrying ...', description, e)
        # Linear backoff: 5s, 10s, 15s, ...
        time.sleep((try_num + 1) * self.TRY_DELAY_FACTOR)

  def IsKeyRotationNeeded(self, target_id, tag='default'):
    """Check whether a key rotation is required.

    Args:
      target_id: str, Target ID.
      tag: str, passphrase tag.
    Raises:
      RequestError: there was an error getting status from server.
    Returns:
      bool: True if a key rotation is required.
    """
    url = '%s?%s' % (
        util.JoinURL(
            self.base_url, '/api/v1/rekey-required/',
            self.ESCROW_PATH, target_id),
        urllib.urlencode({'tag': tag}))
    request = urllib2.Request(url)
    try:
      response = self.opener.open(request)
    except urllib2.URLError as e:  # Parent of urllib2.HTTPError.
      if isinstance(e, urllib2.HTTPError):
        e.msg += ': ' + e.read()
      raise RequestError('Failed to get status. %s' % e)
    content = response.read()
    if not content.startswith(JSON_PREFIX):
      raise RequestError('Expected JSON prefix missing.')
    return json.loads(content[len(JSON_PREFIX):])

  def UploadPassphrase(self, target_id, passphrase, retry_4xx=False):
    """Uploads a target_id/passphrase pair with metadata.

    Args:
      target_id: str, Target ID.
      passphrase: str, passphrase.
      retry_4xx: bool, whether to retry when errors are in the 401-499 range.
    Raises:
      RequestError: there was an error uploading to the server.
    """
    xsrf_token = self._FetchXsrfToken(base_settings.SET_PASSPHRASE_ACTION)

    # Ugh, urllib2 only does GET and POST?!
    class PutRequest(urllib2.Request):

      def __init__(self, *args, **kwargs):
        kwargs.setdefault('headers', {})
        kwargs['headers']['Content-Type'] = 'application/octet-stream'
        urllib2.Request.__init__(self, *args, **kwargs)
        self._method = 'PUT'

      def get_method(self):  # pylint: disable=g-bad-name
        return 'PUT'

    if not self._metadata:
      self.GetAndValidateMetadata()
    parameters = self._metadata.copy()
    parameters['xsrf-token'] = xsrf_token
    parameters['volume_uuid'] = target_id
    url = '%s?%s' % (self.escrow_url, urllib.urlencode(parameters))

    request = PutRequest(url, data=passphrase)
    self._RetryRequest(request, 'Uploading passphrase', retry_4xx=retry_4xx)
def BuildOauth2Opener(credentials):
  """Produce an OAuth compatible urllib2 OpenerDirector.

  Args:
    credentials: oauth2client credentials, used to sign requests via apply().
  Returns:
    urllib2.OpenerDirector that verifies server certificates against the
    configured root CA chain and carries the OAuth authorization headers.
  """
  context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
  # Disable both legacy SSL protocol versions.  The original only excluded
  # SSLv2, leaving the POODLE-vulnerable SSLv3 negotiable; modern TLS
  # versions remain available for negotiation.
  context.options |= ssl.OP_NO_SSLv2
  context.options |= ssl.OP_NO_SSLv3
  context.verify_mode = ssl.CERT_REQUIRED
  ca_certs_file = settings.ROOT_CA_CERT_CHAIN_PEM_FILE_PATH
  context.load_verify_locations(ca_certs_file)

  opener = urllib2.build_opener(
      urllib2.HTTPSHandler(context=context),
      urllib2.HTTPRedirectHandler())
  h = {}
  credentials.apply(h)
  opener.addheaders = h.items()
  return opener
def GetOauthCredentials():
  """Create an OAuth2 `Credentials` object.

  Runs the installed-app flow: opens the user's browser on the provider's
  authorize URL and waits (up to 60s) for the local redirect with the code.

  Returns:
    oauth2client.client.Credentials on success.
  Raises:
    RuntimeError: required OAuth settings are missing.
    AuthenticationError: the user rejected the request or the code exchange
        failed.
  """
  if not base_settings.OAUTH_CLIENT_ID:
    raise RuntimeError('Missing OAUTH_CLIENT_ID setting!')
  if not settings.OAUTH_CLIENT_SECRET:
    raise RuntimeError('Missing OAUTH_CLIENT_SECRET setting!')

  # Local, ephemeral-port HTTP server that receives the OAuth redirect.
  httpd = oauth2client.tools.ClientRedirectServer(
      ('localhost', 0), oauth2client.tools.ClientRedirectHandler)
  httpd.timeout = 60
  flow = oauth2client.client.OAuth2WebServerFlow(
      client_id=base_settings.OAUTH_CLIENT_ID,
      client_secret=settings.OAUTH_CLIENT_SECRET,
      redirect_uri='http://%s:%s/' % httpd.server_address,
      scope=base_settings.OAUTH_SCOPE,
      )
  authorize_url = flow.step1_get_authorize_url()
  webbrowser.open(authorize_url, new=1, autoraise=True)
  # Block until the provider redirects the browser back to us (one request).
  httpd.handle_request()

  if 'error' in httpd.query_params:
    raise AuthenticationError('Authentication request was rejected.')

  try:
    credentials = flow.step2_exchange(
        httpd.query_params,
        http=httplib2.Http(ca_certs=settings.ROOT_CA_CERT_CHAIN_PEM_FILE_PATH))
  except oauth2client.client.FlowExchangeError as e:
    raise AuthenticationError('Authentication has failed: %s' % e)
  else:
    logging.info('Authentication successful!')
    return credentials
waseem18/oh-mainline | vendor/packages/Django/django/utils/archive.py | 229 | 6935 | """
Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/
Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import tarfile
import zipfile
from django.utils import six
class ArchiveException(Exception):
    """
    Base exception class for all archive errors.
    """


class UnrecognizedArchiveFormat(ArchiveException):
    """
    Error raised when the passed file is not a recognized archive format.
    """
def extract(path, to_path=''):
    """
    Unpack the tar or zip file at the specified path to the directory
    specified by to_path.
    """
    # Equivalent to the context-manager form: Archive.__exit__ simply calls
    # close(), so an explicit try/finally gives identical cleanup semantics.
    archive = Archive(path)
    try:
        archive.extract(to_path)
    finally:
        archive.close()
class Archive(object):
    """
    Public entry point wrapping a concrete archive implementation,
    selected from the file's extension via ``extension_map``.
    """
    def __init__(self, file):
        implementation = self._archive_cls(file)
        self._archive = implementation(file)

    @staticmethod
    def _archive_cls(file):
        """Return the implementation class matching the file's extension."""
        if isinstance(file, six.string_types):
            filename = file
        else:
            try:
                filename = file.name
            except AttributeError:
                raise UnrecognizedArchiveFormat(
                    "File object not a recognized archive format.")
        base, tail_ext = os.path.splitext(filename.lower())
        cls = extension_map.get(tail_ext)
        if cls is None:
            # Double extensions such as ".tar.gz": retry with the inner one.
            base, ext = os.path.splitext(base)
            cls = extension_map.get(ext)
        if cls is None:
            raise UnrecognizedArchiveFormat(
                "Path not a recognized archive format: %s" % filename)
        return cls

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def extract(self, to_path=''):
        self._archive.extract(to_path)

    def list(self):
        self._archive.list()

    def close(self):
        self._archive.close()
class BaseArchive(object):
    """
    Base class shared by the concrete archive implementations.
    """
    def split_leading_dir(self, path):
        """Split ``path`` into its first component and the remainder."""
        path = str(path).lstrip('/').lstrip('\\')
        has_slash = '/' in path
        has_backslash = '\\' in path
        # Split on whichever separator occurs first in the path.
        if has_slash and (not has_backslash
                          or path.find('/') < path.find('\\')):
            return path.split('/', 1)
        if has_backslash:
            return path.split('\\', 1)
        return path, ''

    def has_leading_dir(self, paths):
        """
        Returns true if all the paths have the same leading path name
        (i.e., everything is in one subdirectory in an archive)
        """
        common_prefix = None
        for path in paths:
            prefix, _rest = self.split_leading_dir(path)
            if not prefix:
                return False
            if common_prefix is None:
                common_prefix = prefix
            elif prefix != common_prefix:
                return False
        return True

    def extract(self):
        raise NotImplementedError

    def list(self):
        raise NotImplementedError
class TarArchive(BaseArchive):
    """Archive implementation backed by the ``tarfile`` module."""

    def __init__(self, file):
        self._archive = tarfile.open(file)

    def list(self, *args, **kwargs):
        self._archive.list(*args, **kwargs)

    def extract(self, to_path):
        """
        Extract every member below ``to_path``, stripping a common leading
        directory when the whole archive lives in one subdirectory.
        """
        # note: python<=2.5 doesnt seem to know about pax headers, filter them
        members = [member for member in self._archive.getmembers()
                   if member.name != 'pax_global_header']
        # Bug fix: compare member *names*.  Passing TarInfo objects made
        # has_leading_dir compare their str() representations, so a common
        # leading directory was effectively never detected.
        leading = self.has_leading_dir([x.name for x in members])
        for member in members:
            name = member.name
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            if member.isdir():
                if filename and not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                # Bug fix: ``extracted`` must be bound before the try block,
                # otherwise the ``finally`` clause raises NameError when
                # extractfile() fails on the very first member.
                extracted = None
                try:
                    extracted = self._archive.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    # Bug fix: report the archive's name as the tar file,
                    # not the member path twice.
                    print("In the tar file %s the member %s is invalid: %s" %
                          (self._archive.name, member.name, exc))
                else:
                    dirname = os.path.dirname(filename)
                    if dirname and not os.path.exists(dirname):
                        os.makedirs(dirname)
                    with open(filename, 'wb') as outfile:
                        shutil.copyfileobj(extracted, outfile)
                finally:
                    if extracted:
                        extracted.close()

    def close(self):
        self._archive.close()
class ZipArchive(BaseArchive):
    """Archive implementation backed by the ``zipfile`` module."""

    def __init__(self, file):
        self._archive = zipfile.ZipFile(file)

    def list(self, *args, **kwargs):
        self._archive.printdir(*args, **kwargs)

    def extract(self, to_path):
        """Extract every entry below ``to_path``."""
        entries = self._archive.namelist()
        strip_leading = self.has_leading_dir(entries)
        for entry in entries:
            contents = self._archive.read(entry)
            target = self.split_leading_dir(entry)[1] if strip_leading else entry
            filename = os.path.join(to_path, target)
            dirname = os.path.dirname(filename)
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)
            if filename.endswith(('/', '\\')):
                # A directory entry: just make sure it exists on disk.
                if not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                with open(filename, 'wb') as outfile:
                    outfile.write(contents)

    def close(self):
        self._archive.close()
# Maps a (lower-cased) file extension to the Archive implementation that
# can unpack it; consulted by Archive._archive_cls(), which also retries
# with the inner extension for names like "foo.tar.gz".
extension_map = {
    '.tar': TarArchive,
    '.tar.bz2': TarArchive,
    '.tar.gz': TarArchive,
    '.tgz': TarArchive,
    '.tz2': TarArchive,
    '.zip': ZipArchive,
}
| agpl-3.0 |
JulienMcJay/eclock | windows/Python27/Lib/site-packages/Cython/Build/Inline.py | 8 | 10878 | import sys, os, re, inspect
import imp
try:
import hashlib
except ImportError:
import md5 as hashlib
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
import Cython
from Cython.Compiler.Main import Context, CompilationOptions, default_options
from Cython.Compiler.ParseTreeTransforms import CythonTransform, SkipDeclarations, AnalyseDeclarationsTransform
from Cython.Compiler.TreeFragment import parse_from_strings
from Cython.Build.Dependencies import strip_string_literals, cythonize, cached_function
from Cython.Compiler import Pipeline
from Cython.Utils import get_cython_cache_dir
import cython as cython_module
# A utility function to convert user-supplied ASCII strings to unicode.
if sys.version_info[0] < 3:
    def to_unicode(s):
        # Decode byte strings as ASCII; unicode objects pass through as-is.
        if not isinstance(s, unicode):
            return s.decode('ascii')
        else:
            return s
else:
    # On Python 3 every str is already unicode, so this is the identity.
    to_unicode = lambda x: x
class AllSymbols(CythonTransform, SkipDeclarations):
    """Parse-tree visitor collecting every referenced name into ``self.names``."""
    def __init__(self):
        CythonTransform.__init__(self, None)
        # Names seen so far; filled in by visit_NameNode.
        self.names = set()
    def visit_NameNode(self, node):
        # Record the referenced name.
        self.names.add(node.name)
@cached_function
def unbound_symbols(code, context=None):
    """
    Return the list of names referenced by ``code`` that are neither
    declared inside the code itself nor Python builtins.
    """
    code = to_unicode(code)
    if context is None:
        context = Context([], default_options)
    # Local import; shadows the module-level import of the same name.
    from Cython.Compiler.ParseTreeTransforms import AnalyseDeclarationsTransform
    tree = parse_from_strings('(tree fragment)', code)
    # Run the compiler pipeline only far enough to analyse declarations.
    for phase in Pipeline.create_pipeline(context, 'pyx'):
        if phase is None:
            continue
        tree = phase(tree)
        if isinstance(phase, AnalyseDeclarationsTransform):
            break
    symbol_collector = AllSymbols()
    symbol_collector(tree)
    unbound = []
    try:
        import builtins
    except ImportError:
        import __builtin__ as builtins  # Python 2 fallback
    for name in symbol_collector.names:
        if not tree.scope.lookup(name) and not hasattr(builtins, name):
            unbound.append(name)
    return unbound
def unsafe_type(arg, context=None):
    """
    Like safe_type(), but declares plain Python ints as C 'long' (no
    overflow protection for big values — hence "unsafe").
    """
    return 'long' if type(arg) is int else safe_type(arg, context)
def safe_type(arg, context=None):
    """
    Return a Cython type declaration string able to safely hold ``arg``.
    Falls back to 'object' when no more specific type is known.
    """
    py_type = type(arg)
    if py_type in [list, tuple, dict, str]:
        return py_type.__name__
    scalar_map = {complex: 'double complex', float: 'double', bool: 'bint'}
    if py_type in scalar_map:
        return scalar_map[py_type]
    if 'numpy' in sys.modules and isinstance(arg, sys.modules['numpy'].ndarray):
        return 'numpy.ndarray[numpy.%s_t, ndim=%s]' % (arg.dtype.name, arg.ndim)
    # Walk the MRO looking for a type known to the compiler context;
    # stop at the first builtin base (everything is 'object' from there).
    for base_type in py_type.mro():
        if base_type.__module__ in ('__builtin__', 'builtins'):
            return 'object'
        module = context.find_module(base_type.__module__, need_pxd=False)
        if module:
            entry = module.lookup(base_type.__name__)
            if entry.is_type:
                return '%s.%s' % (base_type.__module__, base_type.__name__)
    return 'object'
def _get_build_extension():
    """
    Create and return a fully configured distutils build_ext command.
    """
    dist = Distribution()
    # Ensure the build respects distutils configuration by parsing
    # the configuration files
    dist.parse_config_files(dist.find_config_files())
    cmd = build_ext(dist)
    cmd.finalize_options()
    return cmd
@cached_function
def _create_context(cython_include_dirs):
    """Create (and memoize) a compiler Context for the given include dirs.

    Callers pass a tuple so the @cached_function key is hashable.
    """
    return Context(list(cython_include_dirs), default_options)
def cython_inline(code,
                  get_type=unsafe_type,
                  lib_dir=os.path.join(get_cython_cache_dir(), 'inline'),
                  cython_include_dirs=['.'],
                  force=False,
                  quiet=False,
                  locals=None,
                  globals=None,
                  **kwds):
    """
    Compile ``code`` as the body of a Cython function and execute it.

    Unbound symbols in ``code`` are resolved from ``kwds`` and the
    caller's locals/globals, typed via ``get_type``, and passed to a
    generated ``__invoke`` function.  Build products are cached in
    ``lib_dir`` keyed by a hash of the code, the argument signature and
    the interpreter, so repeated calls reuse the compiled module.

    NOTE(review): this function relies on Python 2 semantics in several
    places (``kwds.keys()`` returning a sortable list, deleting keys
    while iterating ``kwds.items()``, ``imp.load_dynamic``) — it will
    not run unchanged on Python 3.
    NOTE(review): ``cython_include_dirs=['.']`` is a mutable default
    argument; it is not mutated below, but is risky style.
    """
    if get_type is None:
        get_type = lambda x: 'object'
    code = to_unicode(code)
    orig_code = code
    # Pull string literals out so they can't confuse parsing; restored below.
    code, literals = strip_string_literals(code)
    code = strip_common_indent(code)
    ctx = _create_context(tuple(cython_include_dirs))
    # f_back.f_back: skip this frame and the decorator/cache wrapper.
    if locals is None:
        locals = inspect.currentframe().f_back.f_back.f_locals
    if globals is None:
        globals = inspect.currentframe().f_back.f_back.f_globals
    try:
        # Bind every free symbol of the code from kwds, then the caller's
        # locals, then globals.
        for symbol in unbound_symbols(code):
            if symbol in kwds:
                continue
            elif symbol in locals:
                kwds[symbol] = locals[symbol]
            elif symbol in globals:
                kwds[symbol] = globals[symbol]
            else:
                print("Couldn't find ", symbol)
    except AssertionError:
        if not quiet:
            # Parsing from strings not fully supported (e.g. cimports).
            print("Could not parse code as a string (to extract unbound symbols).")
    cimports = []
    # NOTE(review): deleting from kwds while iterating items() is only
    # safe on Python 2 (items() returns a list there).
    for name, arg in kwds.items():
        if arg is cython_module:
            cimports.append('\ncimport cython as %s' % name)
            del kwds[name]
    # Python 2 idiom: keys() returns a list, sorted in place.
    arg_names = kwds.keys()
    arg_names.sort()
    arg_sigs = tuple([(get_type(kwds[arg], ctx), arg) for arg in arg_names])
    # Cache key: same code + same signature + same interpreter => reuse.
    key = orig_code, arg_sigs, sys.version_info, sys.executable, Cython.__version__
    module_name = "_cython_inline_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
    if module_name in sys.modules:
        module = sys.modules[module_name]
    else:
        build_extension = None
        if cython_inline.so_ext is None:
            # Figure out and cache current extension suffix
            build_extension = _get_build_extension()
            cython_inline.so_ext = build_extension.get_ext_filename('')
        module_path = os.path.join(lib_dir, module_name + cython_inline.so_ext)
        if not os.path.exists(lib_dir):
            os.makedirs(lib_dir)
        if force or not os.path.isfile(module_path):
            cflags = []
            c_include_dirs = []
            # Qualified type names ("numpy.ndarray[...]") need a cimport.
            qualified = re.compile(r'([.\w]+)[.]')
            for type, _ in arg_sigs:
                m = qualified.match(type)
                if m:
                    cimports.append('\ncimport %s' % m.groups()[0])
                    # one special case
                    if m.groups()[0] == 'numpy':
                        import numpy
                        c_include_dirs.append(numpy.get_include())
                        # cflags.append('-Wno-unused')
            module_body, func_body = extract_func_code(code)
            params = ', '.join(['%s %s' % a for a in arg_sigs])
            module_code = """
%(module_body)s
%(cimports)s
def __invoke(%(params)s):
%(func_body)s
""" % {'cimports': '\n'.join(cimports), 'module_body': module_body, 'params': params, 'func_body': func_body }
            # Put the stripped string literals back into the generated source.
            for key, value in literals.items():
                module_code = module_code.replace(key, value)
            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            fh = open(pyx_file, 'w')
            try:
                fh.write(module_code)
            finally:
                fh.close()
            extension = Extension(
                name = module_name,
                sources = [pyx_file],
                include_dirs = c_include_dirs,
                extra_compile_args = cflags)
            if build_extension is None:
                build_extension = _get_build_extension()
            build_extension.extensions = cythonize([extension], include_path=cython_include_dirs, quiet=quiet)
            build_extension.build_temp = os.path.dirname(pyx_file)
            build_extension.build_lib = lib_dir
            build_extension.run()
        module = imp.load_dynamic(module_name, module_path)
    arg_list = [kwds[arg] for arg in arg_names]
    return module.__invoke(*arg_list)
# Cached suffix used by cython_inline above. None should get
# overridden with actual value upon the first cython_inline invocation
cython_inline.so_ext = None

# Matches the first non-space character of a line.
non_space = re.compile('[^ ]')
def strip_common_indent(code):
    """
    Remove the largest common leading-space indent from every line of
    ``code``.  Blank lines are ignored, and comment lines — skipped when
    computing the common indent — are left untouched.
    """
    non_space = re.compile('[^ ]')
    min_indent = None
    lines = code.split('\n')
    for line in lines:
        match = non_space.search(line)
        if not match:
            continue  # blank
        indent = match.start()
        if line[indent] == '#':
            continue  # comment
        elif min_indent is None or min_indent > indent:
            min_indent = indent
    for ix, line in enumerate(lines):
        match = non_space.search(line)
        if not match:
            continue  # blank
        # Bug fix: recompute this line's own indent.  The original reused
        # the stale ``indent`` left over from the first loop, which treated
        # the wrong lines as comments and could raise IndexError on lines
        # shorter than that stale offset.
        indent = match.start()
        if line[indent] == '#':
            continue  # leave comment lines as they are
        lines[ix] = line[min_indent:]
    return '\n'.join(lines)
# Recognises lines that must stay at module scope rather than move into the
# generated function body: cdef extern/class blocks, cimports, star imports.
module_statement = re.compile(r'^((cdef +(extern|class))|cimport|(from .+ cimport)|(from .+ import +[*]))')
def extract_func_code(code):
    """
    Partition ``code`` into (module_level_source, indented_function_body).

    Unindented lines matching ``module_statement`` go to module scope,
    together with their indented continuation lines; everything else ends
    up in the function body, re-indented by four spaces.
    """
    module_lines = []
    func_lines = []
    target = func_lines
    for line in code.replace('\t', ' ').split('\n'):
        if not line.startswith(' '):
            # An unindented line decides where subsequent lines go.
            target = module_lines if module_statement.match(line) else func_lines
        target.append(line)
    return '\n'.join(module_lines), '    ' + '\n    '.join(func_lines)
try:
    from inspect import getcallargs
except ImportError:
    # Python < 2.7 fallback: re-implement inspect.getcallargs.
    # NOTE(review): uses Python 2-only ``raise E, msg`` syntax throughout.
    def getcallargs(func, *arg_values, **kwd_values):
        """
        Map positional and keyword call arguments onto ``func``'s
        parameter names, mirroring inspect.getcallargs.
        """
        all = {}
        args, varargs, kwds, defaults = inspect.getargspec(func)
        if varargs is not None:
            # Extra positionals beyond the named parameters.
            all[varargs] = arg_values[len(args):]
        for name, value in zip(args, arg_values):
            all[name] = value
        for name, value in kwd_values.items():
            if name in args:
                if name in all:
                    raise TypeError, "Duplicate argument %s" % name
                all[name] = kwd_values.pop(name)
        if kwds is not None:
            # Remaining keywords go into the **kwargs parameter.
            all[kwds] = kwd_values
        elif kwd_values:
            raise TypeError, "Unexpected keyword arguments: %s" % kwd_values.keys()
        if defaults is None:
            defaults = ()
        first_default = len(args) - len(defaults)
        for ix, name in enumerate(args):
            if name not in all:
                if ix >= first_default:
                    all[name] = defaults[ix - first_default]
                else:
                    raise TypeError, "Missing argument: %s" % name
        return all
def get_body(source):
    """
    Return the body of a function or lambda source string, i.e. everything
    after the first ':'.  For a lambda, the body expression is wrapped in
    a ``return`` statement so it can serve as a function body.
    """
    ix = source.index(':')
    # Bug fix: the original compared source[:5] == 'lambda' — a 5-character
    # slice can never equal the 6-character literal, so lambda bodies were
    # never wrapped in a return statement.
    if source.startswith('lambda'):
        return "return %s" % source[ix+1:]
    else:
        return source[ix+1:]
# Lots to be done here... It would be especially cool if compiled functions
# could invoke each other quickly.
class RuntimeCompiledFunction(object):
    """
    Wraps a Python function and, when called, runs its source body through
    cython_inline() with the call arguments bound by parameter name.
    """
    def __init__(self, f):
        self._f = f
        self._body = get_body(inspect.getsource(f))
    def __call__(self, *args, **kwds):
        # Bind *args/**kwds to the wrapped function's parameter names.
        all = getcallargs(self._f, *args, **kwds)
        # NOTE(review): ``func_globals`` is the Python 2 attribute name
        # (``__globals__`` on Python 3), so this class is Py2-only as written.
        return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all)
| gpl-2.0 |
jeanlinux/calibre | src/calibre/ebooks/rtf2xml/info.py | 24 | 11629 | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys, os, re
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
class Info:
    """
    Make tags for document-information.

    A state machine (driven by self.__state_dict) that walks an RTF
    intermediate token stream, converts the information-table fields
    (title, author, timestamps, counts, ...) into XML-style tags, and
    rewrites the input file in place.
    """
    def __init__(self,
            in_file,
            bug_handler,
            copy = None,
            run_level = 1,
            ):
        """
        Required:
            'file'--file to parse
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'temp_dir' --where to output temporary results (default is
            directory from which the script is run.)
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__run_level = run_level
        # Temporary output file; renamed over self.__file in fix_info().
        self.__write_to = better_mktemp()
    def __initiate_values(self):
        """
        Initiate all values.
        """
        # Accumulates text between the opening and closing of a field.
        self.__text_string = ''
        self.__state = 'before_info_table'
        self.rmspace = re.compile(r'\s+')
        # Maps the current state to the handler for the next line.
        self.__state_dict = {
            'before_info_table': self.__before_info_table_func,
            'after_info_table': self.__after_info_table_func,
            'in_info_table' : self.__in_info_table_func,
            'collect_text' : self.__collect_text_func,
            'collect_tokens' : self.__collect_tokens_func,
        }
        # Maps a 16-character token prefix to (handler, output tag name).
        self.__info_table_dict = {
            'cw<di<title_____' : (self.__found_tag_with_text_func, 'title'),
            'cw<di<author____' : (self.__found_tag_with_text_func, 'author'),
            'cw<di<operator__' : (self.__found_tag_with_text_func, 'operator'),
            'cw<di<manager___' : (self.__found_tag_with_text_func, 'manager'),
            'cw<di<company___' : (self.__found_tag_with_text_func, 'company'),
            'cw<di<keywords__' : (self.__found_tag_with_text_func, 'keywords'),
            'cw<di<category__' : (self.__found_tag_with_text_func, 'category'),
            'cw<di<doc-notes_' : (self.__found_tag_with_text_func, 'doc-notes'),
            'cw<di<subject___' : (self.__found_tag_with_text_func, 'subject'),
            'cw<di<linkbase__' : (self.__found_tag_with_text_func, 'hyperlink-base'),
            'cw<di<create-tim' : (self.__found_tag_with_tokens_func, 'creation-time'),
            'cw<di<revis-time' : (self.__found_tag_with_tokens_func, 'revision-time'),
            'cw<di<print-time' : (self.__found_tag_with_tokens_func, 'printing-time'),
            'cw<di<backuptime' : (self.__found_tag_with_tokens_func, 'backup-time'),
            'cw<di<num-of-wor' : (self.__single_field_func, 'number-of-words'),
            'cw<di<num-of-chr' : (self.__single_field_func, 'number-of-characters'),
            'cw<di<numofchrws' : (self.__single_field_func, 'number-of-characters-without-space'),
            'cw<di<num-of-pag' : (self.__single_field_func, 'number-of-pages'),
            'cw<di<version___' : (self.__single_field_func, 'version'),
            'cw<di<edit-time_' : (self.__single_field_func, 'editing-time'),
            'cw<di<intern-ver' : (self.__single_field_func, 'internal-version-number'),
            'cw<di<internalID' : (self.__single_field_func, 'internal-id-number'),
        }
        # Expands the abbreviated attribute names found in token lines.
        self.__token_dict = {
            'year______' : 'year',
            'month_____' : 'month',
            'day_______' : 'day',
            'minute____' : 'minute',
            'second____' : 'second',
            'revis-time' : 'revision-time',
            'create-tim' : 'creation-time',
            'edit-time_' : 'editing-time',
            'print-time' : 'printing-time',
            'backuptime' : 'backup-time',
            'num-of-wor' : 'number-of-words',
            'num-of-chr' : 'number-of-characters',
            'numofchrws' : 'number-of-characters-without-space',
            'num-of-pag' : 'number-of-pages',
            'version___' : 'version',
            'intern-ver' : 'internal-version-number',
            'internalID' : 'internal-id-number',
        }
    def __before_info_table_func(self, line):
        """
        Required:
            line -- the line to parse
        Returns:
            nothing
        Logic:
            Check for the beginning of the information table. When found, set
            the state to the information table. Always write the line.
        """
        if self.__token_info == 'mi<mk<doc-in-beg':
            self.__state = 'in_info_table'
        self.__write_obj.write(line)
    def __in_info_table_func(self, line):
        """
        Requires:
            line -- line to parse
        Returns:
            nothing.
        Logic:
            Check for the end of information. If not found, check if the
            token has a special value in the info table dictionary. If it
            does, execute that function.
            Otherwise, output the line to the file.
        """
        if self.__token_info == 'mi<mk<doc-in-end':
            self.__state = 'after_info_table'
        else:
            action, tag = self.__info_table_dict.get(self.__token_info, (None, None))
            if action:
                action(line, tag)
            else:
                self.__write_obj.write(line)
    def __found_tag_with_text_func(self, line, tag):
        """
        Requires:
            line -- line to parse
            tag --what kind of line
        Returns:
            nothing
        Logic:
            This function marks the beginning of information fields that have
            text that must be collected. Set the type of information field
            with the tag option. Set the state to collecting text.
        """
        self.__tag = tag
        self.__state = 'collect_text'
    def __collect_text_func(self, line):
        """
        Requires:
            line -- line to parse
        Returns:
            nothing
        Logic:
            If the end of the information field is found, write the text
            string to the file.
            Otherwise, if the line contains text, add it to the text string.
        """
        if self.__token_info == 'mi<mk<docinf-end':
            self.__state = 'in_info_table'
            #Don't print empty tags
            if len(self.rmspace.sub('',self.__text_string)):
                self.__write_obj.write(
                    'mi<tg<open______<%s\n'
                    'tx<nu<__________<%s\n'
                    'mi<tg<close_____<%s\n' % (self.__tag, self.__text_string, self.__tag)
                )
            self.__text_string = ''
        elif line[0:2] == 'tx':
            # Text payload starts at column 17 of a 'tx' line.
            self.__text_string += line[17:-1]
    def __found_tag_with_tokens_func(self, line, tag):
        """
        Requires:
            line -- line to parse
            tag -- type of field
        Returns:
            nothing
        Logic:
            Some fields have a series of tokens (cw<di<year______<nu<2003)
            that must be parsed as attributes for the element.
            Set the state to collect tokens, and set the text string to
            start an empty element with attributes.
        """
        self.__state = 'collect_tokens'
        self.__text_string = 'mi<tg<empty-att_<%s' % tag
        #mi<tg<empty-att_<page-definition<margin>33\n
    def __collect_tokens_func(self, line):
        """
        Requires:
            line -- line to parse
        Returns:
            nothing
        Logic:
            This function collects all the token information and adds it to
            the text string until the end of the field is found.
            First check for the end of the information field. If found, write
            the text string to the file.
            If not found, get the relevant information from the text string.
            This information cannot be directly added to the text string,
            because it exists in abbreviated form. (num-of-wor)
            I want to check this information in a dictionary to convert it
            to a longer, readable form. If the key does not exist in the
            dictionary, print out an error message. Otherwise add the value
            to the text string.
            (num-of-wor => number-of-words)
        """
        #cw<di<year______<nu<2003
        if self.__token_info == 'mi<mk<docinf-end':
            self.__state = 'in_info_table'
            self.__write_obj.write(
                '%s\n' % self.__text_string
            )
            self.__text_string = ''
        else:
            # Fixed-width token layout: attribute in cols 6-15, value from 20.
            att = line[6:16]
            value = line[20:-1]
            att_changed = self.__token_dict.get(att)
            if att_changed is None:
                if self.__run_level > 3:
                    msg = 'No dictionary match for %s\n' % att
                    # Python 2 raise syntax; bug_handler is presumably an
                    # exception class here -- TODO confirm with callers.
                    raise self.__bug_handler, msg
            else:
                self.__text_string += '<%s>%s' % (att_changed, value)
    def __single_field_func(self, line, tag):
        # One-shot field: emit an empty element with a single attribute.
        value = line[20:-1]
        self.__write_obj.write(
            'mi<tg<empty-att_<%s<%s>%s\n' % (tag, tag, value)
        )
    def __after_info_table_func(self, line):
        """
        Requires:
            line --line to write to file
        Returns:
            nothing
        Logic:
            After the end of the information table, simply write the line to
            the file.
        """
        self.__write_obj.write(line)
    def fix_info(self):
        """
        Requires:
            nothing
        Returns:
            nothing (changes the original file)
        Logic:
            Read one line in at a time. Determine what action to take based on
            the state. If the state is before the information table, look for the
            beginning of the style table.
            If the state is in the information table, use other methods to
            parse the information
            style table, look for lines with style info, and substitute the
            number with the name of the style. If the state is after the
            information table, simply write the line to the output file.
        """
        self.__initiate_values()
        with open(self.__file, 'r') as read_obj:
            with open(self.__write_to, 'wb') as self.__write_obj:
                for line in read_obj:
                    # The first 16 characters identify the token type.
                    self.__token_info = line[:16]
                    action = self.__state_dict.get(self.__state)
                    if action is None:
                        sys.stderr.write('No matching state in module styles.py\n')
                        sys.stderr.write(self.__state + '\n')
                    action(line)
        copy_obj = copy.Copy(bug_handler = self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "info.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| gpl-3.0 |
khkaminska/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This examples shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
n_features = 10  # single source of truth for the feature count used below

X, y = make_classification(n_samples=1000,
                           n_features=n_features,
                           n_informative=3,
                           n_redundant=0,
                           n_repeated=0,
                           n_classes=2,
                           random_state=0,
                           shuffle=False)

# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
                              random_state=0)

forest.fit(X, y)
importances = forest.feature_importances_
# Inter-tree variability: std-dev of the per-tree importances.
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
# Feature indices sorted by importance, most important first.
indices = np.argsort(importances)[::-1]

# Print the feature ranking
print("Feature ranking:")

for f in range(n_features):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))

# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(n_features), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(n_features), indices)
plt.xlim([-1, n_features])
plt.show()
| bsd-3-clause |
pwarren/AGDeviceControl | agdevicecontrol/thirdparty/site-packages/linux2/twisted/test/test_paths.py | 3 | 6972 |
import os, time, pickle
from twisted.python import filepath
from twisted.python.runtime import platform
from twisted.trial import unittest
class FilePathTestCase(unittest.TestCase):
    """Tests for twisted.python.filepath.FilePath."""

    # Contents written to the fixture files created in setUp.
    f1content = "file 1"
    f2content = "file 2"

    def setUp(self):
        self.now = time.time()
        cmn = self.mktemp()
        os.mkdir(cmn)
        os.mkdir(os.path.join(cmn, "sub1"))
        # Bug fix: close every file handle explicitly instead of relying on
        # refcounting garbage collection (leaks on other interpreters and
        # keeps the files locked on Windows).
        f = open(os.path.join(cmn, "file1"), "wb")
        try:
            f.write(self.f1content)
        finally:
            f.close()
        f = open(os.path.join(cmn, "sub1", "file2"), "wb")
        try:
            f.write(self.f2content)
        finally:
            f.close()
        os.mkdir(os.path.join(cmn, 'sub3'))
        # These three just need to exist (empty files).
        for suffix in ("ext1", "ext2", "ext3"):
            open(os.path.join(cmn, "sub3", "file3." + suffix), "wb").close()
        self.path = filepath.FilePath(cmn)

    def testGetAndSet(self):
        content = 'newcontent'
        self.path.child('new').setContent(content)
        newcontent = self.path.child('new').getContent()
        self.failUnlessEqual(content, newcontent)
        content = 'content'
        self.path.child('new').setContent(content, '.tmp')
        newcontent = self.path.child('new').getContent()
        self.failUnlessEqual(content, newcontent)
    if platform.getType() == 'win32':
        testGetAndSet.todo = "os.rename in FilePath.setContent doesn't work too well on Windows"

    def testValidSubdir(self):
        sub1 = self.path.child('sub1')
        self.failUnless(sub1.exists(),
                        "This directory does exist.")
        self.failUnless(sub1.isdir(),
                        "It's a directory.")
        self.failUnless(not sub1.isfile(),
                        "It's a directory.")
        self.failUnless(not sub1.islink(),
                        "It's a directory.")
        self.failUnlessEqual(sub1.listdir(),
                             ['file2'])

    def testMultiExt(self):
        f3 = self.path.child('sub3').child('file3')
        exts = '.foo','.bar', 'ext1','ext2','ext3'
        self.failIf(f3.siblingExtensionSearch(*exts))
        f3e = f3.siblingExtension(".foo")
        f3e.touch()
        self.failIf(not f3.siblingExtensionSearch(*exts).exists())
        self.failIf(not f3.siblingExtensionSearch('*').exists())
        f3e.remove()
        self.failIf(f3.siblingExtensionSearch(*exts))

    def testInvalidSubdir(self):
        sub2 = self.path.child('sub2')
        self.failIf(sub2.exists(),
                    "This directory does not exist.")

    def testValidFiles(self):
        f1 = self.path.child('file1')
        # Bug fix: close the opened file objects (previously leaked).
        fobj = f1.open()
        try:
            self.failUnlessEqual(fobj.read(), self.f1content)
        finally:
            fobj.close()
        f2 = self.path.child('sub1').child('file2')
        fobj = f2.open()
        try:
            self.failUnlessEqual(fobj.read(), self.f2content)
        finally:
            fobj.close()

    def testPreauthChild(self):
        fp = filepath.FilePath('.')
        fp.preauthChild('foo/bar')
        self.assertRaises(filepath.InsecurePath, fp.child, '/foo')

    def testStatCache(self):
        p = self.path.child('stattest')
        p.touch()
        self.failUnlessEqual(p.getsize(), 0)
        self.failUnlessEqual(abs(p.getmtime() - time.time()) // 20, 0)
        self.failUnlessEqual(abs(p.getctime() - time.time()) // 20, 0)
        self.failUnlessEqual(abs(p.getatime() - time.time()) // 20, 0)
        self.failUnlessEqual(p.exists(), True)
        self.failUnlessEqual(p.exists(), True)
        p.remove()
        # test caching
        self.failUnlessEqual(p.exists(), True)
        p.restat(reraise=False)
        self.failUnlessEqual(p.exists(), False)
        self.failUnlessEqual(p.islink(), False)
        self.failUnlessEqual(p.isdir(), False)
        self.failUnlessEqual(p.isfile(), False)

    def testPersist(self):
        newpath = pickle.loads(pickle.dumps(self.path))
        self.failUnlessEqual(self.path.__class__, newpath.__class__)
        self.failUnlessEqual(self.path.path, newpath.path)

    def testInsecureUNIX(self):
        self.assertRaises(filepath.InsecurePath, self.path.child, "..")
        self.assertRaises(filepath.InsecurePath, self.path.child, "/etc")
        self.assertRaises(filepath.InsecurePath, self.path.child, "../..")

    def testInsecureWin32(self):
        self.assertRaises(filepath.InsecurePath, self.path.child, r"..\..")
        self.assertRaises(filepath.InsecurePath, self.path.child, r"C:randomfile")
    if platform.getType() != 'win32':
        testInsecureWin32.skip = "Consider yourself lucky."
    else:
        testInsecureWin32.todo = "Hrm, broken"

    def testInsecureWin32Whacky(self):
        """Windows has 'special' filenames like NUL and CON and COM1 and LPR
        and PRN and ... god knows what else. They can be located anywhere in
        the filesystem. For obvious reasons, we do not wish to normally permit
        access to these.
        """
        self.assertRaises(filepath.InsecurePath, self.path.child, "CON")
        self.assertRaises(filepath.InsecurePath, self.path.child, "C:CON")
        self.assertRaises(filepath.InsecurePath, self.path.child, r"C:\CON")
    if platform.getType() != 'win32':
        testInsecureWin32Whacky.skip = "Consider yourself lucky."
    else:
        testInsecureWin32Whacky.todo = "Broken, no checking for whacky devices"
from twisted.python import urlpath
class URLPathTestCase(unittest.TestCase):
    """Tests for twisted.python.urlpath.URLPath traversal semantics."""
    def setUp(self):
        self.path = urlpath.URLPath.fromString("http://example.com/foo/bar?yes=no&no=yes#footer")
    def testStringConversion(self):
        # Round trip: query string and fragment must be preserved.
        self.assertEquals(str(self.path), "http://example.com/foo/bar?yes=no&no=yes#footer")
    def testChildString(self):
        self.assertEquals(str(self.path.child('hello')), "http://example.com/foo/bar/hello")
        self.assertEquals(str(self.path.child('hello').child('')), "http://example.com/foo/bar/hello/")
    def testSiblingString(self):
        self.assertEquals(str(self.path.sibling('baz')), 'http://example.com/foo/baz')
        # The sibling of http://example.com/foo/bar/
        # is http://example.com/foo/bar/baz
        # because really we are constructing a sibling of
        # http://example.com/foo/bar/index.html
        self.assertEquals(str(self.path.child('').sibling('baz')), 'http://example.com/foo/bar/baz')
    def testParentString(self):
        # parent should be equivalent to '..'
        # 'foo' is the current directory, '/' is the parent directory
        self.assertEquals(str(self.path.parent()), 'http://example.com/')
        self.assertEquals(str(self.path.child('').parent()), 'http://example.com/foo/')
        self.assertEquals(str(self.path.child('baz').parent()), 'http://example.com/foo/')
        # Walking parent() above the root must stay at the root.
        self.assertEquals(str(self.path.parent().parent().parent().parent().parent()), 'http://example.com/')
    def testHereString(self):
        # here should be equivalent to '.'
        self.assertEquals(str(self.path.here()), 'http://example.com/foo/')
        self.assertEquals(str(self.path.child('').here()), 'http://example.com/foo/bar/')
| gpl-2.0 |
jeremiedecock/pyai | ailib/optimize/functions/unconstrained.py | 1 | 31848 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017,2018,2019 Jeremie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module contains some classical test functions for unconstrained continuous
single-objective optimization.
"""
__all__ = ['sphere', 'Sphere', 'sphere1d', 'sphere2d', # TODO
'rosen', 'Rosenbrock', 'rosen2d',
'himmelblau', 'Himmelblau', 'himmelblau2d',
'rastrigin', 'Rastrigin', 'rastrigin2d',
'easom', 'Easom', 'easom2d',
'crossintray', 'Crossintray', 'crossintray2d',
'holder', 'Holder', 'holder2d']
import numpy as np
# GENERIC OBJECTIVE FUNCTION ##################################################
class _ObjectiveFunction:
    """Generic *objective function* used by the benchmark functions below.

    Subclasses install a callable in ``self._objective_function`` (and
    optionally ``self._gradient_function`` / ``self._hessian_function``)
    and fill in the metadata attributes (``ndim``, ``bounds``,
    ``continuous``, ``arg_min``, ``function_name``, ``function_formula``).
    Instances are callable and keep evaluation counters and optional logs.
    """
    def __init__(self):
        self._objective_function = None
        self._gradient_function = None   # TODO: use a generic numeric derivative function by default
        self._hessian_function = None    # TODO: use a generic numeric derivative function by default
        self.reset_eval_counters()
        self.reset_eval_logs()
        self.do_eval_logs = False
        self.noise = None        # optional callable (x, y) -> noisy y
        self.ndim = None         # filled in by subclasses
        self.bounds = None       # filled in by subclasses
        self.continuous = None   # filled in by subclasses
        # BUG FIX: this used to be `np.zeros(shape=self.ndim)` evaluated
        # while self.ndim is still None, which relied on NumPy's deprecated
        # (and later removed) `shape=None` alias for `()`. A 0-d zero array
        # is equivalent: broadcasting keeps the translation below a no-op
        # until a real translation vector is installed.
        self.translation_vector = np.zeros(())
        self.function_name = None
        self.function_formula = None
        self.arg_min = None
    @property
    def stochastic(self):
        """True when a noise model is attached to this function."""
        return self.noise is not None
    @property
    def unimodal(self):
        """Whether the function has a single local minimum (subclass duty)."""
        raise NotImplementedError
    def reset_eval_counters(self):
        """Reset the function/gradient/hessian evaluation counters."""
        # TODO: make an external Log (or Counter) class
        self.num_eval = 0
        self.num_gradient_eval = 0
        self.num_hessian_eval = 0
    def reset_eval_logs(self):
        """Reset the log of evaluated points and their values."""
        # TODO: make an external Log class
        self.eval_logs_dict = {'x': [], 'fx': []}  # TODO
    def __call__(self, x):
        """Evaluate one or several points.

        This wrapper does several boring tasks besides the evaluation
        itself: it checks arguments, updates counters, applies the
        translation vector and the optional noise model, and logs results.

        Parameters
        ----------
        x : ndarray
            The 1D or 2D numpy array containing the points to evaluate.
            If `x` is a 2D array, the coordinates of each point are
            distributed along *the first dimension*.
            For instance, to evaluate the three 2D points (0,0), (1,1) and
            (2,2), `x` has to be coded as the following:
            `x = np.array([[0, 1, 2], [0, 1, 2]])`
            so that the first point is given by `x[:,0]`, the second point
            by `x[:,1]`, ... (this makes functions definition much simpler).

        Returns
        -------
        float or ndarray
            The results of the evaluation: a scalar if only one point has
            been evaluated or a 1D numpy array if several points have been
            evaluated.
        """
        # Check self._objective_function ########
        assert self._objective_function is not None
        assert callable(self._objective_function)
        # Check x shape #########################
        if x.ndim > 0:
            if x.shape[0] != self.ndim:
                raise Exception('Wrong number of dimension: x has {} rows instead of {}.'.format(x.shape[0], self.ndim))
        # Update the evaluations counter ########
        # TODO: make an external Log (or Counter) class
        if (x.ndim == 0) or (x.ndim == 1):
            self.num_eval += 1
        elif x.ndim == 2:
            # One evaluation per column (i.e. per point).
            self.num_eval += x.shape[1]
        else:
            raise Exception('Wrong number of dimension: x is a {} dimensions numpy array ; 1 or 2 dimensions are expected.'.format(x.ndim))
        # Apply translation #####################
        x_translated = (x.T - self.translation_vector).T
        # Eval x ################################
        y = self._objective_function(x_translated)
        # Apply noise ###########################
        if self.noise is not None:
            y = self.noise(x, y)
        # Update the evals log ##################
        # TODO: make an external Log class
        if self.do_eval_logs:
            if y.ndim == 0:
                self.eval_logs_dict['x'].append(x)    # TODO
            elif y.ndim == 1:
                self.eval_logs_dict['x'].extend(x.T)  # TODO
            else:
                raise Exception("Wrong output dimension.")
            if y.ndim == 0:
                self.eval_logs_dict['fx'].append(y)   # TODO
            elif y.ndim == 1:
                self.eval_logs_dict['fx'].extend(y)   # TODO
            else:
                raise Exception("Wrong output dimension.")
        return y
    def gradient(self, x):
        """
        The derivative (i.e. gradient) of the objective function.

        Parameters
        ----------
        x : array_like
            One dimension Numpy array of the point at which the derivative
            is to be computed or a two dimension Numpy array of points at
            which the derivatives are to be computed.

        Returns
        -------
        float or array_like
            gradient of the objective function at `x`.
        """
        # Check self._gradient_function #########
        assert self._gradient_function is not None
        assert callable(self._gradient_function)
        # Check x shape #########################
        if x.shape[0] != self.ndim:
            raise Exception('Wrong number of dimension: x has {} rows instead of {}.'.format(x.shape[0], self.ndim))
        # Update the evaluations counter ########
        # TODO: make an external Log (or Counter) class
        if x.ndim == 1:
            self.num_gradient_eval += 1
        elif x.ndim == 2:
            self.num_gradient_eval += x.shape[1]
        else:
            raise Exception('Wrong number of dimension: x is a {} dimensions numpy array ; 1 or 2 dimensions are expected.'.format(x.ndim))
        # Apply translation #####################
        x_translated = (x.T - self.translation_vector).T
        # Eval x ################################
        grad = self._gradient_function(x_translated)
        return grad
    def hessian(self, x):
        """
        The Hessian matrix of the objective function.

        Parameters
        ----------
        x : array_like
            1-D array of points at which the Hessian matrix is to be computed.

        Returns
        -------
        ndarray
            The Hessian matrix of the objective function at `x`.
        """
        # Check self._hessian_function ##########
        assert self._hessian_function is not None
        assert callable(self._hessian_function)
        # Check x shape #########################
        if x.shape[0] != self.ndim:
            raise Exception('Wrong number of dimension: x has {} rows instead of {}.'.format(x.shape[0], self.ndim))
        # Update the evaluations counter ########
        # TODO: make an external Log (or Counter) class
        if x.ndim == 1:
            self.num_hessian_eval += 1
        elif x.ndim == 2:
            self.num_hessian_eval += x.shape[1]
        else:
            raise Exception('Wrong number of dimension: x is a {} dimensions numpy array ; 1 or 2 dimensions are expected.'.format(x.ndim))
        # Apply translation #####################
        x_translated = (x.T - self.translation_vector).T
        # Eval x ################################
        hess = self._hessian_function(x_translated)
        return hess
    def __str__(self):
        name = ""
        # BUG FIX: the original condition was `self.stochastic is not None`,
        # which is always True because `stochastic` is a bool property, so
        # every function printed with a "stochastic " prefix.
        if self.stochastic:
            name += "stochastic "
        if self.function_name is not None:
            name += self.function_name
        else:
            name += self.__class__.__name__
        if self.function_formula is not None:
            name += ": " + self.function_formula
        return name
# SPHERE FUNCTION #############################################################
def sphere(x):
    r"""The Sphere function.

    The Sphere function is a famous **convex** function used to test the
    performance of optimization algorithms. It is very easy to optimize and
    can be used as a first check of an optimization algorithm.

    .. math::
        f(\boldsymbol{x}) = \sum_{i=1}^{n} x_{i}^2

    Global minimum:

    .. math::
        f(\boldsymbol{0}) = 0

    Search domain:

    .. math::
        \boldsymbol{x} \in \mathbb{R}^n

    .. image:: sphere_3d.png
    .. image:: sphere.png

    Example
    -------
    >>> sphere( np.array([0, 0]) )
    0.0
    >>> sphere( np.array([1, 1, 1]) )
    3.0
    >>> sphere( np.array([[0, 1, 2], [0, 1, 2]]) )
    ... # doctest: +NORMALIZE_WHITESPACE
    array([0., 2., 8.])

    For the multi-point example the results are :math:`f(x_1) = 0`,
    :math:`f(x_2) = 2` and :math:`f(x_3) = 8` (the docstring previously
    misstated :math:`f(x_2)` as 1 although the doctest showed 2).

    Parameters
    ----------
    x : array_like
        One dimension Numpy array of the point at which the Sphere function
        is to be computed, or a two dimension Numpy array of points (one
        point per *column*).

    Returns
    -------
    float or array_like
        The value(s) of the Sphere function for the given point(s) `x`.

    See Also
    --------
    sphere_gradient, sphere_hessian
    """
    # Remark: `sum(x**2.0)` is equivalent to `np.sum(x**2.0, axis=0)` but only
    # the latter works if x is a scalar (e.g. a 0-d numpy float).
    return np.sum(x**2.0, axis=0)
def sphere_gradient(x):
    r"""Gradient of the Sphere function, :math:`\nabla f(x) = 2x`.

    Parameters
    ----------
    x : array_like
        A 1D point, or a 2D array of points (one point per column), at
        which the derivative is to be computed.

    Returns
    -------
    float or array_like
        Gradient of the Sphere function at `x`.

    See Also
    --------
    sphere, sphere_hessian
    """
    gradient = x * 2.0
    return gradient
def sphere_hessian(x):
    """Hessian of the Sphere function, evaluated element-wise.

    NOTE(review): this returns an array of 2s with the same *shape* as `x`
    (i.e. the diagonal of the true Hessian ``2*I``), not an n-by-n matrix —
    confirm against callers before relying on a full matrix here.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.

    Returns
    -------
    ndarray
        The (element-wise) Hessian of the Sphere function at `x`.

    See Also
    --------
    sphere, sphere_gradient
    """
    return np.full(x.shape, 2.0)
class Sphere(_ObjectiveFunction):
    """Objective-function wrapper for the n-dimensional Sphere function."""

    def __init__(self, ndim):
        super().__init__()

        # Install the function and its analytic derivatives.
        self._objective_function = sphere
        self._gradient_function = sphere_gradient
        self._hessian_function = sphere_hessian

        self.ndim = ndim

        # Search domain: every coordinate lies in [-10, 10].
        # TODO: take this or the transpose of this ?
        search_space = np.ones((2, ndim))
        search_space[0, :] = -10.
        search_space[1, :] = 10.
        self.bounds = search_space

        self.continuous = True

        # The unique global minimum is at the origin.
        self.arg_min = np.zeros(ndim)

    @property
    def unimodal(self):
        return True


sphere1d = Sphere(ndim=1)
sphere2d = Sphere(ndim=2)
# ROSENBROCK FUNCTION #########################################################
def rosen(x):
    r"""The (extended) Rosenbrock function.

    The Rosenbrock function is a famous **non-convex** function used to test
    the performance of optimization algorithms. The classical two-dimensional
    version is **unimodal**, but the *extended* :math:`n`-dimensional version
    (with :math:`n \geq 4`) is **multimodal** [SHANG06]_.

    .. math::
        f(\boldsymbol{x}) = \sum_{i=1}^{n-1} \left[100 \left( x_{i+1} - x_{i}^{2} \right)^{2} + \left( x_{i} - 1 \right)^2 \right]

    The function has exactly one (global) minimum
    :math:`f(\underbrace{1,\dots,1}_{n\text{ times}}) = 0` for
    :math:`n \leq 3`, plus an additional *local* minimum near
    :math:`(-1, 1, \dots, 1)^\top` for :math:`n \geq 4`.

    Search domain: :math:`\boldsymbol{x} \in \mathbb{R}^n`.

    See https://en.wikipedia.org/wiki/Rosenbrock_function and
    http://mathworld.wolfram.com/RosenbrockFunction.html for more
    information; the function and its derivatives are also implemented in
    Scipy (``scipy.optimize.rosen``, ``rosen_der``, ``rosen_hess``,
    ``rosen_hess_prod``).

    .. image:: rosenbrock_3d.png
    .. image:: rosenbrock.png

    Example
    -------
    >>> rosen( np.array([0, 0]) )
    1.0
    >>> rosen( np.array([1, 1, 1]) )
    0.0
    >>> rosen( np.array([[0, 1, 2], [0, 1, 2]]) )
    ... # doctest: +NORMALIZE_WHITESPACE
    array([ 1., 0., 401.])

    Parameters
    ----------
    x : array_like
        One dimension Numpy array of the point at which the Rosenbrock
        function is to be computed, or a two dimension Numpy array of points
        (one point per *column*).

    Returns
    -------
    float or array_like
        The value(s) of the Rosenbrock function for the given point(s) `x`.

    References
    ----------
    .. [SHANG06] `Shang, Y. W., & Qiu, Y. H. (2006). A note on the extended
       Rosenbrock function. Evolutionary Computation, 14(1), 119-126.
       <http://www.mitpressjournals.org/doi/abs/10.1162/evco.2006.14.1.119>`_
    """
    head = x[:-1]
    tail = x[1:]
    return np.sum(100.0 * (tail - head**2.0)**2.0 + (1 - head)**2.0, axis=0)
class Rosenbrock(_ObjectiveFunction):
    """Objective-function wrapper for the extended Rosenbrock function."""

    def __init__(self, ndim):
        super().__init__()
        self._objective_function = rosen

        self.ndim = ndim
        if self.ndim < 2:  # TODO
            raise ValueError("The rosenbrock function is defined for solution spaces having at least 2 dimensions.")

        # Search domain: every coordinate lies in [-10, 10].
        # TODO: take this or the transpose of this ?
        search_space = np.ones((2, ndim))
        search_space[0, :] = -10.  # TODO
        search_space[1, :] = 10.   # TODO
        self.bounds = search_space

        self.continuous = True
        self.arg_min = np.ones(ndim)

    @property
    def unimodal(self):
        # Only the classical low-dimensional versions are unimodal.
        return self.ndim < 4


rosen2d = Rosenbrock(ndim=2)
# HIMMELBLAU'S FUNCTION #######################################################
def himmelblau(x):
    r"""The Himmelblau's function.

    The Himmelblau's function is a two-dimensional **multimodal** function.

    .. math::
        f(x_1, x_2) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 - 7)^2

    The function has four global minima:

    .. math::
        \begin{eqnarray}
            f(3, 2) = 0 \\
            f(-2.805118, 3.131312) = 0 \\
            f(-3.779310, -3.283186) = 0 \\
            f(3.584428, -1.848126) = 0
        \end{eqnarray}

    Search domain:

    .. math::
        \boldsymbol{x} \in \mathbb{R}^2

    It also has one local maximum at :math:`f(-0.270845, -0.923039) = 181.617`.
    The locations of all the minima can be found analytically (roots of cubic
    polynomials) but expressions are somewhat complicated.

    The function is named after David Mautner Himmelblau, who introduced it in
    *Applied Nonlinear Programming* (1972), McGraw-Hill, ISBN 0-07-028921-2.

    See https://en.wikipedia.org/wiki/Himmelblau%27s_function for more information.

    .. image:: himmelblau_3d.png
    .. image:: himmelblau.png

    Example
    -------
    To evaluate a single point :math:`x = \begin{pmatrix} 3 \\ 2 \end{pmatrix}`:

    >>> himmelblau( np.array([3, 2]) )
    0.0

    The result should be :math:`f(x) = 0` (this docstring previously
    misstated the result as 1).

    Example
    -------
    To evaluate multiple points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
    :math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
    :math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:

    >>> himmelblau( np.array([[0, 1, 2], [0, 1, 2]]) )
    ... # doctest: +NORMALIZE_WHITESPACE
    array([170., 106., 26.])

    The result should be :math:`f(x_1) = 170`, :math:`f(x_2) = 106` and :math:`f(x_3) = 26`.

    Parameters
    ----------
    x : array_like
        One dimension Numpy array of the point at which the Himmelblau's
        function is to be computed, or a two dimension Numpy array of points
        (one point per *column*).

    Returns
    -------
    float or array_like
        The value(s) of the Himmelblau's function for the given point(s) `x`.
    """
    # Defined for 2D points only (a 2-row array for the multi-point case).
    assert x.shape[0] == 2, x.shape
    return (x[0]**2.0 + x[1] - 11.0)**2.0 + (x[0] + x[1]**2.0 - 7.0)**2.0
class Himmelblau(_ObjectiveFunction):
    """Objective-function wrapper for the 2D Himmelblau's function."""

    def __init__(self, ndim):
        super().__init__()
        self._objective_function = himmelblau
        self.ndim = ndim
        if self.ndim != 2:
            raise ValueError("The himmelblau function is defined for solution spaces having 2 dimensions.")
        self.bounds = np.ones((2, self.ndim))  # TODO: take this or the transpose of this ?
        self.bounds[0,:] = -10.  # TODO
        self.bounds[1,:] = 10.   # TODO
        self.continuous = True
        # BUG FIX: `arg_min` used to be np.ones(ndim), but f(1, 1) = 106.
        # Himmelblau's function has four global minima; (3, 2) is the
        # canonical representative (see the `himmelblau` docstring).
        self.arg_min = np.array([3.0, 2.0])

    @property
    def unimodal(self):
        return False


himmelblau2d = Himmelblau(ndim=2)
# RASTRIGIN FUNCTION ##########################################################
def rastrigin(x):
    r"""The Rastrigin function.

    The Rastrigin function is a famous **multimodal** function.
    Finding the minimum of this function is a fairly difficult problem due to
    its large search space and its large number of local minima.

    The classical two-dimensional version of this function has been introduced
    by L. A. Rastrigin in *Systems of extremal control* Mir, Moscow (1974).

    Its *generalized* :math:`n`-dimensional version has been proposed by H.
    Mühlenbein, D. Schomisch and J. Born in *The Parallel Genetic Algorithm as
    Function Optimizer* Parallel Computing, 17, pages 619–632, 1991.

    On an n-dimensional domain it is defined by:

    .. math::
        f(\boldsymbol{x}) = An + \sum_{i=1}^{n} \left[ x_{i}^{2} - A \cos(2 \pi x_{i}) \right]

    where :math:`A = 10`.

    Global minimum:

    .. math::
        f(\boldsymbol{0}) = 0

    Search domain:

    .. math::
        \boldsymbol{x} \in \mathbb{R}^n

    See https://en.wikipedia.org/wiki/Rastrigin_function for more information.

    .. image:: rastrigin_3d.png
    .. image:: rastrigin.png

    Example
    -------
    To evaluate a single 2D point :math:`x = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`:

    >>> rastrigin( np.array([0, 0]) )
    0.0

    The result should be :math:`f(x) = 0`.

    Example
    -------
    To evaluate a single 3D point :math:`x = \begin{pmatrix} 0 \\ 0 \\ 0 \end{pmatrix}`:

    >>> rastrigin( np.array([0, 0, 0]) )
    0.0

    The result should be :math:`f(x) = 0`.
    (This docstring previously labelled the example point as (1, 1, 1).)

    Example
    -------
    To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}`,
    :math:`x_2 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` and
    :math:`x_3 = \begin{pmatrix} 2 \\ 2 \end{pmatrix}` at once:

    >>> rastrigin( np.array([[0, 1, 2], [0, 1, 2]]) )
    ... # doctest: +NORMALIZE_WHITESPACE
    array([0., 2., 8.])

    The result should be :math:`f(x_1) = 0`, :math:`f(x_2) = 2` and
    :math:`f(x_3) = 8`.
    (This docstring previously showed the Rosenbrock example output
    ``array([1., 0., 401.])``, which is wrong for Rastrigin.)

    Parameters
    ----------
    x : array_like
        One dimension Numpy array of the point at which the Rastrigin
        function is to be computed, or a two dimension Numpy array of points
        (one point per *column*).

    Returns
    -------
    float or array_like
        The value(s) of the Rastrigin function for the given point(s) `x`.
    """
    A = 10.
    n = x.shape[0]
    return A * n + np.sum(x**2.0 - A * np.cos(2.0 * np.pi * x), axis=0)
class Rastrigin(_ObjectiveFunction):
    """Objective-function wrapper for the n-dimensional Rastrigin function."""

    def __init__(self, ndim):
        super().__init__()
        self._objective_function = rastrigin
        self.ndim = ndim
        if self.ndim < 2:  # TODO
            raise ValueError("The rastrigin function is defined for solution spaces having at least 2 dimensions.")
        self.bounds = np.ones((2, self.ndim))  # TODO: take this or the transpose of this ?
        self.bounds[0,:] = -10.  # TODO
        self.bounds[1,:] = 10.   # TODO
        self.continuous = True
        # BUG FIX: `arg_min` used to be np.ones(ndim); the Rastrigin global
        # minimum is at the origin (f(0, ..., 0) = 0), while every nonzero
        # integer point is only a local minimum.
        self.arg_min = np.zeros(self.ndim)

    @property
    def unimodal(self):
        return False


rastrigin2d = Rastrigin(ndim=2)
# EASOM FUNCTION ##############################################################
def easom(x):
    r"""The Easom function.

    The Easom function is a 2 dimensions **unimodal** function.

    .. math::
        f(x_1, x_2) = -\cos(x_1) \cos(x_2) \exp \left( -\left[ (x_1-\pi)^2 + (x_2-\pi)^2 \right] \right)

    Global minimum:

    .. math::
        f(\pi, \pi) = -1

    Search domain:

    .. math::
        \boldsymbol{x} \in \mathbb{R}^2

    See https://www.sfu.ca/~ssurjano/easom.html for more information.

    .. image:: easom_3d.png
    .. image:: easom.png

    Example
    -------
    To evaluate a single 2D point
    :math:`x = \begin{pmatrix} \pi \\ \pi \end{pmatrix}`
    (this docstring previously mislabelled the point as (0, 0)):

    >>> easom( np.array([np.pi, np.pi]) )
    -1.0

    The result should be :math:`f(x) = -1`.

    Example
    -------
    To evaluate multiple 2D points :math:`x_1 = \begin{pmatrix} \pi \\ \pi \end{pmatrix}`,
    :math:`x_2 = \begin{pmatrix} 0 \\ 0 \end{pmatrix}` and
    :math:`x_3 = \begin{pmatrix} 1 \\ 1 \end{pmatrix}` at once:

    >>> easom( np.array([[np.pi, 0, 1], [np.pi, 0, 1]]) )
    ... # doctest: +NORMALIZE_WHITESPACE
    array([-1., -2.67528799e-09, -3.03082341e-05])

    The result should be :math:`f(x_1) = -1`, :math:`f(x_2) \approx 0` and :math:`f(x_3) \approx 0`.

    Parameters
    ----------
    x : array_like
        One dimension Numpy array of the point at which the Easom function
        is to be computed, or a two dimension Numpy array of points (one
        point per *column*).

    Returns
    -------
    float or array_like
        The value(s) of the Easom function for the given point(s) `x`.
    """
    # Defined for 2D points only (a 2-row array for the multi-point case).
    assert x.shape[0] == 2, x.shape
    return -np.cos(x[0]) * np.cos(x[1]) * np.exp(-((x[0]-np.pi)**2.0 + (x[1]-np.pi)**2.0))
class Easom(_ObjectiveFunction):
    """Objective-function wrapper for the 2D Easom function."""

    def __init__(self, ndim):
        super().__init__()
        self._objective_function = easom
        self.ndim = ndim
        if self.ndim != 2:
            raise ValueError("The easom function is defined for solution spaces having 2 dimensions.")
        self.bounds = np.ones((2, self.ndim))  # TODO: take this or the transpose of this ?
        self.bounds[0,:] = -10.  # TODO
        self.bounds[1,:] = 10.   # TODO
        self.continuous = True
        # BUG FIX: `arg_min` used to be np.ones(ndim); the Easom global
        # minimum is at (pi, pi) where f(pi, pi) = -1.
        self.arg_min = np.full(self.ndim, np.pi)

    @property
    def unimodal(self):
        return True


easom2d = Easom(ndim=2)
# CROSS-IN-TRAY FUNCTION ######################################################
def crossintray(x):
    r"""The Cross-in-tray function.

    A 2D **multimodal** benchmark with four global minima:

    .. math::
        f(x_1, x_2) = -0.0001 \left( \left| \sin(x_1) \sin(x_2) \exp \left( \left| 100 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi} \right| \right)\right| + 1 \right)^{0.1}

    Global minima:

    .. math::
        f(\pm 1.34941, \pm 1.34941) = -2.06261

    Search domain: :math:`-10 \leq x_1, x_2 \leq 10`.

    **References**: *Test functions for optimization* (Wikipedia):
    https://en.wikipedia.org/wiki/Test_functions_for_optimization.

    .. image:: cross_in_tray_3d.png
    .. image:: cross_in_tray.png

    Example
    -------
    >>> crossintray( np.array([0, 0]) )
    -0.0001
    >>> crossintray( np.array([[0, 1.34941, -1.34941], [0, 1.34941, -1.34941]]) )
    ... # doctest: +NORMALIZE_WHITESPACE
    array([ -0.0001, -2.06261, -2.06261])

    Parameters
    ----------
    x : array_like
        A 1D point, or a 2D array of points (one point per *column*).

    Returns
    -------
    float or array_like
        The value(s) of the Cross-in-tray function for the given point(s) `x`.
    """
    # Defined for 2D points only (a 2-row array for the multi-point case).
    assert x.shape[0] == 2, x.shape
    radius = np.sqrt(x[0]**2.0 + x[1]**2.0)
    inner = np.sin(x[0]) * np.sin(x[1]) * np.exp(np.abs(100.0 - radius / np.pi))
    return -0.0001 * (np.abs(inner) + 1.0)**0.1
class Crossintray(_ObjectiveFunction):
    """Objective-function wrapper for the 2D Cross-in-tray function."""

    def __init__(self, ndim):
        super().__init__()
        self._objective_function = crossintray
        self.ndim = ndim
        if self.ndim != 2:
            raise ValueError("The crossintray function is defined for solution spaces having 2 dimensions.")
        self.bounds = np.ones((2, self.ndim))  # TODO: take this or the transpose of this ?
        self.bounds[0,:] = -10.
        self.bounds[1,:] = 10.
        self.continuous = True
        # BUG FIX: `arg_min` used to be np.ones(ndim); the four global
        # minima are at (+-1.34941, +-1.34941) — this is one representative.
        self.arg_min = np.array([1.34941, 1.34941])

    @property
    def unimodal(self):
        return False


crossintray2d = Crossintray(ndim=2)
# HÖLDER TABLE FUNCTION #######################################################
def holder(x):
    r"""The Hölder table function.

    A 2D **multimodal** benchmark with four global minima:

    .. math::
        f(x_1, x_2) =
        -\left| \sin(x_1) \cos(x_2) \exp \left( \left| 1 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi} \right| \right) \right|

    Global minima:

    .. math::
        f(\pm 8.05502, \pm 9.66459) = -19.2085

    Search domain: :math:`-10 \leq x_1, x_2 \leq 10`.

    **References**: *Test functions for optimization* (Wikipedia):
    https://en.wikipedia.org/wiki/Test_functions_for_optimization.

    .. image:: holder_3d.png
    .. image:: holder.png

    Example
    -------
    >>> holder( np.array([0, 0]) )
    0.0
    >>> holder( np.array([[0., 0., 1.], [0., 1., 0.]]) )
    ... # doctest: +NORMALIZE_WHITESPACE
    array([-0. , -0. , -1.66377043])

    Parameters
    ----------
    x : array_like
        A 1D point, or a 2D array of points (one point per *column*).

    Returns
    -------
    float or array_like
        The value(s) of the Hölder table function for the given point(s) `x`.
    """
    # Defined for 2D points only (a 2-row array for the multi-point case).
    assert x.shape[0] == 2, x.shape
    radius = np.sqrt(x[0]**2.0 + x[1]**2.0)
    magnitude = np.sin(x[0]) * np.cos(x[1]) * np.exp(np.abs(1.0 - radius / np.pi))
    return -np.abs(magnitude)
class Holder(_ObjectiveFunction):
    """Objective-function wrapper for the 2D Hölder table function."""

    def __init__(self, ndim):
        super().__init__()
        self._objective_function = holder
        self.ndim = ndim
        if self.ndim != 2:
            raise ValueError("The holder function is defined for solution spaces having 2 dimensions.")
        self.bounds = np.ones((2, self.ndim))  # TODO: take this or the transpose of this ?
        self.bounds[0,:] = -10.
        self.bounds[1,:] = 10.
        self.continuous = True
        # BUG FIX: `arg_min` used to be np.ones(ndim); the four global
        # minima are at (+-8.05502, +-9.66459) — this is one representative.
        self.arg_min = np.array([8.05502, 9.66459])

    @property
    def unimodal(self):
        return False


holder2d = Holder(ndim=2)
| mit |
mvbn6789/flask-blog | migrations/env.py | 605 | 2158 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the Alembic context with just a database URL, not an Engine
    (an Engine would be acceptable too); by skipping Engine creation no
    DBAPI needs to be available. Calls to context.execute() emit the given
    string to the script output.
    """
    db_url = config.get_main_option("sqlalchemy.url")
    context.configure(url=db_url)

    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Creates an Engine from the Alembic config and binds a live connection
    to the migration context.
    """
    ini_section = config.get_section(config.config_ini_section)
    engine = engine_from_config(
        ini_section,
        prefix='sqlalchemy.',
        poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata
    )

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even when a migration fails.
        connection.close()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| mit |
gavein/sleeping-god | SleepingGodObjects/Vessel.py | 1 | 2849 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
from Constants import WEAR_AT_TURN, OXYGEN_AT_TURN, CARGO_WATER, CARGO_MINERALS
from SleepingGodObjects.GameObjects import GameObject
class Vessel(GameObject):
    """A space vessel: a game object that carries cargo, oxygen and a hull
    that wears down each turn it moves."""

    def __init__(
            self,
            pos_x,
            pos_y,
            char,
            label,
            color,
            blocks,
            cargo=None,
            oxygen=0,
            hull=0,
            wear_resistance=0):
        GameObject.__init__(
            self,
            pos_x,
            pos_y,
            char,
            label,
            color,
            blocks)

        # BUG FIX: the default used to be a mutable `cargo={}` argument.
        # That single dict was shared by every instance constructed without
        # an explicit cargo (and mutated below and by subclasses), so all
        # such vessels silently shared one cargo hold.
        self.cargo = {} if cargo is None else cargo
        self.cargo_keys = [
            CARGO_WATER,
            CARGO_MINERALS
        ]
        # Guarantee that every known cargo type has an entry.
        # (`dict.has_key` was Python 2 only; `in` works on both versions.)
        for key in self.cargo_keys:
            if key not in self.cargo:
                self.cargo[key] = 0

        self.oxygen = oxygen
        self.oxygen_max = oxygen   # remember the full tank for refills/UI
        self.hull = hull
        self.wear = hull           # wear counts down from full hull strength
        self.wear_resistance = wear_resistance

    def move(self, dx, dy):
        """Move by (dx, dy), applying per-turn hull wear and oxygen usage."""
        self.pos_x += dx
        self.pos_y += dy

        # Resistance reduces the wear taken each turn.
        turn_wear = WEAR_AT_TURN - self.wear_resistance
        self.wear -= turn_wear
        self.oxygen -= OXYGEN_AT_TURN

    def cargo_info(self, key):
        """Return the stored amount for `key`, or None for unknown keys."""
        if key in self.cargo:
            return self.cargo[key]
class PlayerVessel(Vessel):
    """The player's vessel: a Vessel that also tracks mined resources,
    propulsion type and learned abilities."""

    SOLAR_SAIL = u"фотонный парус"

    def __init__(
            self,
            pos_x,
            pos_y,
            char,
            label,
            color,
            blocks,
            cargo=None,
            oxygen=0,
            hull=0,
            wear_resistance=0,
            propulsion=SOLAR_SAIL):
        # BUG FIX: the default used to be a shared mutable `cargo={}`
        # argument. Normalise to a fresh dict here so the fix holds even if
        # the base class still uses its old default handling.
        if cargo is None:
            cargo = {}
        Vessel.__init__(
            self,
            pos_x,
            pos_y,
            char,
            label,
            color,
            blocks,
            cargo,
            oxygen,
            hull,
            wear_resistance)
        self.propulsion = propulsion
        self.abilities = []

    def increase_resources(self, minerals, water):
        """Add mined minerals and water to the cargo hold."""
        self.cargo[CARGO_MINERALS] += minerals
        self.cargo[CARGO_WATER] += water

    def add_ability(self, ability):
        """Grant a new ability to the player."""
        self.abilities.append(ability)

    def get_ability_name(self, ability):
        # BUG FIX: the parameter used to be misspelled `abilitY`, so the
        # body's reference to `ability` raised NameError on every call.
        return ability.name

    def get_ability_description(self, ability):
        return ability.description

    def use_ability(self, ability, *args):
        """Use `ability` if the player owns it.

        NOTE(review): this forwards `args` as a single tuple
        (`ability.use(args)`), not unpacked — confirm against the Ability
        class before changing it to `ability.use(*args)`.
        """
        if ability in self.abilities:
            ability.use(args)
| gpl-3.0 |
tempbottle/pykafka | pykafka/utils/compat.py | 6 | 3887 | import sys
__all__ = ['PY3', 'Semaphore']
PY3 = sys.version_info[0] >= 3
if PY3:
from threading import Semaphore
else:
from threading import Condition, Lock
# could use monotonic.monotonic() backport as well here...
from time import time as _time
# -- begin unmodified backport of threading.Semaphore from Python 3.4 -- #
    class Semaphore:
        """This class implements semaphore objects.
        Semaphores manage a counter representing the number of release() calls minus
        the number of acquire() calls, plus an initial value. The acquire() method
        blocks if necessary until it can return without making the counter
        negative. If not given, value defaults to 1.
        Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
        2012, 2013, 2014, 2015 Python Software Foundation. All rights reserved.
        """
        # After Tim Peters' semaphore class, but not quite the same (no maximum)
        def __init__(self, value=1):
            if value < 0:
                raise ValueError("semaphore initial value must be >= 0")
            # The condition's lock guards _value; blocked acquirers wait on
            # the condition until release() notifies one of them.
            self._cond = Condition(Lock())
            self._value = value
        def acquire(self, blocking=True, timeout=None):
            """Acquire a semaphore, decrementing the internal counter by one.
            When invoked without arguments: if the internal counter is larger than
            zero on entry, decrement it by one and return immediately. If it is zero
            on entry, block, waiting until some other thread has called release() to
            make it larger than zero. This is done with proper interlocking so that
            if multiple acquire() calls are blocked, release() will wake exactly one
            of them up. The implementation may pick one at random, so the order in
            which blocked threads are awakened should not be relied on. There is no
            return value in this case.
            When invoked with blocking set to true, do the same thing as when called
            without arguments, and return true.
            When invoked with blocking set to false, do not block. If a call without
            an argument would block, return false immediately; otherwise, do the
            same thing as when called without arguments, and return true.
            When invoked with a timeout other than None, it will block for at
            most timeout seconds. If acquire does not complete successfully in
            that interval, return false. Return true otherwise.
            """
            if not blocking and timeout is not None:
                raise ValueError("can't specify timeout for non-blocking acquire")
            rc = False
            endtime = None
            with self._cond:
                while self._value == 0:
                    if not blocking:
                        break
                    if timeout is not None:
                        # Compute the absolute deadline on the first pass;
                        # on later (possibly spurious) wake-ups shrink the
                        # remaining wait and give up once it reaches zero.
                        if endtime is None:
                            endtime = _time() + timeout
                        else:
                            timeout = endtime - _time()
                            if timeout <= 0:
                                break
                    self._cond.wait(timeout)
                else:
                    # Loop exited normally (not via break): _value > 0, so
                    # the semaphore can be taken.
                    self._value -= 1
                    rc = True
            return rc
        # Context-manager support: `with sem:` acquires on entry ...
        __enter__ = acquire
        def release(self):
            """Release a semaphore, incrementing the internal counter by one.
            When the counter is zero on entry and another thread is waiting for it
            to become larger than zero again, wake up that thread.
            """
            with self._cond:
                self._value += 1
                self._cond.notify()
        def __exit__(self, t, v, tb):
            # ... and releases on exit of the `with` block.
            self.release()
# -- end backport of Semaphore from Python 3.4 -- #
| apache-2.0 |
amarouni/incubator-beam | sdks/python/apache_beam/runners/direct/transform_evaluator.py | 2 | 19545 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An evaluator of a specific application of a transform."""
from __future__ import absolute_import
import collections
from apache_beam import coders
from apache_beam import pvalue
from apache_beam.internal import pickler
import apache_beam.io as io
from apache_beam.runners.common import DoFnRunner
from apache_beam.runners.common import DoFnState
from apache_beam.runners.direct.watermark_manager import WatermarkManager
from apache_beam.runners.direct.transform_result import TransformResult
from apache_beam.runners.dataflow.native_io.iobase import _NativeWrite # pylint: disable=protected-access
from apache_beam.transforms import core
from apache_beam.transforms.window import GlobalWindows
from apache_beam.transforms.window import WindowedValue
from apache_beam.transforms.trigger import _CombiningValueStateTag
from apache_beam.transforms.trigger import _ListStateTag
from apache_beam.typehints.typecheck import OutputCheckWrapperDoFn
from apache_beam.typehints.typecheck import TypeCheckError
from apache_beam.typehints.typecheck import TypeCheckWrapperDoFn
from apache_beam.utils import counters
from apache_beam.options.pipeline_options import TypeOptions
class TransformEvaluatorRegistry(object):
  """For internal use only; no backwards-compatibility guarantees.

  Creates instances of TransformEvaluator for the application of a transform.
  """

  def __init__(self, evaluation_context):
    assert evaluation_context
    self._evaluation_context = evaluation_context
    # Maps a concrete transform class to the evaluator class that executes it.
    # Sub-classes of these transforms are resolved by the MRO walk in
    # get_evaluator().
    self._evaluators = {
        io.Read: _BoundedReadEvaluator,
        core.Flatten: _FlattenEvaluator,
        core.ParDo: _ParDoEvaluator,
        core._GroupByKeyOnly: _GroupByKeyOnlyEvaluator,
        _NativeWrite: _NativeWriteEvaluator,
    }
    # Root transforms (those with no input) get their initial bundles from a
    # RootBundleProvider rather than from an upstream evaluator.
    self._root_bundle_providers = {
        core.PTransform: DefaultRootBundleProvider,
    }

  def get_evaluator(
      self, applied_ptransform, input_committed_bundle,
      side_inputs, scoped_metrics_container):
    """Returns a TransformEvaluator suitable for processing given inputs."""
    assert applied_ptransform
    # Side inputs must be supplied exactly when the transform declares them.
    assert bool(applied_ptransform.side_inputs) == bool(side_inputs)

    # Walk up the class hierarchy to find an evaluable type. This is necessary
    # for supporting sub-classes of core transforms.
    for cls in applied_ptransform.transform.__class__.mro():
      evaluator = self._evaluators.get(cls)
      if evaluator:
        break

    if not evaluator:
      raise NotImplementedError(
          'Execution of [%s] not implemented in runner %s.' % (
              type(applied_ptransform.transform), self))
    return evaluator(self._evaluation_context, applied_ptransform,
                     input_committed_bundle, side_inputs,
                     scoped_metrics_container)

  def get_root_bundle_provider(self, applied_ptransform):
    """Returns the RootBundleProvider registered for this root transform.

    Raises:
      NotImplementedError: if no provider matches any class in the MRO.
    """
    provider_cls = None
    for cls in applied_ptransform.transform.__class__.mro():
      provider_cls = self._root_bundle_providers.get(cls)
      if provider_cls:
        break

    if not provider_cls:
      raise NotImplementedError(
          'Root provider for [%s] not implemented in runner %s' % (
              type(applied_ptransform.transform), self))
    return provider_cls(self._evaluation_context, applied_ptransform)

  def should_execute_serially(self, applied_ptransform):
    """Returns True if this applied_ptransform should run one bundle at a time.

    Some TransformEvaluators use a global state object to keep track of their
    global execution state. For example evaluator for _GroupByKeyOnly uses this
    state as an in memory dictionary to buffer keys.

    Serially executed evaluators will act as syncing point in the graph and
    execution will not move forward until they receive all of their inputs. Once
    they receive all of their input, they will release the combined output.
    Their output may consist of multiple bundles as they may divide their output
    into pieces before releasing.

    Args:
      applied_ptransform: Transform to be used for execution.

    Returns:
      True if executor should execute applied_ptransform serially.
    """
    return isinstance(applied_ptransform.transform,
                      (core._GroupByKeyOnly, _NativeWrite))
class RootBundleProvider(object):
  """Provides bundles for the initial execution of a root transform.

  Sub-classes implement get_root_bundles(); instance attributes are read
  directly by those sub-classes.
  """

  def __init__(self, evaluation_context, applied_ptransform):
    self._evaluation_context = evaluation_context
    self._applied_ptransform = applied_ptransform

  def get_root_bundles(self):
    # Abstract: sub-classes must return a list of committed bundles.
    raise NotImplementedError
class DefaultRootBundleProvider(RootBundleProvider):
  """Provides an empty bundle by default for root transforms."""

  def get_root_bundles(self):
    # A root transform has no upstream input, so it is seeded with a single
    # empty committed bundle rooted at PBegin.
    begin_node = pvalue.PBegin(self._applied_ptransform.transform.pipeline)
    context = self._evaluation_context
    return [context.create_empty_committed_bundle(begin_node)]
class _TransformEvaluator(object):
"""An evaluator of a specific application of a transform."""
def __init__(self, evaluation_context, applied_ptransform,
input_committed_bundle, side_inputs, scoped_metrics_container):
self._evaluation_context = evaluation_context
self._applied_ptransform = applied_ptransform
self._input_committed_bundle = input_committed_bundle
self._side_inputs = side_inputs
self._expand_outputs()
self._execution_context = evaluation_context.get_execution_context(
applied_ptransform)
self.scoped_metrics_container = scoped_metrics_container
with scoped_metrics_container:
self.start_bundle()
def _expand_outputs(self):
outputs = set()
for pval in self._applied_ptransform.outputs.values():
if isinstance(pval, pvalue.DoOutputsTuple):
pvals = (v for v in pval)
else:
pvals = (pval,)
for v in pvals:
outputs.add(v)
self._outputs = frozenset(outputs)
def _split_list_into_bundles(
self, output_pcollection, elements, max_element_per_bundle,
element_size_fn):
"""Splits elements, an iterable, into multiple output bundles.
Args:
output_pcollection: PCollection that the elements belong to.
elements: elements to be chunked into bundles.
max_element_per_bundle: (approximately) the maximum element per bundle.
If it is None, only a single bundle will be produced.
element_size_fn: Function to return the size of a given element.
Returns:
List of output uncommitted bundles with at least one bundle.
"""
bundle = self._evaluation_context.create_bundle(output_pcollection)
bundle_size = 0
bundles = [bundle]
for element in elements:
if max_element_per_bundle and bundle_size >= max_element_per_bundle:
bundle = self._evaluation_context.create_bundle(output_pcollection)
bundle_size = 0
bundles.append(bundle)
bundle.output(element)
bundle_size += element_size_fn(element)
return bundles
def start_bundle(self):
"""Starts a new bundle."""
pass
def process_element(self, element):
"""Processes a new element as part of the current bundle."""
raise NotImplementedError('%s do not process elements.', type(self))
def finish_bundle(self):
"""Finishes the bundle and produces output."""
pass
class _BoundedReadEvaluator(_TransformEvaluator):
  """TransformEvaluator for bounded Read transform."""

  # After some benchmarks, 1000 was optimal among {100,1000,10000}
  MAX_ELEMENT_PER_BUNDLE = 1000

  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    # A bounded read is a root transform; it never takes side inputs.
    assert not side_inputs
    self._source = applied_ptransform.transform.source
    self._source.pipeline_options = evaluation_context.pipeline_options
    super(_BoundedReadEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)

  def finish_bundle(self):
    # The whole source is consumed here at bundle finish; there is no
    # per-element processing for a bounded read.
    assert len(self._outputs) == 1
    output_pcollection = list(self._outputs)[0]

    def _read_values_to_bundles(reader):
      # Wrap each element in the global window, then chunk into bundles of at
      # most MAX_ELEMENT_PER_BUNDLE elements (each counted as size 1).
      read_result = [GlobalWindows.windowed_value(e) for e in reader]
      return self._split_list_into_bundles(
          output_pcollection, read_result,
          _BoundedReadEvaluator.MAX_ELEMENT_PER_BUNDLE, lambda _: 1)

    if isinstance(self._source, io.iobase.BoundedSource):
      # Getting a RangeTracker for the default range of the source and reading
      # the full source using that.
      range_tracker = self._source.get_range_tracker(None, None)
      reader = self._source.read(range_tracker)
      bundles = _read_values_to_bundles(reader)
    else:
      # Legacy (native) sources expose a reader context manager instead.
      with self._source.reader() as reader:
        bundles = _read_values_to_bundles(reader)
    return TransformResult(
        self._applied_ptransform, bundles, None, None, None)
class _FlattenEvaluator(_TransformEvaluator):
  """TransformEvaluator for Flatten transform."""

  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    # Flatten never declares side inputs.
    assert not side_inputs
    super(_FlattenEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)

  def start_bundle(self):
    # Flatten produces exactly one output PCollection; all inputs are copied
    # into a single uncommitted bundle for it.
    assert len(self._outputs) == 1
    out_pcoll = next(iter(self._outputs))
    self.bundle = self._evaluation_context.create_bundle(out_pcoll)

  def process_element(self, element):
    # Pass-through: every input element lands in the shared output bundle.
    self.bundle.output(element)

  def finish_bundle(self):
    return TransformResult(
        self._applied_ptransform, [self.bundle], None, None, None)
class _TaggedReceivers(dict):
  """Received ParDo output and redirect to the associated output bundle."""

  def __init__(self, evaluation_context):
    self._evaluation_context = evaluation_context
    # Lazily-created sinks for outputs emitted to undeclared tags; see
    # __missing__ below.
    self._null_receiver = None
    self._undeclared_in_memory_tag_values = None
    super(_TaggedReceivers, self).__init__()

  @property
  def undeclared_in_memory_tag_values(self):
    # Undeclared outputs are only buffered when the evaluation context has a
    # cache; otherwise this remains None.
    assert (not self._undeclared_in_memory_tag_values
            or self._evaluation_context.has_cache)
    return self._undeclared_in_memory_tag_values

  class NullReceiver(object):
    """Ignores undeclared outputs, default execution mode."""

    def output(self, element):
      pass

  class _InMemoryReceiver(object):
    """Buffers undeclared outputs to the given dictionary."""

    def __init__(self, target, tag):
      self._target = target
      self._tag = tag

    def output(self, element):
      self._target[self._tag].append(element)

  def __missing__(self, key):
    # Called on lookup of any tag without a registered bundle: buffer the
    # elements in memory when caching is enabled, otherwise drop them.
    if self._evaluation_context.has_cache:
      if not self._undeclared_in_memory_tag_values:
        self._undeclared_in_memory_tag_values = collections.defaultdict(list)
      receiver = _TaggedReceivers._InMemoryReceiver(
          self._undeclared_in_memory_tag_values, key)
    else:
      if not self._null_receiver:
        self._null_receiver = _TaggedReceivers.NullReceiver()
      receiver = self._null_receiver
    return receiver
class _ParDoEvaluator(_TransformEvaluator):
  """TransformEvaluator for ParDo transform."""

  def start_bundle(self):
    transform = self._applied_ptransform.transform

    # One output bundle per declared output tag; outputs to undeclared tags
    # are handled by _TaggedReceivers.__missing__.
    self._tagged_receivers = _TaggedReceivers(self._evaluation_context)
    for output_tag in self._applied_ptransform.outputs:
      output_pcollection = pvalue.PCollection(None, tag=output_tag)
      output_pcollection.producer = self._applied_ptransform
      self._tagged_receivers[output_tag] = (
          self._evaluation_context.create_bundle(output_pcollection))
      self._tagged_receivers[output_tag].tag = output_tag

    self._counter_factory = counters.CounterFactory()

    # TODO(aaltay): Consider storing the serialized form as an optimization.
    # Round-tripping through the pickler gives this bundle a fresh DoFn copy,
    # so per-bundle state cannot leak between bundles.
    dofn = pickler.loads(pickler.dumps(transform.dofn))

    pipeline_options = self._evaluation_context.pipeline_options
    if (pipeline_options is not None
        and pipeline_options.view_as(TypeOptions).runtime_type_check):
      # Optional runtime type checking of inputs/outputs, per pipeline options.
      dofn = TypeCheckWrapperDoFn(dofn, transform.get_type_hints())

    dofn = OutputCheckWrapperDoFn(dofn, self._applied_ptransform.full_label)
    self.runner = DoFnRunner(
        dofn, transform.args, transform.kwargs,
        self._side_inputs,
        self._applied_ptransform.inputs[0].windowing,
        tagged_receivers=self._tagged_receivers,
        step_name=self._applied_ptransform.full_label,
        state=DoFnState(self._counter_factory),
        scoped_metrics_container=self.scoped_metrics_container)
    self.runner.start()

  def process_element(self, element):
    self.runner.process(element)

  def finish_bundle(self):
    self.runner.finish()
    bundles = self._tagged_receivers.values()
    result_counters = self._counter_factory.get_counters()
    return TransformResult(
        self._applied_ptransform, bundles, None, result_counters, None,
        self._tagged_receivers.undeclared_in_memory_tag_values)
class _GroupByKeyOnlyEvaluator(_TransformEvaluator):
  """TransformEvaluator for _GroupByKeyOnly transform."""

  # None means a single output bundle is produced regardless of size.
  MAX_ELEMENT_PER_BUNDLE = None
  # Per-key buffered values, and a global "output already emitted" flag.
  ELEMENTS_TAG = _ListStateTag('elements')
  COMPLETION_TAG = _CombiningValueStateTag('completed', any)

  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    super(_GroupByKeyOnlyEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)

  @property
  def _is_final_bundle(self):
    # True once the input watermark has advanced to +inf, i.e. no more input.
    return (self._execution_context.watermarks.input_watermark
            == WatermarkManager.WATERMARK_POS_INF)

  def start_bundle(self):
    self.step_context = self._execution_context.get_step_context()
    # Keyed state under key None serves as the step's global state.
    self.global_state = self.step_context.get_keyed_state(None)

    assert len(self._outputs) == 1
    self.output_pcollection = list(self._outputs)[0]

    # The input type of a GroupByKey will be KV[Any, Any] or more specific.
    kv_type_hint = (
        self._applied_ptransform.transform.get_type_hints().input_types[0])
    self.key_coder = coders.registry.get_coder(kv_type_hint[0].tuple_types[0])

  def process_element(self, element):
    # Once output has been emitted, no further data elements may arrive.
    assert not self.global_state.get_state(
        None, _GroupByKeyOnlyEvaluator.COMPLETION_TAG)
    # NOTE(review): collections.Iterable is a deprecated alias (removed in
    # Python 3.10); collections.abc.Iterable is the modern spelling.
    if (isinstance(element, WindowedValue)
        and isinstance(element.value, collections.Iterable)
        and len(element.value) == 2):
      k, v = element.value
      # Values are buffered in keyed state under the encoded key until the
      # final bundle.
      encoded_k = self.key_coder.encode(k)
      state = self.step_context.get_keyed_state(encoded_k)
      state.add_state(None, _GroupByKeyOnlyEvaluator.ELEMENTS_TAG, v)
    else:
      raise TypeCheckError('Input to _GroupByKeyOnly must be a PCollection of '
                           'windowed key-value pairs. Instead received: %r.'
                           % element)

  def finish_bundle(self):
    if self._is_final_bundle:
      if self.global_state.get_state(
          None, _GroupByKeyOnlyEvaluator.COMPLETION_TAG):
        # Ignore empty bundles after emitting output. (This may happen because
        # empty bundles do not affect input watermarks.)
        bundles = []
      else:
        gbk_result = []
        # TODO(ccy): perhaps we can clean this up to not use this
        # internal attribute of the DirectStepContext.
        for encoded_k in self.step_context.keyed_existing_state:
          # Ignore global state.
          if encoded_k is None:
            continue
          k = self.key_coder.decode(encoded_k)
          state = self.step_context.get_keyed_state(encoded_k)
          vs = state.get_state(None, _GroupByKeyOnlyEvaluator.ELEMENTS_TAG)
          gbk_result.append(GlobalWindows.windowed_value((k, vs)))

        def len_element_fn(element):
          # Bundle size is measured by the number of grouped values per key.
          _, v = element.value
          return len(v)

        bundles = self._split_list_into_bundles(
            self.output_pcollection, gbk_result,
            _GroupByKeyOnlyEvaluator.MAX_ELEMENT_PER_BUNDLE, len_element_fn)

      # Mark completion and release the watermark hold.
      self.global_state.add_state(
          None, _GroupByKeyOnlyEvaluator.COMPLETION_TAG, True)
      hold = WatermarkManager.WATERMARK_POS_INF
    else:
      # Not all input has arrived yet: emit nothing and hold the output
      # watermark at -inf.
      bundles = []
      hold = WatermarkManager.WATERMARK_NEG_INF

    return TransformResult(
        self._applied_ptransform, bundles, None, None, hold)
class _NativeWriteEvaluator(_TransformEvaluator):
  """TransformEvaluator for _NativeWrite transform."""

  # All incoming elements are buffered under this tag until the final bundle.
  ELEMENTS_TAG = _ListStateTag('elements')

  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    super(_NativeWriteEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)

    assert applied_ptransform.transform.sink
    self._sink = applied_ptransform.transform.sink

  @property
  def _is_final_bundle(self):
    # True once the input watermark is at +inf (all input has arrived).
    return (self._execution_context.watermarks.input_watermark
            == WatermarkManager.WATERMARK_POS_INF)

  @property
  def _has_already_produced_output(self):
    # True once this step's output watermark reached +inf, i.e. the sink was
    # already written.
    return (self._execution_context.watermarks.output_watermark
            == WatermarkManager.WATERMARK_POS_INF)

  def start_bundle(self):
    self.step_context = self._execution_context.get_step_context()
    # Keyed state under key None serves as the step's global buffer.
    self.global_state = self.step_context.get_keyed_state(None)

  def process_element(self, element):
    self.global_state.add_state(
        None, _NativeWriteEvaluator.ELEMENTS_TAG, element)

  def finish_bundle(self):
    # finish_bundle will append incoming bundles in memory until all the bundles
    # carrying data is processed. This is done to produce only a single output
    # shard (some tests depends on this behavior). It is possible to have
    # incoming empty bundles after the output is produced, these bundles will be
    # ignored and would not generate additional output files.
    # TODO(altay): Do not wait until the last bundle to write in a single shard.
    if self._is_final_bundle:
      elements = self.global_state.get_state(
          None, _NativeWriteEvaluator.ELEMENTS_TAG)
      if self._has_already_produced_output:
        # Ignore empty bundles that arrive after the output is produced.
        assert elements == []
      else:
        self._sink.pipeline_options = self._evaluation_context.pipeline_options
        with self._sink.writer() as writer:
          for v in elements:
            writer.Write(v.value)
      hold = WatermarkManager.WATERMARK_POS_INF
    else:
      hold = WatermarkManager.WATERMARK_NEG_INF

    return TransformResult(
        self._applied_ptransform, [], None, None, hold)
| apache-2.0 |
milmd90/TwitterBot | build/lib/twitter/parse_tweet.py | 6 | 3610 | #!/usr/bin/env python
import re
class Emoticons:
    """Static lookup tables of ASCII emoticons, split by sentiment.

    Membership is tested with exact string equality (see
    ParseTweet.getAttributeEmoticon), so variants not listed here are missed.
    """
    POSITIVE = ["*O", "*-*", "*O*", "*o*", "* *",
                ":P", ":D", ":d", ":p",
                ";P", ";D", ";d", ";p",
                ":-)", ";-)", ":=)", ";=)",
                ":<)", ":>)", ";>)", ";=)",
                "=}", ":)", "(:;)",
                "(;", ":}", "{:", ";}",
                "{;:]",
                "[;", ":')", ";')", ":-3",
                "{;", ":]",
                ";-3", ":-x", ";-x", ":-X",
                ";-X", ":-}", ";-=}", ":-]",
                ";-]", ":-.)",
                "^_^", "^-^"]
    NEGATIVE = [":(", ";(", ":'(",
                "=(", "={", "):", ");",
                ")':", ")';", ")=", "}=",
                ";-{{", ";-{", ":-{{", ":-{",
                ":-(", ";-(",
                ":,)", ":'{",
                "[:", ";]"
                ]
class ParseTweet(object):
    """Parse a tweet into user handles, hashtags, URLs and RT/MT/emoticon
    attributes. All extraction happens at construction time."""

    # compile once on import
    regexp = {"RT": "^RT", "MT": r"^MT", "ALNUM": r"(@[a-zA-Z0-9_]+)",
              "HASHTAG": r"(#[\w\d]+)",
              # Bug fix: the previous pattern r"([https://|http://]?...)" was a
              # character class that matched single characters drawn from the
              # literal "https://|", not the scheme itself. Use an optional
              # non-capturing group for the http/https scheme instead.
              "URL": r"((?:https?://)?[a-zA-Z\d/]+\.+[a-zA-Z\d/.]+)",
              "SPACES": r"\s+"}
    regexp = dict((key, re.compile(value)) for key, value in regexp.items())

    def __init__(self, timeline_owner, tweet):
        """ timeline_owner : twitter handle of user account. tweet - 140 chars from feed; object does all computation on construction
        properties:
        RT, MT - boolean
        URLs - list of URL
        Hashtags - list of tags
        """
        self.Owner = timeline_owner
        self.tweet = tweet
        self.UserHandles = ParseTweet.getUserHandles(tweet)
        self.Hashtags = ParseTweet.getHashtags(tweet)
        self.URLs = ParseTweet.getURLs(tweet)
        self.RT = ParseTweet.getAttributeRT(tweet)
        self.MT = ParseTweet.getAttributeMT(tweet)
        self.Emoticon = ParseTweet.getAttributeEmoticon(tweet)

        # additional intelligence: a retweet is attributed to the first
        # mentioned handle rather than the timeline owner.
        if ( self.RT and len(self.UserHandles) > 0 ):
            self.Owner = self.UserHandles[0]
        return

    def __str__(self):
        """ for display method """
        return "owner %s, urls: %d, hashtags %d, user_handles %d, len_tweet %d, RT = %s, MT = %s" % \
               (self.Owner, len(self.URLs), len(self.Hashtags), len(self.UserHandles), len(self.tweet), self.RT, self.MT)

    @staticmethod
    def getAttributeEmoticon(tweet):
        """ see if tweet is contains any emoticons, +ve, -ve or neutral """
        emoji = list()
        for tok in re.split(ParseTweet.regexp["SPACES"], tweet.strip()):
            if tok in Emoticons.POSITIVE:
                emoji.append( tok )
                continue
            if tok in Emoticons.NEGATIVE:
                emoji.append( tok )
        return emoji

    @staticmethod
    def getAttributeRT(tweet):
        """ see if tweet is a RT """
        return re.search(ParseTweet.regexp["RT"], tweet.strip()) is not None

    @staticmethod
    def getAttributeMT(tweet):
        """ see if tweet is a MT """
        return re.search(ParseTweet.regexp["MT"], tweet.strip()) is not None

    @staticmethod
    def getUserHandles(tweet):
        """ given a tweet we try and extract all user handles in order of occurrence"""
        return re.findall(ParseTweet.regexp["ALNUM"], tweet)

    @staticmethod
    def getHashtags(tweet):
        """ return all hashtags"""
        return re.findall(ParseTweet.regexp["HASHTAG"], tweet)

    @staticmethod
    def getURLs(tweet):
        """ return all URL-like substrings: an optional http/https scheme
        followed by a dotted host/path """
        return re.findall(ParseTweet.regexp["URL"], tweet)
| apache-2.0 |
drmrd/ansible | lib/ansible/modules/packaging/os/homebrew_cask.py | 18 | 22697 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
# (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: homebrew_cask
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "Daniel Jaouen (@danieljaouen)"
- "Enric Lluelles (@enriclluelles)"
requirements:
- "python >= 2.6"
short_description: Install/uninstall homebrew casks.
description:
- Manages Homebrew casks.
version_added: "1.6"
options:
name:
description:
- name of cask to install/remove
required: true
aliases: ['pkg', 'package', 'cask']
path:
description:
- "':' separated list of paths to search for 'brew' executable."
default: '/usr/local/bin'
state:
description:
- state of the cask
choices: [ 'present', 'absent', 'upgraded' ]
default: present
update_homebrew:
description:
- update homebrew itself first. Note that C(brew cask update) is
a synonym for C(brew update).
type: bool
default: 'no'
aliases: ['update-brew']
version_added: "2.2"
install_options:
description:
- options flags to install a package
aliases: ['options']
version_added: "2.2"
accept_external_apps:
description:
- allow external apps
type: bool
default: 'no'
version_added: "2.5.0"
upgrade_all:
description:
- upgrade all casks (mutually exclusive with `upgrade`)
type: bool
default: 'no'
version_added: "2.5.0"
upgrade:
description:
- upgrade all casks (mutually exclusive with `upgrade_all`)
type: bool
default: 'no'
version_added: "2.5.0"
'''
EXAMPLES = '''
- homebrew_cask:
name: alfred
state: present
- homebrew_cask:
name: alfred
state: absent
- homebrew_cask:
name: alfred
state: present
install_options: 'appdir=/Applications'
- homebrew_cask:
name: alfred
state: present
install_options: 'debug,appdir=/Applications'
- homebrew_cask:
name: alfred
state: present
allow_external_apps: True
- homebrew_cask:
name: alfred
state: absent
install_options: force
- homebrew_cask:
upgrade_all: true
- homebrew_cask:
name: alfred
state: upgraded
install_options: force
'''
import os.path
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems, string_types
# exceptions -------------------------------------------------------------- {{{
class HomebrewCaskException(Exception):
    """Raised for any fatal error while managing Homebrew casks; the
    human-readable details are recorded on the HomebrewCask instance."""
    pass
# /exceptions ------------------------------------------------------------- }}}
# utils ------------------------------------------------------------------- {{{
def _create_regex_group(s):
lines = (line.strip() for line in s.split('\n') if line.strip())
chars = filter(None, (line.split('#')[0].strip() for line in lines))
group = r'[^' + r''.join(chars) + r']'
return re.compile(group)
# /utils ------------------------------------------------------------------ }}}
class HomebrewCask(object):
'''A class to manage Homebrew casks.'''
# class regexes ------------------------------------------------ {{{
VALID_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
: # colons
{sep} # the OS-specific path separator
. # dots
- # dashes
'''.format(sep=os.path.sep)
VALID_BREW_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
{sep} # the OS-specific path separator
. # dots
- # dashes
'''.format(sep=os.path.sep)
VALID_CASK_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
. # dots
/ # slash (for taps)
- # dashes
'''
INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS)
# /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
    @classmethod
    def valid_path(cls, path):
        '''
        `path` must be one of:
         - list of paths
         - a string containing only:
             - alphanumeric characters
             - dashes
             - dots
             - spaces
             - colons
             - os.path.sep
        '''

        if isinstance(path, (string_types)):
            return not cls.INVALID_PATH_REGEX.search(path)

        try:
            iter(path)
        except TypeError:
            return False
        else:
            paths = path
            # NOTE(review): list elements are validated with valid_brew_path,
            # which (unlike the string branch above) rejects colons — confirm
            # this asymmetry is intended.
            return all(cls.valid_brew_path(path_) for path_ in paths)
@classmethod
def valid_brew_path(cls, brew_path):
'''
`brew_path` must be one of:
- None
- a string containing only:
- alphanumeric characters
- dashes
- dots
- spaces
- os.path.sep
'''
if brew_path is None:
return True
return (
isinstance(brew_path, string_types)
and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
)
@classmethod
def valid_cask(cls, cask):
'''A valid cask is either None or alphanumeric + backslashes.'''
if cask is None:
return True
return (
isinstance(cask, string_types)
and not cls.INVALID_CASK_REGEX.search(cask)
)
@classmethod
def valid_state(cls, state):
'''
A valid state is one of:
- installed
- absent
'''
if state is None:
return True
else:
return (
isinstance(state, string_types)
and state.lower() in (
'installed',
'absent',
)
)
    @classmethod
    def valid_module(cls, module):
        '''A valid module is an instance of AnsibleModule.'''

        return isinstance(module, AnsibleModule)
# /class validations ------------------------------------------- }}}
# class properties --------------------------------------------- {{{
    @property
    def module(self):
        # The AnsibleModule driving this run; assigned via the validating
        # setter below.
        return self._module

    @module.setter
    def module(self, module):
        if not self.valid_module(module):
            self._module = None
            self.failed = True
            self.message = 'Invalid module: {0}.'.format(module)
            raise HomebrewCaskException(self.message)

        else:
            self._module = module
            return module
    @property
    def path(self):
        # List of directories searched for the `brew` executable.
        return self._path

    @path.setter
    def path(self, path):
        if not self.valid_path(path):
            self._path = []
            self.failed = True
            self.message = 'Invalid path: {0}.'.format(path)
            raise HomebrewCaskException(self.message)

        else:
            # A ':'-separated string is normalized into a list of directories.
            if isinstance(path, string_types):
                self._path = path.split(':')
            else:
                self._path = path

            return path
    @property
    def brew_path(self):
        # Absolute path of the located `brew` executable (set in
        # _prep_brew_path).
        return self._brew_path

    @brew_path.setter
    def brew_path(self, brew_path):
        if not self.valid_brew_path(brew_path):
            self._brew_path = None
            self.failed = True
            self.message = 'Invalid brew_path: {0}.'.format(brew_path)
            raise HomebrewCaskException(self.message)

        else:
            self._brew_path = brew_path
            return brew_path
    @property
    def params(self):
        return self._params

    @params.setter
    def params(self, params):
        # NOTE(review): the setter ignores its argument and always snapshots
        # self.module.params — confirm this is intentional.
        self._params = self.module.params
        return self._params
    @property
    def current_cask(self):
        # The cask currently being operated on by _install/_upgrade/_uninstall.
        return self._current_cask

    @current_cask.setter
    def current_cask(self, cask):
        if not self.valid_cask(cask):
            self._current_cask = None
            self.failed = True
            self.message = 'Invalid cask: {0}.'.format(cask)
            raise HomebrewCaskException(self.message)

        else:
            self._current_cask = cask
            return cask
# /class properties -------------------------------------------- }}}
    def __init__(self, module, path=path, casks=None, state=None,
                 update_homebrew=False, install_options=None,
                 accept_external_apps=False, upgrade_all=False):
        # NOTE(review): the default `path=path` binds the class-level `path`
        # property object, not a usable path string — callers must always
        # pass `path` explicitly; confirm against main().
        if not install_options:
            install_options = list()
        self._setup_status_vars()
        self._setup_instance_vars(module=module, path=path, casks=casks,
                                  state=state, update_homebrew=update_homebrew,
                                  install_options=install_options,
                                  accept_external_apps=accept_external_apps,
                                  upgrade_all=upgrade_all, )

        self._prep()
# prep --------------------------------------------------------- {{{
    def _setup_status_vars(self):
        # Reset the result-reporting state used to build the module response.
        self.failed = False
        self.changed = False
        self.changed_count = 0
        self.unchanged_count = 0
        self.message = ''
    def _setup_instance_vars(self, **kwargs):
        # Assign each keyword argument as an attribute; validated attributes
        # (module, path, ...) go through their property setters.
        for key, val in iteritems(kwargs):
            setattr(self, key, val)
    def _prep(self):
        # Resolve the brew executable up front so later commands can run.
        self._prep_brew_path()
    def _prep_brew_path(self):
        """Locate the `brew` executable via the module's bin-path search."""
        if not self.module:
            self.brew_path = None
            self.failed = True
            self.message = 'AnsibleModule not set.'
            raise HomebrewCaskException(self.message)

        self.brew_path = self.module.get_bin_path(
            'brew',
            required=True,
            opt_dirs=self.path,
        )
        if not self.brew_path:
            self.brew_path = None
            self.failed = True
            self.message = 'Unable to locate homebrew executable.'
            raise HomebrewCaskException('Unable to locate homebrew executable.')

        return self.brew_path
    def _status(self):
        # Result triple consumed by run() and ultimately the module response.
        return (self.failed, self.changed, self.message)
# /prep -------------------------------------------------------- }}}
    def run(self):
        """Execute the requested operation; return (failed, changed, message)."""
        try:
            self._run()
        except HomebrewCaskException:
            # Failure details were already recorded on self by the raiser.
            pass

        if not self.failed and (self.changed_count + self.unchanged_count > 1):
            # Summarize multi-cask runs instead of reporting the last message.
            self.message = "Changed: %d, Unchanged: %d" % (
                self.changed_count,
                self.unchanged_count,
            )
        (failed, changed, message) = self._status()

        return (failed, changed, message)
# checks ------------------------------------------------------- {{{
    def _current_cask_is_outdated(self):
        # `brew cask outdated <name>` prints the cask name only when a newer
        # version exists, so any output means "outdated".
        if not self.valid_cask(self.current_cask):
            return False

        rc, out, err = self.module.run_command([
            self.brew_path,
            'cask',
            'outdated',
            self.current_cask,
        ])

        return out != ""
def _current_cask_is_installed(self):
if not self.valid_cask(self.current_cask):
self.failed = True
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
cmd = [
"{brew_path}".format(brew_path=self.brew_path),
"cask",
"list",
self.current_cask
]
rc, out, err = self.module.run_command(cmd)
if re.search(r'Error: Cask .* is not installed.', err):
return False
else:
return True
# /checks ------------------------------------------------------ }}}
# commands ----------------------------------------------------- {{{
    def _run(self):
        # Dispatch to the operation implied by the module parameters.
        if self.upgrade_all:
            return self._upgrade_all()

        if self.casks:
            if self.state == 'installed':
                return self._install_casks()
            elif self.state == 'upgraded':
                return self._upgrade_casks()
            elif self.state == 'absent':
                return self._uninstall_casks()

        self.failed = True
        self.message = "You must select a cask to install."
        raise HomebrewCaskException(self.message)
# updated -------------------------------- {{{
    def _update_homebrew(self):
        """Run `brew update`; mark changed only when something was fetched."""
        rc, out, err = self.module.run_command([
            self.brew_path,
            'update',
        ])
        if rc == 0:
            if out and isinstance(out, string_types):
                # `brew update` prints "Already up-to-date." when there is
                # nothing new to fetch.
                already_updated = any(
                    re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
                    for s in out.split('\n')
                    if s
                )
                if not already_updated:
                    self.changed = True
                    self.message = 'Homebrew updated successfully.'
                else:
                    self.message = 'Homebrew already up-to-date.'

            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewCaskException(self.message)
# /updated ------------------------------- }}}
# _upgrade_all --------------------------- {{{
    def _upgrade_all(self):
        """Upgrade every installed cask via `brew cask upgrade`."""
        if self.module.check_mode:
            self.changed = True
            self.message = 'Casks would be upgraded.'
            raise HomebrewCaskException(self.message)
        rc, out, err = self.module.run_command([
            self.brew_path,
            'cask',
            'upgrade',
        ])
        if rc == 0:
            # brew reports "==> No Casks to upgrade" when everything is
            # current.
            if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE):
                self.message = 'Homebrew casks already upgraded.'

            else:
                self.changed = True
                self.message = 'Homebrew casks upgraded.'

            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewCaskException(self.message)
# /_upgrade_all -------------------------- }}}
# installed ------------------------------ {{{
    def _install_current_cask(self):
        """Install self.current_cask unless it is already present."""
        if not self.valid_cask(self.current_cask):
            self.failed = True
            self.message = 'Invalid cask: {0}.'.format(self.current_cask)
            raise HomebrewCaskException(self.message)

        if self._current_cask_is_installed():
            self.unchanged_count += 1
            self.message = 'Cask already installed: {0}'.format(
                self.current_cask,
            )
            return True

        if self.module.check_mode:
            self.changed = True
            self.message = 'Cask would be installed: {0}'.format(
                self.current_cask
            )
            raise HomebrewCaskException(self.message)

        opts = (
            [self.brew_path, 'cask', 'install', self.current_cask]
            + self.install_options
        )

        # Drop any empty option strings before running the command.
        cmd = [opt for opt in opts if opt]

        rc, out, err = self.module.run_command(cmd)

        if self._current_cask_is_installed():
            self.changed_count += 1
            self.changed = True
            self.message = 'Cask installed: {0}'.format(self.current_cask)
            return True
        elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err):
            # An externally-installed copy of the app counts as installed when
            # accept_external_apps is enabled.
            self.unchanged_count += 1
            self.message = 'Cask already installed: {0}'.format(
                self.current_cask,
            )
            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewCaskException(self.message)
    def _install_casks(self):
        # Install each requested cask in turn; per-cask counters are updated
        # by _install_current_cask.
        for cask in self.casks:
            self.current_cask = cask
            self._install_current_cask()

        return True
# /installed ----------------------------- }}}
# upgraded ------------------------------- {{{
def _upgrade_current_cask(self):
command = 'upgrade'
if not self.valid_cask(self.current_cask):
self.failed = True
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
if not self._current_cask_is_installed():
command = 'install'
if self._current_cask_is_installed() and not self._current_cask_is_outdated():
self.message = 'Cask is already upgraded: {0}'.format(
self.current_cask,
)
self.unchanged_count += 1
return True
if self.module.check_mode:
self.changed = True
self.message = 'Cask would be upgraded: {0}'.format(
self.current_cask
)
raise HomebrewCaskException(self.message)
opts = (
[self.brew_path, 'cask', command]
+ self.install_options
+ [self.current_cask]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if self._current_cask_is_installed() and not self._current_cask_is_outdated():
self.changed_count += 1
self.changed = True
self.message = 'Cask upgraded: {0}'.format(self.current_cask)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
def _upgrade_casks(self):
for cask in self.casks:
self.current_cask = cask
self._upgrade_current_cask()
return True
# /upgraded ------------------------------ }}}
# uninstalled ---------------------------- {{{
def _uninstall_current_cask(self):
if not self.valid_cask(self.current_cask):
self.failed = True
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
if not self._current_cask_is_installed():
self.unchanged_count += 1
self.message = 'Cask already uninstalled: {0}'.format(
self.current_cask,
)
return True
if self.module.check_mode:
self.changed = True
self.message = 'Cask would be uninstalled: {0}'.format(
self.current_cask
)
raise HomebrewCaskException(self.message)
cmd = [opt
for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask)
if opt]
rc, out, err = self.module.run_command(cmd)
if not self._current_cask_is_installed():
self.changed_count += 1
self.changed = True
self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewCaskException(self.message)
def _uninstall_casks(self):
for cask in self.casks:
self.current_cask = cask
self._uninstall_current_cask()
return True
# /uninstalled --------------------------- }}}
# /commands ---------------------------------------------------- }}}
def main():
    """Ansible module entry point: parse arguments and run HomebrewCask."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(
                aliases=["pkg", "package", "cask"],
                required=False,
                type='list',
            ),
            path=dict(
                default="/usr/local/bin",
                required=False,
                type='path',
            ),
            state=dict(
                default="present",
                choices=[
                    "present", "installed",
                    "latest", "upgraded",
                    "absent", "removed", "uninstalled",
                ],
            ),
            update_homebrew=dict(
                default=False,
                aliases=["update-brew"],
                type='bool',
            ),
            install_options=dict(
                default=None,
                aliases=['options'],
                type='list',
            ),
            accept_external_apps=dict(
                default=False,
                type='bool',
            ),
            upgrade_all=dict(
                default=False,
                aliases=["upgrade"],
                type='bool',
            ),
        ),
        supports_check_mode=True,
    )

    # Force an English locale so brew output can be parsed reliably.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    params = module.params

    # An empty or missing name list means "no specific casks".
    casks = params['name'] or None

    path = params['path']
    if path:
        path = path.split(':')

    # Collapse the state aliases onto their canonical values.
    state_by_alias = {
        'present': 'installed',
        'installed': 'installed',
        'latest': 'upgraded',
        'upgraded': 'upgraded',
        'absent': 'absent',
        'removed': 'absent',
        'uninstalled': 'absent',
    }
    state = state_by_alias[params['state']]

    params['install_options'] = params['install_options'] or []
    install_options = ['--{0}'.format(option)
                       for option in params['install_options']]

    brew_cask = HomebrewCask(
        module=module,
        path=path,
        casks=casks,
        state=state,
        update_homebrew=params['update_homebrew'],
        install_options=install_options,
        accept_external_apps=params['accept_external_apps'],
        upgrade_all=params['upgrade_all'],
    )

    failed, changed, message = brew_cask.run()
    if failed:
        module.fail_json(msg=message)
    module.exit_json(changed=changed, msg=message)


if __name__ == '__main__':
    main()
| gpl-3.0 |
g12mcgov/home-assistant | homeassistant/components/light/hue.py | 13 | 6679 | """
homeassistant.components.light.hue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Hue lights.
"""
import logging
import socket
from datetime import timedelta
from urllib.parse import urlparse
from homeassistant.loader import get_component
import homeassistant.util as util
from homeassistant.const import CONF_HOST, DEVICE_DEFAULT_NAME
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_XY_COLOR, ATTR_TRANSITION,
ATTR_FLASH, FLASH_LONG, FLASH_SHORT, ATTR_EFFECT,
EFFECT_COLORLOOP)
REQUIREMENTS = ['phue==0.8']
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
PHUE_CONFIG_FILE = "phue.conf"
# Map ip to request id for configuring
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """ Gets the Hue lights. """
    try:
        # pylint: disable=unused-variable
        import phue  # noqa
    except ImportError:
        _LOGGER.exception("Error while importing dependency phue.")
        return

    # Prefer the discovered bridge address; fall back to the config entry.
    if discovery_info is None:
        host = config.get(CONF_HOST, None)
    else:
        host = urlparse(discovery_info[1]).hostname

    # Only act if we are not already configuring this host
    if host in _CONFIGURING:
        return

    setup_bridge(host, hass, add_devices_callback)
def setup_bridge(host, hass, add_devices_callback):
    """ Setup a phue bridge based on host parameter. """
    import phue

    try:
        bridge = phue.Bridge(
            host,
            config_file_path=hass.config.path(PHUE_CONFIG_FILE))
    except ConnectionRefusedError:  # Wrong host was given
        _LOGGER.exception("Error connecting to the Hue bridge at %s", host)
        return
    except phue.PhueRegistrationException:
        # Bridge is reachable but its button has not been pressed yet:
        # hand over to the configurator flow and retry from its callback.
        _LOGGER.warning("Connected to Hue at %s but not registered.", host)
        request_configuration(host, hass, add_devices_callback)
        return

    # If we came here and configuring this host, mark as done
    if host in _CONFIGURING:
        request_id = _CONFIGURING.pop(host)
        configurator = get_component('configurator')
        configurator.request_done(request_id)

    # Maps light_id -> HueLight; closed over (and mutated) by update_lights.
    lights = {}

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update_lights():
        """ Updates the Hue light objects with latest info from the bridge. """
        try:
            api = bridge.get_api()
        except socket.error:
            # socket.error when we cannot reach Hue
            _LOGGER.exception("Cannot reach the bridge")
            return

        api_states = api.get('lights')

        if not isinstance(api_states, dict):
            _LOGGER.error("Got unexpected result from Hue API")
            return

        new_lights = []

        for light_id, info in api_states.items():
            if light_id not in lights:
                # First sighting: wrap it and queue it for registration.
                lights[light_id] = HueLight(int(light_id), info,
                                            bridge, update_lights)
                new_lights.append(lights[light_id])
            else:
                # Known light: just refresh its cached state.
                lights[light_id].info = info

        if new_lights:
            add_devices_callback(new_lights)

    update_lights()
def request_configuration(host, hass, add_devices_callback):
    """ Request configuration steps from the user. """
    configurator = get_component('configurator')

    # A second call for the same host while a request is pending means the
    # previous registration attempt failed.
    if host in _CONFIGURING:
        configurator.notify_errors(
            _CONFIGURING[host], "Failed to register, please try again.")
        return

    def bridge_registered(data):
        """ Actions to do when our configuration callback is called. """
        setup_bridge(host, hass, add_devices_callback)

    request_id = configurator.request_config(
        hass, "Philips Hue", bridge_registered,
        description=("Press the button on the bridge to register Philips Hue "
                     "with Home Assistant."),
        description_image="/static/images/config_philips_hue.jpg",
        submit_caption="I have pressed the button"
    )
    _CONFIGURING[host] = request_id
class HueLight(Light):
    """ Represents a Hue light """

    def __init__(self, light_id, info, bridge, update_lights):
        # Numeric id of the light on the bridge.
        self.light_id = light_id
        # Raw state dict for this light as returned by the bridge API.
        self.info = info
        # phue.Bridge instance used to send commands.
        self.bridge = bridge
        # Throttled callable that refreshes `info` from the bridge.
        self.update_lights = update_lights

    @property
    def unique_id(self):
        """ Returns the id of this Hue light """
        # Falls back to the entity name when the bridge does not report a
        # 'uniqueid' field (e.g. older bridge firmware).
        return "{}.{}".format(
            self.__class__, self.info.get('uniqueid', self.name))

    @property
    def name(self):
        """ Get the name of the Hue light. """
        return self.info.get('name', DEVICE_DEFAULT_NAME)

    @property
    def brightness(self):
        """ Brightness of this light between 0..255. """
        return self.info['state']['bri']

    @property
    def color_xy(self):
        """ XY color value. """
        return self.info['state'].get('xy')

    @property
    def is_on(self):
        """ True if device is on. """
        # Refresh first; update_lights is throttled, so repeated reads are
        # cheap between scan intervals.
        self.update_lights()
        return self.info['state']['reachable'] and self.info['state']['on']

    def turn_on(self, **kwargs):
        """ Turn the specified or all lights on. """
        command = {'on': True}

        if ATTR_TRANSITION in kwargs:
            # Transition time is in 1/10th seconds and cannot exceed
            # 900 seconds (hence the cap of 9000 tenths).
            command['transitiontime'] = min(9000, kwargs[ATTR_TRANSITION] * 10)

        if ATTR_BRIGHTNESS in kwargs:
            command['bri'] = kwargs[ATTR_BRIGHTNESS]

        if ATTR_XY_COLOR in kwargs:
            command['xy'] = kwargs[ATTR_XY_COLOR]

        flash = kwargs.get(ATTR_FLASH)
        if flash == FLASH_LONG:
            command['alert'] = 'lselect'
        elif flash == FLASH_SHORT:
            command['alert'] = 'select'
        else:
            # Explicitly clear any previous alert so a stale flash stops.
            command['alert'] = 'none'

        effect = kwargs.get(ATTR_EFFECT)
        if effect == EFFECT_COLORLOOP:
            command['effect'] = 'colorloop'
        else:
            command['effect'] = 'none'

        self.bridge.set_light(self.light_id, command)

    def turn_off(self, **kwargs):
        """ Turn the specified or all lights off. """
        command = {'on': False}

        if ATTR_TRANSITION in kwargs:
            # Transition time is in 1/10th seconds and cannot exceed
            # 900 seconds.
            command['transitiontime'] = min(9000, kwargs[ATTR_TRANSITION] * 10)

        self.bridge.set_light(self.light_id, command)

    def update(self):
        """ Synchronize state with bridge. """
        # Bypass the throttle so callers get genuinely fresh state.
        self.update_lights(no_throttle=True)
| mit |
iismd17/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
    """Build the numpy.distutils configuration for sklearn.metrics."""
    config = Configuration("metrics", parent_package, top_path)
    # CBLAS libraries and compiler/linker flags detected at build time.
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        # libm is needed explicitly for math symbols on POSIX linkers.
        cblas_libs.append('m')
    # NOTE(review): the .pop() calls below mutate blas_info *before* the
    # remaining keys are splatted via **blas_info — that ordering is
    # load-bearing, do not reorder these keyword arguments.
    config.add_extension("pairwise_fast",
                         sources=["pairwise_fast.c"],
                         include_dirs=[os.path.join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)
    return config
if __name__ == "__main__":
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration().todict())
| bsd-3-clause |
edlabh/SickRage | lib/unidecode/x068.py | 252 | 4674 | data = (
'Zhi ', # 0x00
'Liu ', # 0x01
'Mei ', # 0x02
'Hoy ', # 0x03
'Rong ', # 0x04
'Zha ', # 0x05
'[?] ', # 0x06
'Biao ', # 0x07
'Zhan ', # 0x08
'Jie ', # 0x09
'Long ', # 0x0a
'Dong ', # 0x0b
'Lu ', # 0x0c
'Sayng ', # 0x0d
'Li ', # 0x0e
'Lan ', # 0x0f
'Yong ', # 0x10
'Shu ', # 0x11
'Xun ', # 0x12
'Shuan ', # 0x13
'Qi ', # 0x14
'Zhen ', # 0x15
'Qi ', # 0x16
'Li ', # 0x17
'Yi ', # 0x18
'Xiang ', # 0x19
'Zhen ', # 0x1a
'Li ', # 0x1b
'Su ', # 0x1c
'Gua ', # 0x1d
'Kan ', # 0x1e
'Bing ', # 0x1f
'Ren ', # 0x20
'Xiao ', # 0x21
'Bo ', # 0x22
'Ren ', # 0x23
'Bing ', # 0x24
'Zi ', # 0x25
'Chou ', # 0x26
'Yi ', # 0x27
'Jie ', # 0x28
'Xu ', # 0x29
'Zhu ', # 0x2a
'Jian ', # 0x2b
'Zui ', # 0x2c
'Er ', # 0x2d
'Er ', # 0x2e
'You ', # 0x2f
'Fa ', # 0x30
'Gong ', # 0x31
'Kao ', # 0x32
'Lao ', # 0x33
'Zhan ', # 0x34
'Li ', # 0x35
'Yin ', # 0x36
'Yang ', # 0x37
'He ', # 0x38
'Gen ', # 0x39
'Zhi ', # 0x3a
'Chi ', # 0x3b
'Ge ', # 0x3c
'Zai ', # 0x3d
'Luan ', # 0x3e
'Fu ', # 0x3f
'Jie ', # 0x40
'Hang ', # 0x41
'Gui ', # 0x42
'Tao ', # 0x43
'Guang ', # 0x44
'Wei ', # 0x45
'Kuang ', # 0x46
'Ru ', # 0x47
'An ', # 0x48
'An ', # 0x49
'Juan ', # 0x4a
'Yi ', # 0x4b
'Zhuo ', # 0x4c
'Ku ', # 0x4d
'Zhi ', # 0x4e
'Qiong ', # 0x4f
'Tong ', # 0x50
'Sang ', # 0x51
'Sang ', # 0x52
'Huan ', # 0x53
'Jie ', # 0x54
'Jiu ', # 0x55
'Xue ', # 0x56
'Duo ', # 0x57
'Zhui ', # 0x58
'Yu ', # 0x59
'Zan ', # 0x5a
'Kasei ', # 0x5b
'Ying ', # 0x5c
'Masu ', # 0x5d
'[?] ', # 0x5e
'Zhan ', # 0x5f
'Ya ', # 0x60
'Nao ', # 0x61
'Zhen ', # 0x62
'Dang ', # 0x63
'Qi ', # 0x64
'Qiao ', # 0x65
'Hua ', # 0x66
'Kuai ', # 0x67
'Jiang ', # 0x68
'Zhuang ', # 0x69
'Xun ', # 0x6a
'Suo ', # 0x6b
'Sha ', # 0x6c
'Zhen ', # 0x6d
'Bei ', # 0x6e
'Ting ', # 0x6f
'Gua ', # 0x70
'Jing ', # 0x71
'Bo ', # 0x72
'Ben ', # 0x73
'Fu ', # 0x74
'Rui ', # 0x75
'Tong ', # 0x76
'Jue ', # 0x77
'Xi ', # 0x78
'Lang ', # 0x79
'Liu ', # 0x7a
'Feng ', # 0x7b
'Qi ', # 0x7c
'Wen ', # 0x7d
'Jun ', # 0x7e
'Gan ', # 0x7f
'Cu ', # 0x80
'Liang ', # 0x81
'Qiu ', # 0x82
'Ting ', # 0x83
'You ', # 0x84
'Mei ', # 0x85
'Bang ', # 0x86
'Long ', # 0x87
'Peng ', # 0x88
'Zhuang ', # 0x89
'Di ', # 0x8a
'Xuan ', # 0x8b
'Tu ', # 0x8c
'Zao ', # 0x8d
'Ao ', # 0x8e
'Gu ', # 0x8f
'Bi ', # 0x90
'Di ', # 0x91
'Han ', # 0x92
'Zi ', # 0x93
'Zhi ', # 0x94
'Ren ', # 0x95
'Bei ', # 0x96
'Geng ', # 0x97
'Jian ', # 0x98
'Huan ', # 0x99
'Wan ', # 0x9a
'Nuo ', # 0x9b
'Jia ', # 0x9c
'Tiao ', # 0x9d
'Ji ', # 0x9e
'Xiao ', # 0x9f
'Lu ', # 0xa0
'Huan ', # 0xa1
'Shao ', # 0xa2
'Cen ', # 0xa3
'Fen ', # 0xa4
'Song ', # 0xa5
'Meng ', # 0xa6
'Wu ', # 0xa7
'Li ', # 0xa8
'Li ', # 0xa9
'Dou ', # 0xaa
'Cen ', # 0xab
'Ying ', # 0xac
'Suo ', # 0xad
'Ju ', # 0xae
'Ti ', # 0xaf
'Jie ', # 0xb0
'Kun ', # 0xb1
'Zhuo ', # 0xb2
'Shu ', # 0xb3
'Chan ', # 0xb4
'Fan ', # 0xb5
'Wei ', # 0xb6
'Jing ', # 0xb7
'Li ', # 0xb8
'Bing ', # 0xb9
'Fumoto ', # 0xba
'Shikimi ', # 0xbb
'Tao ', # 0xbc
'Zhi ', # 0xbd
'Lai ', # 0xbe
'Lian ', # 0xbf
'Jian ', # 0xc0
'Zhuo ', # 0xc1
'Ling ', # 0xc2
'Li ', # 0xc3
'Qi ', # 0xc4
'Bing ', # 0xc5
'Zhun ', # 0xc6
'Cong ', # 0xc7
'Qian ', # 0xc8
'Mian ', # 0xc9
'Qi ', # 0xca
'Qi ', # 0xcb
'Cai ', # 0xcc
'Gun ', # 0xcd
'Chan ', # 0xce
'Te ', # 0xcf
'Fei ', # 0xd0
'Pai ', # 0xd1
'Bang ', # 0xd2
'Pou ', # 0xd3
'Hun ', # 0xd4
'Zong ', # 0xd5
'Cheng ', # 0xd6
'Zao ', # 0xd7
'Ji ', # 0xd8
'Li ', # 0xd9
'Peng ', # 0xda
'Yu ', # 0xdb
'Yu ', # 0xdc
'Gu ', # 0xdd
'Hun ', # 0xde
'Dong ', # 0xdf
'Tang ', # 0xe0
'Gang ', # 0xe1
'Wang ', # 0xe2
'Di ', # 0xe3
'Xi ', # 0xe4
'Fan ', # 0xe5
'Cheng ', # 0xe6
'Zhan ', # 0xe7
'Qi ', # 0xe8
'Yuan ', # 0xe9
'Yan ', # 0xea
'Yu ', # 0xeb
'Quan ', # 0xec
'Yi ', # 0xed
'Sen ', # 0xee
'Ren ', # 0xef
'Chui ', # 0xf0
'Leng ', # 0xf1
'Qi ', # 0xf2
'Zhuo ', # 0xf3
'Fu ', # 0xf4
'Ke ', # 0xf5
'Lai ', # 0xf6
'Zou ', # 0xf7
'Zou ', # 0xf8
'Zhuo ', # 0xf9
'Guan ', # 0xfa
'Fen ', # 0xfb
'Fen ', # 0xfc
'Chen ', # 0xfd
'Qiong ', # 0xfe
'Nie ', # 0xff
)
| gpl-3.0 |
apache/spark | python/pyspark/sql/tests/test_pandas_udf_window.py | 18 | 12998 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.utils import AnalysisException
from pyspark.sql.functions import array, explode, col, lit, mean, min, max, rank, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.window import Window
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
from pandas.testing import assert_frame_equal
@unittest.skipIf(
    not have_pandas or not have_pyarrow,
    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
class WindowPandasUDFTests(ReusedSQLTestCase):
    """Tests for pandas (grouped-aggregate) UDFs used over window specs.

    Each test computes the same aggregation twice — once with a pandas UDF
    and once with the built-in SQL function — and asserts the frames match.
    """

    # ---- fixtures -------------------------------------------------------

    @property
    def data(self):
        # 10 ids x 10 values each, plus a constant weight column 'w'.
        return self.spark.range(10).toDF('id') \
            .withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
            .withColumn("v", explode(col('vs'))) \
            .drop('vs') \
            .withColumn('w', lit(1.0))

    @property
    def python_plus_one(self):
        # Plain (row-at-a-time) Python UDF, for mixing with pandas UDFs.
        @udf('double')
        def plus_one(v):
            assert isinstance(v, float)
            return v + 1
        return plus_one

    @property
    def pandas_scalar_time_two(self):
        return pandas_udf(lambda v: v * 2, 'double')

    @property
    def pandas_agg_count_udf(self):
        @pandas_udf('long', PandasUDFType.GROUPED_AGG)
        def count(v):
            return len(v)
        return count

    @property
    def pandas_agg_mean_udf(self):
        @pandas_udf('double', PandasUDFType.GROUPED_AGG)
        def avg(v):
            return v.mean()
        return avg

    @property
    def pandas_agg_max_udf(self):
        @pandas_udf('double', PandasUDFType.GROUPED_AGG)
        def max(v):
            return v.max()
        return max

    @property
    def pandas_agg_min_udf(self):
        @pandas_udf('double', PandasUDFType.GROUPED_AGG)
        def min(v):
            return v.min()
        return min

    # ---- window specs ---------------------------------------------------

    @property
    def unbounded_window(self):
        return Window.partitionBy('id') \
            .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing).orderBy('v')

    @property
    def ordered_window(self):
        return Window.partitionBy('id').orderBy('v')

    @property
    def unpartitioned_window(self):
        return Window.partitionBy()

    @property
    def sliding_row_window(self):
        return Window.partitionBy('id').orderBy('v').rowsBetween(-2, 1)

    @property
    def sliding_range_window(self):
        return Window.partitionBy('id').orderBy('v').rangeBetween(-2, 4)

    @property
    def growing_row_window(self):
        return Window.partitionBy('id').orderBy('v').rowsBetween(Window.unboundedPreceding, 3)

    @property
    def growing_range_window(self):
        return Window.partitionBy('id').orderBy('v') \
            .rangeBetween(Window.unboundedPreceding, 4)

    @property
    def shrinking_row_window(self):
        return Window.partitionBy('id').orderBy('v').rowsBetween(-2, Window.unboundedFollowing)

    @property
    def shrinking_range_window(self):
        return Window.partitionBy('id').orderBy('v') \
            .rangeBetween(-3, Window.unboundedFollowing)

    # ---- tests ----------------------------------------------------------

    def test_simple(self):
        df = self.data
        w = self.unbounded_window

        mean_udf = self.pandas_agg_mean_udf

        result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
        expected1 = df.withColumn('mean_v', mean(df['v']).over(w))

        result2 = df.select(mean_udf(df['v']).over(w))
        expected2 = df.select(mean(df['v']).over(w))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())
        assert_frame_equal(expected2.toPandas(), result2.toPandas())

    def test_multiple_udfs(self):
        df = self.data
        w = self.unbounded_window

        result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
                    .withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
                    .withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))

        expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
                      .withColumn('max_v', max(df['v']).over(w)) \
                      .withColumn('min_w', min(df['w']).over(w))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())

    def test_replace_existing(self):
        df = self.data
        w = self.unbounded_window

        result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
        expected1 = df.withColumn('v', mean(df['v']).over(w))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())

    def test_mixed_sql(self):
        df = self.data
        w = self.unbounded_window
        mean_udf = self.pandas_agg_mean_udf

        result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
        expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)

        assert_frame_equal(expected1.toPandas(), result1.toPandas())

    def test_mixed_udf(self):
        df = self.data
        w = self.unbounded_window

        plus_one = self.python_plus_one
        time_two = self.pandas_scalar_time_two
        mean_udf = self.pandas_agg_mean_udf

        result1 = df.withColumn(
            'v2',
            plus_one(mean_udf(plus_one(df['v'])).over(w)))
        expected1 = df.withColumn(
            'v2',
            plus_one(mean(plus_one(df['v'])).over(w)))

        result2 = df.withColumn(
            'v2',
            time_two(mean_udf(time_two(df['v'])).over(w)))
        expected2 = df.withColumn(
            'v2',
            time_two(mean(time_two(df['v'])).over(w)))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())
        assert_frame_equal(expected2.toPandas(), result2.toPandas())

    def test_without_partitionBy(self):
        df = self.data
        w = self.unpartitioned_window
        mean_udf = self.pandas_agg_mean_udf

        result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
        expected1 = df.withColumn('v2', mean(df['v']).over(w))

        result2 = df.select(mean_udf(df['v']).over(w))
        expected2 = df.select(mean(df['v']).over(w))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())
        assert_frame_equal(expected2.toPandas(), result2.toPandas())

    def test_mixed_sql_and_udf(self):
        df = self.data
        w = self.unbounded_window
        ow = self.ordered_window
        max_udf = self.pandas_agg_max_udf
        min_udf = self.pandas_agg_min_udf

        result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
        expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))

        # Test mixing sql window function and window udf in the same expression
        result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
        expected2 = expected1

        # Test chaining sql aggregate function and udf
        result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
                    .withColumn('min_v', min(df['v']).over(w)) \
                    .withColumn('v_diff', col('max_v') - col('min_v')) \
                    .drop('max_v', 'min_v')
        expected3 = expected1

        # Test mixing sql window function and udf
        result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
                    .withColumn('rank', rank().over(ow))
        expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
                      .withColumn('rank', rank().over(ow))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())
        assert_frame_equal(expected2.toPandas(), result2.toPandas())
        assert_frame_equal(expected3.toPandas(), result3.toPandas())
        assert_frame_equal(expected4.toPandas(), result4.toPandas())

    def test_array_type(self):
        df = self.data
        w = self.unbounded_window

        array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
        result1 = df.withColumn('v2', array_udf(df['v']).over(w))
        self.assertEqual(result1.first()['v2'], [1.0, 2.0])

    def test_invalid_args(self):
        df = self.data
        w = self.unbounded_window

        with QuietTest(self.sc):
            with self.assertRaisesRegex(
                    AnalysisException,
                    '.*not supported within a window function'):
                foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
                df.withColumn('v2', foo_udf(df['v']).over(w))

    def test_bounded_simple(self):
        from pyspark.sql.functions import mean, max, min, count

        df = self.data
        w1 = self.sliding_row_window
        w2 = self.shrinking_range_window

        plus_one = self.python_plus_one
        count_udf = self.pandas_agg_count_udf
        mean_udf = self.pandas_agg_mean_udf
        max_udf = self.pandas_agg_max_udf
        min_udf = self.pandas_agg_min_udf

        result1 = df.withColumn('mean_v', mean_udf(plus_one(df['v'])).over(w1)) \
                    .withColumn('count_v', count_udf(df['v']).over(w2)) \
                    .withColumn('max_v',  max_udf(df['v']).over(w2)) \
                    .withColumn('min_v', min_udf(df['v']).over(w1))

        expected1 = df.withColumn('mean_v', mean(plus_one(df['v'])).over(w1)) \
                      .withColumn('count_v', count(df['v']).over(w2)) \
                      .withColumn('max_v', max(df['v']).over(w2)) \
                      .withColumn('min_v', min(df['v']).over(w1))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())

    def test_growing_window(self):
        from pyspark.sql.functions import mean

        df = self.data
        w1 = self.growing_row_window
        w2 = self.growing_range_window

        mean_udf = self.pandas_agg_mean_udf

        result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
                    .withColumn('m2', mean_udf(df['v']).over(w2))

        expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
                      .withColumn('m2', mean(df['v']).over(w2))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())

    def test_sliding_window(self):
        from pyspark.sql.functions import mean

        df = self.data
        w1 = self.sliding_row_window
        w2 = self.sliding_range_window

        mean_udf = self.pandas_agg_mean_udf

        result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
                    .withColumn('m2', mean_udf(df['v']).over(w2))

        expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
                      .withColumn('m2', mean(df['v']).over(w2))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())

    def test_shrinking_window(self):
        from pyspark.sql.functions import mean

        df = self.data
        w1 = self.shrinking_row_window
        w2 = self.shrinking_range_window

        mean_udf = self.pandas_agg_mean_udf

        result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
                    .withColumn('m2', mean_udf(df['v']).over(w2))

        expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
                      .withColumn('m2', mean(df['v']).over(w2))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())

    def test_bounded_mixed(self):
        from pyspark.sql.functions import mean, max

        df = self.data
        w1 = self.sliding_row_window
        w2 = self.unbounded_window

        mean_udf = self.pandas_agg_mean_udf
        max_udf = self.pandas_agg_max_udf

        # BUGFIX: 'mean_unbounded_v' previously used w1 (the sliding window)
        # in both result and expected, so the unbounded mean was never
        # actually exercised; use w2 (the unbounded window) as intended.
        result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w1)) \
                    .withColumn('max_v', max_udf(df['v']).over(w2)) \
                    .withColumn('mean_unbounded_v', mean_udf(df['v']).over(w2))

        expected1 = df.withColumn('mean_v', mean(df['v']).over(w1)) \
                      .withColumn('max_v', max(df['v']).over(w2)) \
                      .withColumn('mean_unbounded_v', mean(df['v']).over(w2))

        assert_frame_equal(expected1.toPandas(), result1.toPandas())
if __name__ == "__main__":
    # Re-export the tests so unittest discovery finds them by module name.
    from pyspark.sql.tests.test_pandas_udf_window import *  # noqa: F401

    try:
        # Prefer JUnit-style XML reports when xmlrunner is available (CI).
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/common/system/user_mock.py | 4 | 2421 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.deprecated_logging import log
class MockUser(object):
    """Test double for webkitpy's User: canned answers, no real I/O.

    Mirrors the real User interface; opened URLs are recorded on
    self.opened_urls so tests can assert on them.
    """

    def __init__(self):
        self.opened_urls = []

    @classmethod
    def prompt(cls, message, repeat=1, raw_input=raw_input):
        """Always answer a fixed string instead of reading stdin."""
        return "Mock user response"

    @classmethod
    def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
        """No-op stand-in for the interactive list prompt."""
        pass

    def edit(self, files):
        """No-op: pretend the user edited the given files."""
        pass

    def edit_changelog(self, files):
        """No-op: pretend the user edited the changelogs."""
        pass

    def page(self, message):
        """No-op: pretend the message was paged to the user."""
        pass

    def confirm(self, message=None, default='y'):
        """Log the question and answer with the default choice."""
        log(message)
        return default == 'y'

    def can_open_url(self):
        return True

    def open_url(self, url):
        """Record the URL and log instead of launching a browser."""
        self.opened_urls.append(url)
        if url.startswith("file://"):
            log("MOCK: user.open_url: file://...")
        else:
            log("MOCK: user.open_url: %s" % url)
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/core/protobuf/device_properties_pb2.py | 2 | 9216 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/device_properties.proto
# NOTE(review): machine-generated protobuf module ("DO NOT EDIT") -- any
# change beyond comments should be made by re-running protoc on the .proto.
import sys
# _b(): identity on Python 2, latin-1 encode on Python 3, so the serialized
# descriptor below can be written once as a single str literal.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File descriptor carrying the wire-format serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tensorflow/core/protobuf/device_properties.proto',
  package='tensorflow',
  syntax='proto3',
  serialized_pb=_b('\n0tensorflow/core/protobuf/device_properties.proto\x12\ntensorflow\"\x90\x03\n\x10\x44\x65viceProperties\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0e\n\x06vendor\x18\x02 \x01(\t\x12\r\n\x05model\x18\x03 \x01(\t\x12\x11\n\tfrequency\x18\x04 \x01(\x03\x12\x11\n\tnum_cores\x18\x05 \x01(\x03\x12\x42\n\x0b\x65nvironment\x18\x06 \x03(\x0b\x32-.tensorflow.DeviceProperties.EnvironmentEntry\x12\x15\n\rnum_registers\x18\x07 \x01(\x03\x12\x15\n\rl1_cache_size\x18\x08 \x01(\x03\x12\x15\n\rl2_cache_size\x18\t \x01(\x03\x12\x15\n\rl3_cache_size\x18\n \x01(\x03\x12-\n%shared_memory_size_per_multiprocessor\x18\x0b \x01(\x03\x12\x13\n\x0bmemory_size\x18\x0c \x01(\x03\x12\x11\n\tbandwidth\x18\r \x01(\x03\x1a\x32\n\x10\x45nvironmentEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x1b\x42\x16\x44\x65vicePropertiesProtos\xf8\x01\x01\x62\x06proto3')
)
# Descriptor for the map-entry message backing DeviceProperties.environment.
_DEVICEPROPERTIES_ENVIRONMENTENTRY = _descriptor.Descriptor(
  name='EnvironmentEntry',
  full_name='tensorflow.DeviceProperties.EnvironmentEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='tensorflow.DeviceProperties.EnvironmentEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='tensorflow.DeviceProperties.EnvironmentEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=415,
  serialized_end=465,
)
# Descriptor for the top-level DeviceProperties message.
_DEVICEPROPERTIES = _descriptor.Descriptor(
  name='DeviceProperties',
  full_name='tensorflow.DeviceProperties',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='tensorflow.DeviceProperties.type', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='vendor', full_name='tensorflow.DeviceProperties.vendor', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='model', full_name='tensorflow.DeviceProperties.model', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='frequency', full_name='tensorflow.DeviceProperties.frequency', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='num_cores', full_name='tensorflow.DeviceProperties.num_cores', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='environment', full_name='tensorflow.DeviceProperties.environment', index=5,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='num_registers', full_name='tensorflow.DeviceProperties.num_registers', index=6,
      number=7, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='l1_cache_size', full_name='tensorflow.DeviceProperties.l1_cache_size', index=7,
      number=8, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='l2_cache_size', full_name='tensorflow.DeviceProperties.l2_cache_size', index=8,
      number=9, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='l3_cache_size', full_name='tensorflow.DeviceProperties.l3_cache_size', index=9,
      number=10, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='shared_memory_size_per_multiprocessor', full_name='tensorflow.DeviceProperties.shared_memory_size_per_multiprocessor', index=10,
      number=11, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='memory_size', full_name='tensorflow.DeviceProperties.memory_size', index=11,
      number=12, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bandwidth', full_name='tensorflow.DeviceProperties.bandwidth', index=12,
      number=13, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_DEVICEPROPERTIES_ENVIRONMENTENTRY, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=65,
  serialized_end=465,
)
# Wire up nested/message-field cross references and register everything with
# the default symbol database so reflection-generated classes resolve.
_DEVICEPROPERTIES_ENVIRONMENTENTRY.containing_type = _DEVICEPROPERTIES
_DEVICEPROPERTIES.fields_by_name['environment'].message_type = _DEVICEPROPERTIES_ENVIRONMENTENTRY
DESCRIPTOR.message_types_by_name['DeviceProperties'] = _DEVICEPROPERTIES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeviceProperties = _reflection.GeneratedProtocolMessageType('DeviceProperties', (_message.Message,), dict(
  EnvironmentEntry = _reflection.GeneratedProtocolMessageType('EnvironmentEntry', (_message.Message,), dict(
    DESCRIPTOR = _DEVICEPROPERTIES_ENVIRONMENTENTRY,
    __module__ = 'tensorflow.core.protobuf.device_properties_pb2'
    # @@protoc_insertion_point(class_scope:tensorflow.DeviceProperties.EnvironmentEntry)
    ))
  ,
  DESCRIPTOR = _DEVICEPROPERTIES,
  __module__ = 'tensorflow.core.protobuf.device_properties_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.DeviceProperties)
  ))
_sym_db.RegisterMessage(DeviceProperties)
_sym_db.RegisterMessage(DeviceProperties.EnvironmentEntry)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('B\026DevicePropertiesProtos\370\001\001'))
_DEVICEPROPERTIES_ENVIRONMENTENTRY.has_options = True
_DEVICEPROPERTIES_ENVIRONMENTENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| mit |
Yeraze/CalendarHangout | apiclient/mimeparse.py | 189 | 6486 | # Copyright (C) 2007 Joe Gregorio
#
# Licensed under the MIT License
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
__version__ = '0.1.3'
__author__ = 'Joe Gregorio'
__email__ = 'joe@bitworking.org'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
    """Parses a mime-type into its component parts.

    Carves up a mime-type and returns a tuple of the (type, subtype, params)
    where 'params' is a dictionary of all the parameters for the media range.
    For example, the media range 'application/xhtml;q=0.5' would get parsed
    into:

       ('application', 'xhtml', {'q': '0.5'})
    """
    parts = mime_type.split(';')
    # Each parameter is 'key=value'; split on the first '=' only so values
    # may themselves contain '='.
    params = dict([tuple([s.strip() for s in param.split('=', 1)])
                   for param in parts[1:]
                   ])
    full_type = parts[0].strip()
    # Java URLConnection class sends an Accept header that includes a
    # single '*'. Turn it into a legal wildcard.
    if full_type == '*':
        full_type = '*/*'
    # Avoid shadowing the builtin 'type'.
    (mtype, subtype) = full_type.split('/')
    return (mtype.strip(), subtype.strip(), params)
def parse_media_range(range):
    """Parse a media-range into its component parts.

    Carves up a media range and returns a tuple of the (type, subtype,
    params) where 'params' is a dictionary of all the parameters for the media
    range. For example, the media range 'application/*;q=0.5' would get parsed
    into:

       ('application', '*', {'q': '0.5'})

    In addition this function also guarantees that there is a value for 'q'
    in the params dictionary, filling it in with a proper default if
    necessary.
    """
    (mtype, subtype, params) = parse_mime_type(range)
    # 'q' must be present and parse to a float in (0, 1]; anything else
    # (missing, empty, zero, or out of range) is normalized to the default 1.
    # NOTE: 'q' in params replaces the Python-2-only dict.has_key().
    if 'q' not in params or not params['q'] or \
            not float(params['q']) or float(params['q']) > 1 \
            or float(params['q']) < 0:
        params['q'] = '1'
    return (mtype, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    Find the best match for a given mime-type against a list of media_ranges
    that have already been parsed by parse_media_range(). Returns a tuple of
    the fitness value and the value of the 'q' quality parameter of the best
    match, or (-1, 0) if no match was found. Just as for quality_parsed(),
    'parsed_ranges' must be a list of parsed media ranges.
    """
    best_fitness = -1
    best_fit_q = 0
    (target_type, target_subtype, target_params) = \
            parse_media_range(mime_type)
    for (mtype, subtype, params) in parsed_ranges:
        type_match = (mtype == target_type or
                      mtype == '*' or
                      target_type == '*')
        subtype_match = (subtype == target_subtype or
                         subtype == '*' or
                         target_subtype == '*')
        if type_match and subtype_match:
            # Count parameters (other than 'q') shared verbatim between the
            # target and this range. sum()/items()/'in' replace the
            # Python-2-only reduce()/iteritems()/has_key().
            param_matches = sum(1 for (key, value) in
                                target_params.items() if key != 'q' and
                                key in params and value == params[key])
            # Exact type match is worth 100, exact subtype 10, then one
            # point per matching parameter.
            fitness = (mtype == target_type) and 100 or 0
            fitness += (subtype == target_subtype) and 10 or 0
            fitness += param_matches
            if fitness > best_fitness:
                best_fitness = fitness
                best_fit_q = params['q']
    return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
    """Return the 'q' value of the best media-range match for *mime_type*.

    Behaves exactly like quality(), except that *parsed_ranges* must already
    have been processed by parse_media_range(). Returns 0 when no media
    range matches.
    """
    fitness, q = fitness_and_quality_parsed(mime_type, parsed_ranges)
    return q
def quality(mime_type, ranges):
    """Return the quality ('q') of a mime-type against a list of media-ranges.

    *ranges* is a comma-separated media-range string, as found in an HTTP
    Accept header. For example:
    >>> quality('text/html','text/*;q=0.3, text/html;q=0.7,
                  text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
    0.7
    """
    parsed_ranges = []
    for r in ranges.split(','):
        parsed_ranges.append(parse_media_range(r))
    return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
    """Return mime-type with the highest quality ('q') from list of candidates.

    *header* is an HTTP Accept-style string of media-ranges; *supported* is a
    list of candidate mime-types, sorted in order of increasing desirability
    so that later entries win ties. Returns '' when nothing matches with a
    non-zero quality.
    >>> best_match(['application/xbel+xml', 'text/xml'],
                   'text/*;q=0.5,*/*; q=0.1')
    'text/xml'
    """
    candidate_ranges = _filter_blank(header.split(','))
    parsed_header = [parse_media_range(r) for r in candidate_ranges]
    weighted_matches = []
    # (fitness, q) sorts first, then original position breaks ties in favor
    # of later (more desirable) entries.
    for pos, mime_type in enumerate(supported):
        fit_and_q = fitness_and_quality_parsed(mime_type, parsed_header)
        weighted_matches.append((fit_and_q, pos, mime_type))
    weighted_matches.sort()
    best = weighted_matches[-1]
    return best[0][1] and best[2] or ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
| gpl-2.0 |
dsandeephegde/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/font.py | 11 | 4135 | import ctypes
import logging
import os
import platform
from shutil import copy2, rmtree
from subprocess import call
HERE = os.path.split(__file__)[0]  # directory containing this module
SYSTEM = platform.system().lower()  # e.g. 'linux', 'darwin', 'windows'
class FontInstaller(object):
    """Context manager that installs fonts for the duration of its scope.

    *fonts* maps arbitrary keys to font file paths. On __enter__ each font
    is installed via the platform-specific install_<system>_font method; on
    __exit__ every font is removed again (only if at least one install
    succeeded). *font_dir* optionally overrides the per-user font directory
    on Linux/macOS; it is unused on Windows.
    """
    def __init__(self, font_dir=None, **fonts):
        self.font_dir = font_dir
        self.installed_fonts = False  # True once any font installs OK
        self.created_dir = False      # True if we created font_dir ourselves
        self.fonts = fonts
    def __enter__(self, options=None):
        for font_path in self.fonts.values():
            # os.path.basename (not split('/')) so Windows-style paths with
            # backslash separators yield the bare file name too.
            font_name = os.path.basename(font_path)
            install = getattr(self, 'install_%s_font' % SYSTEM, None)
            if not install:
                logging.warning('Font installation not supported on %s',
                                SYSTEM)
                return False
            if install(font_name, font_path):
                self.installed_fonts = True
                logging.info('Installed font: %s', font_name)
            else:
                logging.warning('Unable to install font: %s', font_name)
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.installed_fonts:
            return False
        for font_path in self.fonts.values():
            font_name = os.path.basename(font_path)
            remove = getattr(self, 'remove_%s_font' % SYSTEM, None)
            if not remove:
                logging.warning('Font removal not supported on %s', SYSTEM)
                return False
            if remove(font_name, font_path):
                logging.info('Removed font: %s', font_name)
            else:
                logging.warning('Unable to remove font: %s', font_name)
    def _stage_font(self, default_dir, font_name, font_path):
        """Ensure self.font_dir exists (defaulting to *default_dir*) and
        copy the font file into it if not already present."""
        if not self.font_dir:
            self.font_dir = default_dir
        if not os.path.exists(self.font_dir):
            os.makedirs(self.font_dir)
            self.created_dir = True
        if not os.path.exists(os.path.join(self.font_dir, font_name)):
            copy2(font_path, self.font_dir)
    def _refresh_font_cache(self):
        """Rebuild the fontconfig cache; False if fontconfig is missing."""
        try:
            fc_cache_returncode = call('fc-cache')
            return not fc_cache_returncode
        except OSError:  # If fontconfig doesn't exist, return False
            logging.error('fontconfig not available on this Linux system.')
            return False
    def install_linux_font(self, font_name, font_path):
        self._stage_font(os.path.join(os.path.expanduser('~'), '.fonts'),
                         font_name, font_path)
        return self._refresh_font_cache()
    def install_darwin_font(self, font_name, font_path):
        self._stage_font(os.path.join(os.path.expanduser('~'),
                                      'Library/Fonts'),
                         font_name, font_path)
        return True
    def install_windows_font(self, _, font_path):
        # Register the font with GDI, then broadcast WM_FONTCHANGE so
        # running applications pick up the new font. Returns None (falsy)
        # when registration fails.
        hwnd_broadcast = 0xFFFF
        wm_fontchange = 0x001D
        gdi32 = ctypes.WinDLL('gdi32')
        if gdi32.AddFontResourceW(font_path):
            return bool(ctypes.windll.user32.SendMessageW(hwnd_broadcast,
                                                          wm_fontchange))
    def remove_linux_font(self, font_name, _):
        if self.created_dir:
            rmtree(self.font_dir)
        else:
            # os.path.join instead of '%s/%s' string concatenation.
            os.remove(os.path.join(self.font_dir, font_name))
        return self._refresh_font_cache()
    def remove_darwin_font(self, font_name, _):
        if self.created_dir:
            rmtree(self.font_dir)
        else:
            os.remove(os.path.join(self.font_dir, font_name))
        return True
    def remove_windows_font(self, _, font_path):
        hwnd_broadcast = 0xFFFF
        wm_fontchange = 0x001D
        gdi32 = ctypes.WinDLL('gdi32')
        if gdi32.RemoveFontResourceW(font_path):
            return bool(ctypes.windll.user32.SendMessageW(hwnd_broadcast,
                                                          wm_fontchange))
| mpl-2.0 |
vFense/vFenseAgent-nix | agent/deps/rpm/Python-2.7.5/lib/python2.7/cProfile.py | 169 | 6515 | #! /usr/bin/env python
"""Python interface for the 'lsprof' profiler.
Compatible with the 'profile' module.
"""
__all__ = ["run", "runctx", "help", "Profile"]
import _lsprof
# ____________________________________________________________
# Simple interface
def run(statement, filename=None, sort=-1):
    """Profile *statement*, optionally saving the results to *filename*.

    *statement* is any string acceptable to the "exec" statement. It is
    executed under a fresh Profile instance; afterwards the gathered stats
    are either dumped to *filename* (when given) or printed to stdout,
    sorted by *sort* (by default the standard file/line/function-name
    string shown on each report line).
    """
    profiler = Profile()
    result = None
    try:
        profiler = profiler.run(statement)
    except SystemExit:
        # A sys.exit() inside the profiled code is swallowed so the stats
        # are still reported.
        pass
    finally:
        if filename is None:
            result = profiler.print_stats(sort)
        else:
            profiler.dump_stats(filename)
    return result
def runctx(statement, globals, locals, filename=None, sort=-1):
    """Profile *statement* with caller-supplied globals and locals.

    Behaves like run(), except the statement executes in the given
    namespaces. statement and filename have the same semantics as
    profile.run.
    """
    profiler = Profile()
    result = None
    try:
        profiler = profiler.runctx(statement, globals, locals)
    except SystemExit:
        # Swallow sys.exit() from the profiled code so stats still emerge.
        pass
    finally:
        if filename is None:
            result = profiler.print_stats(sort)
        else:
            profiler.dump_stats(filename)
    return result
# Backwards compatibility.
def help():
    # Kept only for API compatibility with the old 'profile' module; simply
    # points the user at the reference documentation. (Python 2 print
    # statements -- this is a py2 stdlib module.)
    print "Documentation for the profile/cProfile modules can be found "
    print "in the Python Library Reference, section 'The Python Profiler'."
# ____________________________________________________________
class Profile(_lsprof.Profiler):
    """Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)
    Builds a profiler object using the specified timer function.
    The default timer is a fast built-in one based on real time.
    For custom timer functions returning integers, time_unit can
    be a float specifying a scale (i.e. how long each integer unit
    is, in seconds).
    """
    # Most of the functionality is in the base class.
    # This subclass only adds convenient and backward-compatible methods.
    def print_stats(self, sort=-1):
        # Print collected stats to stdout, sorted per pstats sort keys.
        import pstats
        pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()
    def dump_stats(self, file):
        # Serialize the stats dict to *file* with marshal, in the format
        # that pstats.Stats can load back.
        import marshal
        f = open(file, 'wb')
        self.create_stats()
        marshal.dump(self.stats, f)
        f.close()
    def create_stats(self):
        # Stop profiling and convert the raw entries into self.stats.
        self.disable()
        self.snapshot_stats()
    def snapshot_stats(self):
        # Build self.stats in the shape pstats expects:
        # {func_label: (cc, nc, tt, ct, callers_dict)}.
        entries = self.getstats()
        self.stats = {}
        callersdicts = {}
        # call information
        for entry in entries:
            func = label(entry.code)
            nc = entry.callcount # ncalls column of pstats (before '/')
            cc = nc - entry.reccallcount # ncalls column of pstats (after '/')
            tt = entry.inlinetime # tottime column of pstats
            ct = entry.totaltime # cumtime column of pstats
            callers = {}
            callersdicts[id(entry.code)] = callers
            self.stats[func] = cc, nc, tt, ct, callers
        # subcall information
        for entry in entries:
            if entry.calls:
                func = label(entry.code)
                for subentry in entry.calls:
                    try:
                        callers = callersdicts[id(subentry.code)]
                    except KeyError:
                        # Callee was not seen as a top-level entry; skip it.
                        continue
                    nc = subentry.callcount
                    cc = nc - subentry.reccallcount
                    tt = subentry.inlinetime
                    ct = subentry.totaltime
                    if func in callers:
                        # Accumulate across multiple call sites.
                        prev = callers[func]
                        nc += prev[0]
                        cc += prev[1]
                        tt += prev[2]
                        ct += prev[3]
                    callers[func] = nc, cc, tt, ct
    # The following two methods can be called by clients to use
    # a profiler to profile a statement, given as a string.
    def run(self, cmd):
        # Profile *cmd* in __main__'s namespace; returns self.
        import __main__
        dict = __main__.__dict__
        return self.runctx(cmd, dict, dict)
    def runctx(self, cmd, globals, locals):
        # Profile *cmd* executed with the given namespaces; returns self.
        # (Python 2 'exec ... in ...' statement syntax.)
        self.enable()
        try:
            exec cmd in globals, locals
        finally:
            self.disable()
        return self
    # This method is more useful to profile a single function call.
    def runcall(self, func, *args, **kw):
        # Profile a single call func(*args, **kw) and return its result.
        self.enable()
        try:
            return func(*args, **kw)
        finally:
            self.disable()
# ____________________________________________________________
def label(code):
    """Map a code object (or builtin-name string) to a pstats location tuple."""
    if not isinstance(code, str):
        return (code.co_filename, code.co_firstlineno, code.co_name)
    # Built-in functions are represented by bare strings; give them a
    # pseudo-location ('~' sorts after real file names).
    return ('~', 0, code)
# ____________________________________________________________
def main():
    """Command-line entry point: profile a script file given on argv."""
    import os, sys
    from optparse import OptionParser
    usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
    parser = OptionParser(usage=usage)
    # Everything after the script name belongs to the profiled script.
    parser.allow_interspersed_args = False
    parser.add_option('-o', '--outfile', dest="outfile",
        help="Save stats to <outfile>", default=None)
    parser.add_option('-s', '--sort', dest="sort",
        help="Sort order when printing to stdout, based on pstats.Stats class",
        default=-1)
    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)
    (options, args) = parser.parse_args()
    # Make argv look to the profiled script as if it were run directly.
    sys.argv[:] = args
    if args:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))
        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }
        runctx(code, globs, None, options.outfile, options.sort)
    else:
        parser.print_usage()
    return parser
if __name__ == '__main__':
main()
| lgpl-3.0 |
gosharplite/kubernetes | cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | 19 | 63197 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
from charms.leadership import leader_get, leader_set
from shutil import move
from pathlib import Path
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from urllib.request import Request, urlopen
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import endpoint_from_flag
from charms.reactive import when, when_any, when_not, when_none
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.layer import tls_client
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
# Environment variable under which Google Cloud credentials are exposed.
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
# Snaps shipped as charm resources; also drives the checksum bookkeeping
# used to decide whether an upgrade is needed.
snap_resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager',
                  'kube-scheduler', 'cdk-addons']
# Make snap-provided binaries reachable from hook execution.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()  # unit-local key/value store, persists across hooks
def set_upgrade_needed(forced=False):
    """Flag the master snaps as needing an upgrade.

    The upgrade is auto-approved (the 'upgrade-specified' state is also set)
    on a fresh install, when manual upgrades are not required by config, or
    when the caller forces it; otherwise an operator action must approve it.
    """
    set_state('kubernetes-master.upgrade-needed')
    cfg = hookenv.config()
    fresh_install = cfg.previous('channel') is None
    manual_required = cfg.get('require-manual-upgrade')
    hookenv.log('set upgrade needed')
    if fresh_install or not manual_required or forced:
        hookenv.log('forcing upgrade')
        set_state('kubernetes-master.upgrade-specified')
@when('config.changed.channel')
def channel_changed():
    # A new snap channel was configured; schedule a snap upgrade (subject
    # to the require-manual-upgrade policy in set_upgrade_needed).
    set_upgrade_needed()
def service_cidr():
    """Return the effective service CIDR.

    Prefers the value frozen in the unit database (recorded once the
    apiserver starts); falls back to the current charm config otherwise.
    """
    frozen = db.get('kubernetes-master.service-cidr')
    if frozen:
        return frozen
    return hookenv.config('service-cidr')
def freeze_service_cidr():
    ''' Freeze the service CIDR. Once the apiserver has started, we can no
    longer safely change this value. '''
    # Persist the currently-effective CIDR; service_cidr() will prefer this
    # stored value over charm config from now on.
    db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def check_for_upgrade_needed():
    '''An upgrade charm event was triggered by Juju, react to that here.'''
    hookenv.status_set('maintenance', 'Checking resources')
    migrate_from_pre_snaps()
    add_rbac_roles()
    # Force the authentication handlers to run again so upgraded
    # token/auth formats are applied.
    set_state('reconfigure.authentication.setup')
    remove_state('authentication.setup')
    if not db.get('snap.resources.fingerprint.initialised'):
        # We are here on an upgrade from non-rolling master
        # Since this upgrade might also include resource updates eg
        # juju upgrade-charm kubernetes-master --resource kube-any=my.snap
        # we take no risk and forcibly upgrade the snaps.
        # Forcibly means we do not prompt the user to call the upgrade action.
        set_upgrade_needed(forced=True)
    migrate_resource_checksums()
    check_resources_for_upgrade_needed()
    # Set the auto storage backend to etcd2.
    auto_storage_backend = leader_get('auto_storage_backend')
    is_leader = is_state('leadership.is_leader')
    if not auto_storage_backend and is_leader:
        leader_set(auto_storage_backend='etcd2')
def get_resource_checksum_db_key(resource):
    """Build the unit-database key that stores *resource*'s checksum."""
    return 'kubernetes-master.resource-checksums.{}'.format(resource)
def calculate_resource_checksum(resource):
    """MD5-checksum an attached charm resource.

    An unattached resource is treated as empty, yielding the MD5 of zero
    bytes rather than an error.
    """
    digest = hashlib.md5()
    path = hookenv.resource_get(resource)
    if path:
        with open(path, 'rb') as f:
            digest.update(f.read())
    return digest.hexdigest()
def migrate_resource_checksums():
    """Migrate resource checksums from the old db schema to the new one."""
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(resource)
        if db.get(new_key):
            # Already migrated.
            continue
        path = hookenv.resource_get(resource)
        if path:
            # Old key written by charms.reactive.helpers.any_file_changed.
            legacy_key = 'reactive.files_changed.' + path
            db.set(new_key, db.get(legacy_key))
        else:
            # No resource is attached. Previously, this meant no checksum
            # would be calculated and stored. But now we calculate it as if
            # it is a 0-byte resource, so let's go ahead and do that.
            db.set(new_key, hashlib.md5().hexdigest())
def check_resources_for_upgrade_needed():
    """Flag an upgrade if any attached snap resource changed since last run."""
    hookenv.status_set('maintenance', 'Checking resources')
    for resource in snap_resources:
        key = get_resource_checksum_db_key(resource)
        if calculate_resource_checksum(resource) != db.get(key):
            set_upgrade_needed()
def calculate_and_store_resource_checksums():
    """Record the current checksum of every snap resource in the unit db."""
    for resource in snap_resources:
        db.set(get_resource_checksum_db_key(resource),
               calculate_resource_checksum(resource))
def add_rbac_roles():
    '''Update the known_tokens file with proper groups.'''
    # Rewrites /root/cdk/known_tokens.csv in place (via a .backup copy) so
    # legacy token records gain the user/group fields RBAC expects.
    tokens_fname = '/root/cdk/known_tokens.csv'
    tokens_backup_fname = '/root/cdk/known_tokens.csv.backup'
    move(tokens_fname, tokens_backup_fname)
    with open(tokens_fname, 'w') as ftokens:
        with open(tokens_backup_fname, 'r') as stream:
            for line in stream:
                record = line.strip().split(',')
                # token, username, user, groups
                if record[2] == 'admin' and len(record) == 3:
                    # Legacy admin record: append the system:masters group.
                    towrite = '{0},{1},{2},"{3}"\n'.format(record[0],
                                                           record[1],
                                                           record[2],
                                                           'system:masters')
                    ftokens.write(towrite)
                    continue
                if record[2] == 'kube_proxy':
                    # Rename the legacy kube_proxy user to system:kube-proxy.
                    towrite = '{0},{1},{2}\n'.format(record[0],
                                                     'system:kube-proxy',
                                                     'kube-proxy')
                    ftokens.write(towrite)
                    continue
                if record[2] == 'kubelet' and record[1] == 'kubelet':
                    # Drop the legacy shared kubelet token entirely.
                    continue
                # Any other record is carried over unchanged.
                ftokens.write('{}'.format(line))
def rename_file_idempotent(source, destination):
    """Rename *source* to *destination*; no-op if *source* is not a file."""
    if not os.path.isfile(source):
        return
    os.rename(source, destination)
def migrate_from_pre_snaps():
    """One-time migration from the pre-snap install layout: clear obsolete
    reactive states, stop the legacy services, relocate auth files under
    /root/cdk, and delete the old binaries/unit files."""
    # remove old states
    remove_state('kubernetes.components.installed')
    remove_state('kubernetes.dashboard.available')
    remove_state('kube-dns.available')
    remove_state('kubernetes-master.app_version.set')
    # disable old services
    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        host.service_stop(service)
    # rename auth files
    os.makedirs('/root/cdk', exist_ok=True)
    rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
                           '/root/cdk/serviceaccount.key')
    rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
                           '/root/cdk/basic_auth.csv')
    rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
                           '/root/cdk/known_tokens.csv')
    # cleanup old files
    files = [
        "/lib/systemd/system/kube-apiserver.service",
        "/lib/systemd/system/kube-controller-manager.service",
        "/lib/systemd/system/kube-scheduler.service",
        "/etc/default/kube-defaults",
        "/etc/default/kube-apiserver.defaults",
        "/etc/default/kube-controller-manager.defaults",
        "/etc/default/kube-scheduler.defaults",
        "/srv/kubernetes",
        "/home/ubuntu/kubectl",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kube-apiserver",
        "/usr/local/bin/kube-controller-manager",
        "/usr/local/bin/kube-scheduler",
        "/etc/kubernetes"
    ]
    for file in files:
        if os.path.isdir(file):
            hookenv.log("Removing directory: " + file)
            shutil.rmtree(file)
        elif os.path.isfile(file):
            hookenv.log("Removing file: " + file)
            os.remove(file)
@when('kubernetes-master.upgrade-specified')
def do_upgrade():
    # An upgrade was requested and approved: reinstall the snaps, then
    # clear both upgrade flags so this runs only once per request.
    install_snaps()
    remove_state('kubernetes-master.upgrade-needed')
    remove_state('kubernetes-master.upgrade-specified')
def install_snaps():
    """Install every master snap from the configured channel, then record
    resource checksums and flip the reactive flags so the master components
    are (re)started with the new binaries."""
    channel = hookenv.config('channel')
    # kubectl is the only snap that requires classic confinement.
    plan = [
        ('kubectl', {'classic': True}),
        ('kube-apiserver', {}),
        ('kube-controller-manager', {}),
        ('kube-scheduler', {}),
        ('cdk-addons', {}),
    ]
    for snap_name, extra in plan:
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, **extra)
    calculate_and_store_resource_checksums()
    db.set('snap.resources.fingerprint.initialised', True)
    set_state('kubernetes-master.snaps.installed')
    remove_state('kubernetes-master.components.started')
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
    """Handle password change via the charms config."""
    password = hookenv.config('client_password')
    if password == "" and is_state('client.password.initialised'):
        # password_changed is called during an upgrade. Nothing to do.
        return
    elif password == "":
        # Password not initialised
        password = token_generator()
    # (Re)write basic auth with admin in system:masters, then force the
    # authentication setup handlers to run again with the new credentials.
    setup_basic_auth(password, "admin", "admin", "system:masters")
    set_state('reconfigure.authentication.setup')
    remove_state('authentication.setup')
    set_state('client.password.initialised')
@when('config.changed.storage-backend')
def storage_backend_changed():
    """Force a master reconfiguration so the new storage backend is used."""
    remove_state('kubernetes-master.components.started')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    """Publish master-side configuration on the CNI relation.

    Lets the CNI subordinate know that we're the master so it can
    respond accordingly.
    """
    cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.

    Runs only on the leader: creates (or regenerates) the basic-auth file,
    the known-tokens file and the service account key, then broadcasts all
    three via leader data for non-leaders to mirror.
    '''
    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'
    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    keys = [service_key, basic_auth, known_tokens]
    # Try first to fetch data from an old leadership broadcast; if nothing
    # was broadcast (fresh deploy) or a reconfigure was requested, generate
    # the material locally instead.
    if not get_keys_from_leader(keys) \
            or is_state('reconfigure.authentication.setup'):
        last_pass = get_password('basic_auth.csv', 'admin')
        setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters')

        if not os.path.isfile(known_tokens):
            touch(known_tokens)

        # Generate the default service account token key
        os.makedirs('/root/cdk', exist_ok=True)
        if not os.path.isfile(service_key):
            cmd = ['openssl', 'genrsa', '-out', service_key,
                   '2048']
            check_call(cmd)
        remove_state('reconfigure.authentication.setup')

    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()

    # this is slightly opaque, but we are sending file contents under its file
    # path as a key.
    # eg:
    # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
    leader_set(leader_data)
    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
    """Mirror the leader's authentication material onto this unit."""
    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'
    keys = [service_key, basic_auth, known_tokens]
    # The leader is the source of truth, so always overwrite local copies.
    if not get_keys_from_leader(keys, overwrite_local=True):
        # The keys were not retrieved; non-leaders have to retry later.
        return
    unchanged = not any_file_changed(keys)
    if unchanged and is_state('authentication.setup'):
        # Nothing new arrived and authentication is already configured.
        return
    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
    """Fetch leader-broadcast key material into the given file paths.

    Args:
        keys: list of file paths to populate from leader data.
        overwrite_local: replace existing local files when True.

    Returns: True if every key was fetched, False otherwise.
    """
    # This races with other codepaths, and seems to require being created
    # first; may be extracted later, but for now works as intended.
    os.makedirs('/root/cdk', exist_ok=True)
    for path in keys:
        if os.path.exists(path) and not overwrite_local:
            # A local copy already exists and we were not told to replace it.
            continue
        contents = leader_get(path)
        if contents is None:
            # Leader has not broadcast this file yet; caller must retry.
            hookenv.log('Missing content for file {}'.format(path))
            return False
        with open(path, 'w+') as fp:
            fp.write(contents)
            fp.write('\n')
    return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
    """Declare the application version to juju."""
    raw = check_output(['kube-apiserver', '--version'])
    # Output looks like b'... vX.Y.Z\n'; keep everything after the last ' v'.
    hookenv.application_version_set(raw.split(b' v')[-1].rstrip())
@hookenv.atexit
def set_final_status():
    '''Set the final status of the charm as we leave hook execution.

    Checks are ordered from most to least blocking; each failing check sets
    a status and returns so that later checks cannot overwrite it.
    '''
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        goal_state = {}

    if not is_state('kube-api-endpoint.available'):
        # 'waiting' if the relation is on its way, 'blocked' if missing.
        if 'kube-api-endpoint' in goal_state.get('relations', {}):
            status = 'waiting'
        else:
            status = 'blocked'
        hookenv.status_set(status, 'Waiting for kube-api-endpoint relation')
        return

    if not is_state('kube-control.connected'):
        if 'kube-control' in goal_state.get('relations', {}):
            status = 'waiting'
        else:
            status = 'blocked'
        hookenv.status_set(status, 'Waiting for workers.')
        return

    upgrade_needed = is_state('kubernetes-master.upgrade-needed')
    upgrade_specified = is_state('kubernetes-master.upgrade-specified')
    if upgrade_needed and not upgrade_specified:
        msg = 'Needs manual upgrade, run the upgrade action'
        hookenv.status_set('blocked', msg)
        return

    if is_state('kubernetes-master.components.started'):
        # All services should be up and running at this point. Double-check...
        failing_services = master_services_down()
        if len(failing_services) != 0:
            msg = 'Stopped services: {}'.format(','.join(failing_services))
            hookenv.status_set('blocked', msg)
            return

    is_leader = is_state('leadership.is_leader')
    authentication_setup = is_state('authentication.setup')
    if not is_leader and not authentication_setup:
        hookenv.status_set('waiting', 'Waiting on leaders crypto keys.')
        return

    components_started = is_state('kubernetes-master.components.started')
    addons_configured = is_state('cdk-addons.configured')
    if components_started and not addons_configured:
        hookenv.status_set('waiting', 'Waiting to retry addon deployment')
        return

    req_sent = is_state('kubernetes-master.cloud-request-sent')
    openstack_joined = is_state('endpoint.openstack.joined')
    cloud_req = req_sent or openstack_joined
    aws_ready = is_state('endpoint.aws.ready')
    gcp_ready = is_state('endpoint.gcp.ready')
    openstack_ready = is_state('endpoint.openstack.ready')
    cloud_ready = aws_ready or gcp_ready or openstack_ready
    if cloud_req and not cloud_ready:
        hookenv.status_set('waiting', 'waiting for cloud integration')
        # FIX: previously fell through here, letting a later check overwrite
        # this status (possibly with 'active') while cloud integration was
        # still pending. Return like every other waiting branch.
        return

    if addons_configured and not all_kube_system_pods_running():
        hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
        return

    if hookenv.config('service-cidr') != service_cidr():
        msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
        hookenv.status_set('active', msg)
        return

    gpu_available = is_state('kube-control.gpu.available')
    gpu_enabled = is_state('kubernetes-master.gpu.enabled')
    if gpu_available and not gpu_enabled:
        msg = 'GPUs available. Set allow-privileged="auto" to enable.'
        hookenv.status_set('active', msg)
        return

    hookenv.status_set('active', 'Kubernetes master running.')
def master_services_down():
    """Check which master services are not running.

    Return: list of failing services
    """
    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    # Each service runs as a snap daemon named snap.<service>.daemon.
    return [service for service in services
            if not host.service_running('snap.{}.daemon'.format(service))]
@when('etcd.available', 'tls_client.server.certificate.saved',
      'authentication.setup')
@when('leadership.set.auto_storage_backend')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
    '''Run the Kubernetes master components.

    Renders configuration for and restarts the apiserver, controller
    manager and scheduler, then opens the API port.
    '''
    hookenv.status_set('maintenance',
                       'Configuring the Kubernetes master services.')
    freeze_service_cidr()
    if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
        # the master unit disconnects from etcd and is ready to terminate.
        # No point in trying to start master services and fail. Just return.
        return

    # TODO: Make sure below relation is handled on change
    # https://github.com/kubernetes/kubernetes/issues/43461
    handle_etcd_relation(etcd)

    # Add CLI options to all components
    configure_apiserver(etcd.get_connection_string())
    configure_controller_manager()
    configure_scheduler()

    set_state('kubernetes-master.components.started')
    hookenv.open_port(6443)
@when('etcd.available')
def etcd_data_change(etcd):
    """React to changes in the etcd connection string.

    Etcd scale events are blocked by the kubernetes-master.components.started
    state, so we key off the connection string and only force a master
    reconfiguration when it actually changes.
    """
    connection_string = etcd.get_connection_string()
    if data_changed('etcd-connect', connection_string):
        # Connection string changed: trigger handling of the components.
        remove_state('kubernetes-master.components.started')

    # When the leader has not yet recorded auto_storage_backend, this is the
    # first time we connect to etcd: pick the backend from the etcd version.
    is_leader = is_state('leadership.is_leader')
    auto_storage_backend = leader_get('auto_storage_backend')
    if is_leader and not auto_storage_backend:
        if etcd.get_version().startswith('3.'):
            leader_set(auto_storage_backend='etcd3')
        else:
            leader_set(auto_storage_backend='etcd2')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
    """Publish cluster DNS details on the kube-control relation."""
    dns_enabled = hookenv.config('enable-kube-dns')
    domain = hookenv.config('dns_domain')
    dns_ip = None
    if dns_enabled:
        try:
            dns_ip = get_dns_ip()
        except CalledProcessError:
            # The kube-dns service is not up yet; retry on a later hook.
            hookenv.log("kubedns not ready yet")
            return
    kube_control.set_dns(53, domain, dns_ip, dns_enabled)
@when('kube-control.connected')
@when('snap.installed.kubectl')
@when('leadership.is_leader')
def create_service_configs(kube_control):
    """Create the users for kubelet.

    Ensures proxy/admin tokens exist, signs every worker's auth request,
    and restarts the apiserver when any token was (re)created.
    """
    should_restart = False
    # generate the username/pass for the requesting unit
    proxy_token = get_token('system:kube-proxy')
    if not proxy_token:
        setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
        proxy_token = get_token('system:kube-proxy')
        should_restart = True

    client_token = get_token('admin')
    if not client_token:
        setup_tokens(None, 'admin', 'admin', "system:masters")
        client_token = get_token('admin')
        should_restart = True

    # Sign each worker unit's authentication request from the relation.
    requests = kube_control.auth_user()
    for request in requests:
        username = request[1]['user']
        group = request[1]['group']
        kubelet_token = get_token(username)
        if not kubelet_token and username and group:
            # Usernames have to be in the form of system:node:<nodeName>
            userid = "kubelet-{}".format(request[0].split('/')[1])
            setup_tokens(None, username, userid, group)
            kubelet_token = get_token(username)

        kube_control.sign_auth_request(request[0], username,
                                       kubelet_token, proxy_token,
                                       client_token)
        should_restart = True

    if should_restart:
        # New tokens only take effect after an apiserver restart; also force
        # the authentication material to be re-broadcast.
        host.service_restart('snap.kube-apiserver.daemon')
        remove_state('authentication.setup')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
    """Send the API port to the load balancer relation."""
    kube_api.configure(port=6443)
def get_ingress_address(relation_name):
    """Return the ingress address for *relation_name*.

    Falls back to the unit's private-address when running under a Juju
    version without network-get / spaces support.
    """
    try:
        network_info = hookenv.network_get(relation_name)
    except NotImplementedError:
        network_info = []
    if not network_info or 'ingress-addresses' not in network_info:
        # No spaces support: just return the private address.
        return hookenv.unit_get('private-address')
    # Just grab the first one for now; maybe be more robust here?
    return network_info['ingress-addresses'][0]
@when('certificates.available', 'kube-api-endpoint.available')
def send_data(tls, kube_api_endpoint):
    """Request a server certificate carrying this unit's names/addresses."""
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    # Gather the addresses and names the certificate must cover.
    kubernetes_service_ip = get_kubernetes_service_ip()
    ingress_ip = get_ingress_address(kube_api_endpoint.relation_name)
    domain = hookenv.config('dns_domain')

    # SANs that the tls layer will embed in the server cert.
    sans = [
        hookenv.unit_public_ip(),
        ingress_ip,
        socket.gethostname(),
        kubernetes_service_ip,
        'kubernetes',
        'kubernetes.{0}'.format(domain),
        'kubernetes.default',
        'kubernetes.default.svc',
        'kubernetes.default.svc.{0}'.format(domain)
    ]

    # Operators may request extra SANs via config.
    extra_sans = hookenv.config('extra_sans')
    if extra_sans:
        sans.extend(extra_sans.split())

    # Path-safe certificate name derived from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')

    tls.request_server_cert(common_name, sans, certificate_name)
@when('config.changed.extra_sans', 'certificates.available',
      'kube-api-endpoint.available')
def update_certificate(tls, kube_api_endpoint):
    """Re-request the server certificate when extra_sans changes.

    IP changes take roughly five minutes to propagate, but they do update.
    """
    send_data(tls, kube_api_endpoint)
@when('certificates.server.cert.available',
      'kubernetes-master.components.started',
      'tls_client.server.certificate.written')
def kick_api_server(tls):
    """Restart the API server only when its certificate actually changed.

    Must stay idempotent: no restart without need.
    """
    if data_changed('cert', tls.get_server_cert()):
        hookenv.log("Certificate information changed, restarting api server")
        restart_apiserver()
    # Always clear the write flag, restarted or not.
    tls_client.reset_certificate_write_flag('server')
@when('kubernetes-master.components.started')
def configure_cdk_addons():
    '''Configure CDK addons.

    Pushes the addon settings into the cdk-addons snap and applies them,
    setting cdk-addons.configured only when the apply succeeded.
    '''
    remove_state('cdk-addons.configured')
    # GPU support in the addons needs apiserver >= 1.9, the nvidia plugin
    # config left at "auto", and GPU mode already enabled on this master.
    load_gpu_plugin = hookenv.config('enable-nvidia-plugin').lower()
    gpuEnable = (get_version('kube-apiserver') >= (1, 9) and
                 load_gpu_plugin == "auto" and
                 is_state('kubernetes-master.gpu.enabled'))
    registry = hookenv.config('addons-registry')
    dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
    dnsEnabled = str(hookenv.config('enable-kube-dns')).lower()
    metricsEnabled = str(hookenv.config('enable-metrics')).lower()
    args = [
        'arch=' + arch(),
        'dns-ip=' + get_deprecated_dns_ip(),
        'dns-domain=' + hookenv.config('dns_domain'),
        'registry=' + registry,
        'enable-dashboard=' + dbEnabled,
        'enable-kube-dns=' + dnsEnabled,
        'enable-metrics=' + metricsEnabled,
        'enable-gpu=' + str(gpuEnable).lower()
    ]
    check_call(['snap', 'set', 'cdk-addons'] + args)
    if not addons_ready():
        # Leave the state cleared so this handler re-runs and retries.
        remove_state('cdk-addons.configured')
        return

    set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
    """Probe whether the add-ons were installed successfully.

    Returns: True if the addons got applied
    """
    try:
        check_call(['cdk-addons.apply'])
    except CalledProcessError:
        hookenv.log("Addons are not ready yet.")
        return False
    return True
@when('loadbalancer.available', 'certificates.ca.available',
      'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
    """Build the admin kubeconfig pointing at the load balancer."""
    # The first load balancer entry is the public entry point users will use.
    hosts = loadbalancer.get_addresses_ports()
    address = hosts[0].get('public-address')
    port = hosts[0].get('port')
    build_kubeconfig('https://{0}:{1}'.format(address, port))
@when('certificates.ca.available', 'certificates.client.cert.available',
      'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
    """Build a kubeconfig that targets this master unit directly."""
    address = hookenv.unit_get('public-address')
    build_kubeconfig('https://{0}:{1}'.format(address, 6443))
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
    '''Determine if we should remove the state that controls the re-render
    and execution of the ceph-relation-changed event because there
    are changes in the relationship data, and we should re-render any
    configs, keys, and/or service pre-reqs.'''
    # Snapshot of the relation data we care about; data_changed compares it
    # against the previously recorded snapshot.
    ceph_relation_data = {
        'mon_hosts': ceph_admin.mon_hosts(),
        'fsid': ceph_admin.fsid(),
        'auth_supported': ceph_admin.auth(),
        'hostname': socket.gethostname(),
        'key': ceph_admin.key()
    }

    # Re-execute the rendering if the data has changed.
    if data_changed('ceph-config', ceph_relation_data):
        remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
    '''Ceph on kubernetes will require a few things - namely a ceph
    configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisit files
    in order to consume the ceph-storage relation.'''
    ceph_context = {
        'mon_hosts': ceph_admin.mon_hosts(),
        'fsid': ceph_admin.fsid(),
        'auth_supported': ceph_admin.auth(),
        'use_syslog': "true",
        'ceph_public_network': '',
        'ceph_cluster_network': '',
        'loglevel': 1,
        'hostname': socket.gethostname(),
    }
    # Install the ceph common utilities.
    apt_install(['ceph-common'], fatal=True)

    etc_ceph_directory = '/etc/ceph'
    if not os.path.isdir(etc_ceph_directory):
        os.makedirs(etc_ceph_directory)
    charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
    # Render the ceph configuration from the ceph conf template
    render('ceph.conf', charm_ceph_conf, ceph_context)

    # The key can rotate independently of other ceph config, so validate it
    admin_key = os.path.join(etc_ceph_directory,
                             'ceph.client.admin.keyring')
    try:
        with open(admin_key, 'w') as key_file:
            key_file.write("[client.admin]\n\tkey = {}\n".format(
                ceph_admin.key()))
    except IOError as err:
        # Best-effort: log and continue; the secret enlistment below still
        # guards on the key being present.
        hookenv.log("IOError writing admin.keyring: {}".format(err))

    # Enlist the ceph-admin key as a kubernetes secret
    if ceph_admin.key():
        encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
    else:
        # We didn't have a key, and cannot proceed. Do not set state and
        # allow this method to re-execute
        return

    context = {'secret': encoded_key.decode('ascii')}
    render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
    try:
        # At first glance this is deceptive. The apply stanza will create if
        # it doesn't exist, otherwise it will update the entry, ensuring our
        # ceph-secret is always reflective of what we have in /etc/ceph
        # assuming we have invoked this anytime that file would change.
        cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
        check_call(cmd)
        os.remove('/tmp/ceph-secret.yaml')
    except:  # NOQA
        # NOTE(review): bare except deliberately swallows any failure here so
        # the handler re-executes later; consider narrowing to
        # CalledProcessError/OSError.
        # the enlistment in kubernetes failed, return and prepare for re-exec
        return

    # when complete, set a state relating to configuration of the storage
    # backend that will allow other modules to hook into this and verify we
    # have performed the necessary pre-req steps to interface with a ceph
    # deployment.
    set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    """Perform the one-time NRPE setup, then delegate to update_nrpe_config."""
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('config.changed.authorization-mode',
      'kubernetes-master.components.started')
def switch_auth_mode():
    """Restart the master components when the authorization mode changes."""
    mode = hookenv.config().get('authorization-mode')
    if data_changed('auth-mode', mode):
        remove_state('kubernetes-master.components.started')
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    """(Re)write the NRPE checks for the master snap daemons."""
    services = (
        'snap.kube-apiserver.daemon',
        'snap.kube-controller-manager.daemon',
        'snap.kube-scheduler.daemon'
    )
    nagios_hostname = nrpe.get_nagios_hostname()
    unit_name = nrpe.get_nagios_unit_name()
    checker = nrpe.NRPE(hostname=nagios_hostname)
    nrpe.add_init_service_checks(checker, services, unit_name)
    checker.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    """Drop the NRPE checks when the nagios relation goes away."""
    remove_state('nrpe-external-master.initial-config')
    # The current nrpe-external-master interface doesn't handle a lot of
    # logic; use the charm-helpers code for now.
    checker = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    # Systemd services for which the checks will be removed.
    for daemon in ('snap.kube-apiserver.daemon',
                   'snap.kube-controller-manager.daemon',
                   'snap.kube-scheduler.daemon'):
        checker.remove_check(shortname=daemon)
def is_privileged():
    """Return boolean indicating whether or not to set allow-privileged=true.
    """
    privileged = hookenv.config('allow-privileged').lower()
    if privileged != 'auto':
        return privileged == 'true'
    # 'auto': privileged mode follows GPU availability.
    return is_state('kubernetes-master.gpu.enabled')
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value."""
    for state in ('kubernetes-master.components.started',
                  'config.changed.allow-privileged'):
        remove_state(state)
@when_any('config.changed.api-extra-args',
          'config.changed.audit-policy',
          'config.changed.audit-webhook-config')
@when('kubernetes-master.components.started')
@when('leadership.set.auto_storage_backend')
@when('etcd.available')
def reconfigure_apiserver(etcd):
    """Re-render and restart kube-apiserver after relevant config changes."""
    configure_apiserver(etcd.get_connection_string())
@when('config.changed.controller-manager-extra-args')
@when('kubernetes-master.components.started')
def on_config_controller_manager_extra_args_change():
    """Re-render the controller manager config when its extra args change."""
    configure_controller_manager()
@when('config.changed.scheduler-extra-args')
@when('kubernetes-master.components.started')
def on_config_scheduler_extra_args_change():
    """Re-render the scheduler config when its extra args change."""
    configure_scheduler()
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
    """The remote side (kubernetes-worker) is gpu-enabled.

    We need to run in privileged mode.
    """
    kube_version = get_version('kube-apiserver')
    allow_privileged = hookenv.config()['allow-privileged'].lower()
    # Pre-1.9 GPU support requires privileged mode; respect an explicit
    # operator opt-out on those versions.
    if allow_privileged == "false" and kube_version < (1, 9):
        return
    remove_state('kubernetes-master.components.started')
    set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.privileged')
def gpu_with_no_privileged():
    """Drop GPU mode: the operator set allow-privileged="false".

    Only relevant pre-1.9, where GPU mode requires privileged containers.
    """
    if get_version('kube-apiserver') < (1, 9):
        remove_state('kubernetes-master.gpu.enabled')
@when('kube-control.connected')
@when_not('kube-control.gpu.available')
@when('kubernetes-master.gpu.enabled')
@when('kubernetes-master.components.started')
def gpu_departed(kube_control):
    """Leave GPU mode: the workers report no GPU support anymore."""
    remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
    """Stop the kubernetes master services."""
    for daemon in ('snap.kube-apiserver.daemon',
                   'snap.kube-controller-manager.daemon',
                   'snap.kube-scheduler.daemon'):
        service_stop(daemon)
def restart_apiserver():
    """Restart the kube-apiserver snap daemon, surfacing maintenance status."""
    hookenv.status_set('maintenance', 'Restarting kube-apiserver')
    host.service_restart('snap.kube-apiserver.daemon')
def restart_controller_manager():
    """Restart the kube-controller-manager snap daemon."""
    hookenv.status_set('maintenance', 'Restarting kube-controller-manager')
    host.service_restart('snap.kube-controller-manager.daemon')
def restart_scheduler():
    """Restart the kube-scheduler snap daemon."""
    hookenv.status_set('maintenance', 'Restarting kube-scheduler')
    host.service_restart('snap.kube-scheduler.daemon')
def arch():
    """Return this system's package architecture as a string (e.g. 'amd64').

    Only a subset of architectures is supported by kubernetes.
    """
    # dpkg reports the architecture; decode the binary result into a string.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.decode('utf-8').rstrip()
def build_kubeconfig(server):
    """Create the admin kubeconfig at /home/ubuntu/config for *server*."""
    # The tls-client layer options hold the CA certificate path.
    layer_options = layer.options('tls-client')
    ca = layer_options.get('ca_certificate_path')
    client_pass = get_password('basic_auth.csv', 'admin')
    # Bail out until both the CA file and the admin password exist.
    if not (ca and os.path.isfile(ca) and client_pass):
        return
    # Absolute path for the kubeconfig so users can access the cluster.
    kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
    create_kubeconfig(kubeconfig_path, server, ca,
                      user='admin', password=client_pass)
    # Readable by the ubuntu user so `juju scp` works.
    check_call(['chown', 'ubuntu:ubuntu', kubeconfig_path])
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a Kubernetes configuration file at *kubeconfig* by shelling
    out to `kubectl config`.

    Exactly one credential mechanism must be provided: client key+cert,
    password, or token (token and password are mutually exclusive).
    Raises ValueError when no mechanism, or conflicting mechanisms, are
    given.
    '''
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')

    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)

    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
def get_dns_ip():
    """Return the cluster IP of the kube-dns service.

    Raises CalledProcessError when kubectl fails (e.g. service not yet up).
    """
    cmd = "kubectl get service --namespace kube-system kube-dns --output json"
    svc = json.loads(check_output(cmd, shell=True).decode())
    return svc['spec']['clusterIP']
def get_deprecated_dns_ip():
    """Return the historically hard-coded DNS IP (.10 in the service CIDR).

    Kept for use with older versions of cdk_addons.
    """
    network = ipaddress.IPv4Interface(service_cidr()).network
    return (network.network_address + 10).exploded
def get_kubernetes_service_ip():
    """Return the kubernetes service IP: the .1 address of the service CIDR."""
    network = ipaddress.IPv4Interface(service_cidr()).network
    return (network.network_address + 1).exploded
def handle_etcd_relation(reldata):
    """Persist the etcd client TLS credentials locally.

    Called when etcd declares itself available; the relation interface
    writes the client ca/key/cert to the paths below.
    """
    etcd_dir = '/root/cdk/etcd'
    ca = os.path.join(etcd_dir, 'client-ca.pem')
    key = os.path.join(etcd_dir, 'client-key.pem')
    cert = os.path.join(etcd_dir, 'client-cert.pem')
    reldata.save_client_credentials(key, cert, ca)
def parse_extra_args(config_key):
    """Parse a space-separated key=value config option into a dict.

    Tokens without '=' map to the string 'true'.
    """
    args = {}
    for token in hookenv.config().get(config_key, '').split():
        key, sep, value = token.partition('=')
        args[key] = value if sep else 'true'
    return args
def configure_kubernetes_service(service, base_args, extra_args_key):
    """Apply snap configuration for *service*, clearing stale options.

    Options set on a previous run but absent now are set to 'null' so the
    snap drops them from its configuration.
    """
    prev_args_key = 'kubernetes-master.prev_args.' + service
    prev_args = db.get(prev_args_key) or {}
    extra_args = parse_extra_args(extra_args_key)

    # Start by nulling every previous arg, then overlay current values.
    args = {arg: 'null' for arg in prev_args}
    args.update(base_args)
    args.update(extra_args)

    cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
    check_call(cmd)

    db.set(prev_args_key, args)
def remove_if_exists(path):
    """Delete *path*, silently ignoring a file that does not exist."""
    try:
        os.remove(path)
    except FileNotFoundError:
        return
def write_audit_config_file(path, contents):
    """Write audit *contents* to *path*, prefixed with a marker header."""
    header = '# Autogenerated by kubernetes-master charm'
    with open(path, 'w') as f:
        f.write('\n'.join([header, contents]))
def configure_apiserver(etcd_connection_string):
    """Render kube-apiserver's snap configuration and restart the daemon."""
    api_opts = {}

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # at one point in time, this code would set ca-client-cert,
    # but this was removed. This was before configure_kubernetes_service
    # kept track of old arguments and removed them, so client-ca-cert
    # was able to hang around forever stored in the snap configuration.
    # This removes that stale configuration from the snap if it still
    # exists.
    api_opts['client-ca-file'] = 'null'

    # Track the privileged decision with a state so other handlers can react.
    if is_privileged():
        api_opts['allow-privileged'] = 'true'
        set_state('kubernetes-master.privileged')
    else:
        api_opts['allow-privileged'] = 'false'
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts['service-cluster-ip-range'] = service_cidr()
    api_opts['min-request-timeout'] = '300'
    api_opts['v'] = '4'
    api_opts['tls-cert-file'] = server_cert_path
    api_opts['tls-private-key-file'] = server_key_path
    api_opts['kubelet-certificate-authority'] = ca_cert_path
    api_opts['kubelet-client-certificate'] = client_cert_path
    api_opts['kubelet-client-key'] = client_key_path
    api_opts['logtostderr'] = 'true'
    api_opts['insecure-bind-address'] = '127.0.0.1'
    api_opts['insecure-port'] = '8080'
    api_opts['storage-backend'] = getStorageBackend()
    api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
    api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
    api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
    api_opts['kubelet-preferred-address-types'] = \
        '[InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP]'
    api_opts['advertise-address'] = get_ingress_address('kube-control')

    # etcd client credentials as saved by handle_etcd_relation().
    etcd_dir = '/root/cdk/etcd'
    etcd_ca = os.path.join(etcd_dir, 'client-ca.pem')
    etcd_key = os.path.join(etcd_dir, 'client-key.pem')
    etcd_cert = os.path.join(etcd_dir, 'client-cert.pem')

    api_opts['etcd-cafile'] = etcd_ca
    api_opts['etcd-keyfile'] = etcd_key
    api_opts['etcd-certfile'] = etcd_cert
    api_opts['etcd-servers'] = etcd_connection_string

    # Admission plugin sets differ between pre-1.9 and 1.9+ apiservers.
    admission_control_pre_1_9 = [
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    admission_control = [
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'PersistentVolumeLabel',
        'DefaultStorageClass',
        'DefaultTolerationSeconds',
        'MutatingAdmissionWebhook',
        'ValidatingAdmissionWebhook',
        'ResourceQuota'
    ]

    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')

    api_opts['authorization-mode'] = auth_mode

    kube_version = get_version('kube-apiserver')
    if kube_version < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control_pre_1_9.remove('DefaultTolerationSeconds')
    if kube_version < (1, 9):
        api_opts['admission-control'] = ','.join(admission_control_pre_1_9)
    else:
        api_opts['admission-control'] = ','.join(admission_control)

    # Aggregation/metrics support needs the request-header flags (1.7+).
    if kube_version > (1, 6) and \
            hookenv.config('enable-metrics'):
        api_opts['requestheader-client-ca-file'] = ca_cert_path
        api_opts['requestheader-allowed-names'] = 'client'
        api_opts['requestheader-extra-headers-prefix'] = 'X-Remote-Extra-'
        api_opts['requestheader-group-headers'] = 'X-Remote-Group'
        api_opts['requestheader-username-headers'] = 'X-Remote-User'
        api_opts['proxy-client-cert-file'] = client_cert_path
        api_opts['proxy-client-key-file'] = client_key_path
        api_opts['enable-aggregator-routing'] = 'true'
        api_opts['client-ca-file'] = ca_cert_path

    # Cloud provider integration, when the matching endpoint is ready.
    if is_state('endpoint.aws.ready'):
        api_opts['cloud-provider'] = 'aws'
    elif is_state('endpoint.gcp.ready'):
        cloud_config_path = _cloud_config_path('kube-apiserver')
        api_opts['cloud-provider'] = 'gce'
        api_opts['cloud-config'] = str(cloud_config_path)
    elif is_state('endpoint.openstack.ready'):
        cloud_config_path = _cloud_config_path('kube-apiserver')
        api_opts['cloud-provider'] = 'openstack'
        api_opts['cloud-config'] = str(cloud_config_path)

    # Audit logging and optional operator-supplied policy/webhook config.
    audit_root = '/root/cdk/audit'
    os.makedirs(audit_root, exist_ok=True)

    audit_log_path = audit_root + '/audit.log'
    api_opts['audit-log-path'] = audit_log_path
    api_opts['audit-log-maxsize'] = '100'
    api_opts['audit-log-maxbackup'] = '9'

    audit_policy_path = audit_root + '/audit-policy.yaml'
    audit_policy = hookenv.config('audit-policy')
    if audit_policy:
        write_audit_config_file(audit_policy_path, audit_policy)
        api_opts['audit-policy-file'] = audit_policy_path
    else:
        remove_if_exists(audit_policy_path)

    audit_webhook_config_path = audit_root + '/audit-webhook-config.yaml'
    audit_webhook_config = hookenv.config('audit-webhook-config')
    if audit_webhook_config:
        write_audit_config_file(audit_webhook_config_path,
                                audit_webhook_config)
        api_opts['audit-webhook-config-file'] = audit_webhook_config_path
    else:
        remove_if_exists(audit_webhook_config_path)

    configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
    restart_apiserver()
def configure_controller_manager():
    """Render kube-controller-manager arguments and restart the service."""
    # The CA certificate path comes from the tls-client layer options.
    tls_opts = layer.options('tls-client')
    ca_path = tls_opts.get('ca_certificate_path')
    # Default to 3 minute resync. TODO: Make this configurable?
    opts = {
        'min-resync-period': '3m',
        'v': '2',
        'root-ca-file': ca_path,
        'logtostderr': 'true',
        'master': 'http://127.0.0.1:8080',
        'service-account-private-key-file': '/root/cdk/serviceaccount.key',
    }
    if is_state('endpoint.aws.ready'):
        opts['cloud-provider'] = 'aws'
    elif is_state('endpoint.gcp.ready'):
        opts['cloud-provider'] = 'gce'
        opts['cloud-config'] = str(_cloud_config_path('kube-controller-manager'))
    elif is_state('endpoint.openstack.ready'):
        opts['cloud-provider'] = 'openstack'
        opts['cloud-config'] = str(_cloud_config_path('kube-controller-manager'))
    configure_kubernetes_service('kube-controller-manager', opts,
                                 'controller-manager-extra-args')
    restart_controller_manager()
def configure_scheduler():
    """Render kube-scheduler arguments and restart the service."""
    scheduler_opts = {
        'v': '2',
        'logtostderr': 'true',
        'master': 'http://127.0.0.1:8080',
    }
    configure_kubernetes_service('kube-scheduler', scheduler_opts,
                                 'scheduler-extra-args')
    restart_scheduler()
def setup_basic_auth(password=None, username='admin', uid='admin',
                     groups=None):
    '''Create the basic-auth csv file consumed by the API server.

    param: password - password to record; generated when not supplied
    param: username - login name recorded in the csv
    param: uid - user id recorded in the csv
    param: groups - optional comma-separated group string (quoted in the csv)
    '''
    root_cdk = '/root/cdk'
    # Idempotent directory creation; matches the audit dir handling used
    # elsewhere in this file and avoids the isdir/makedirs race.
    os.makedirs(root_cdk, exist_ok=True)
    htaccess = os.path.join(root_cdk, 'basic_auth.csv')
    if not password:
        password = token_generator()
    with open(htaccess, 'w') as stream:
        if groups:
            stream.write('{0},{1},{2},"{3}"'.format(password,
                                                    username, uid, groups))
        else:
            stream.write('{0},{1},{2}'.format(password, username, uid))
def setup_tokens(token, username, user, groups=None):
    '''Append an entry to the kubernetes known-tokens csv file.

    param: token - auth token; generated when falsy
    param: username - login name recorded in the csv
    param: user - user id recorded in the csv
    param: groups - optional comma-separated group string (quoted in the csv)
    '''
    root_cdk = '/root/cdk'
    # Idempotent directory creation; matches the audit dir handling used
    # elsewhere in this file and avoids the isdir/makedirs race.
    os.makedirs(root_cdk, exist_ok=True)
    known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
    if not token:
        token = token_generator()
    with open(known_tokens, 'a') as stream:
        if groups:
            stream.write('{0},{1},{2},"{3}"\n'.format(token,
                                                      username,
                                                      user,
                                                      groups))
        else:
            stream.write('{0},{1},{2}\n'.format(token, username, user))
def get_password(csv_fname, user):
    '''Return the password for *user* from a csv file under /root/cdk.

    Returns None when the file is missing or the user has no entry.
    '''
    tokens_fname = os.path.join('/root/cdk', csv_fname)
    if not os.path.isfile(tokens_fname):
        return None
    with open(tokens_fname, 'r') as stream:
        for line in stream:
            fields = line.split(',')
            if fields[1] == user:
                return fields[0]
    return None
def get_token(username):
    """Return the stored auth token for *username*, or None if absent."""
    token = get_password('known_tokens.csv', username)
    return token
def set_token(password, save_salt):
    ''' Store a token so it can be recalled later by token_generator.

    param: password - the password to be stored
    param: save_salt - the key to store the value of the token.'''
    # db is the charm's module-level unit-data store; read the value back
    # so callers receive exactly what was persisted.
    db.set(save_salt, password)
    return db.get(save_salt)
def token_generator(length=32):
    ''' Generate a random token for use in passwords and account tokens.

    param: length - the length of the token to generate'''
    rng = random.SystemRandom()
    charset = string.ascii_letters + string.digits
    return ''.join(rng.choice(charset) for _ in range(length))
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
    ''' Check pod status in the kube-system namespace. Returns True if all
    pods are running, False otherwise. '''
    cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
    try:
        output = check_output(cmd).decode('utf-8')
        result = json.loads(output)
    except CalledProcessError:
        hookenv.log('failed to get kube-system pod status')
        return False
    hookenv.log('Checking system pods status: {}'.format(', '.join(
        '='.join([pod['metadata']['name'], pod['status']['phase']])
        for pod in result['items'])))
    # On GCP, every pod stuck in Pending is treated as the symptom of the
    # NetworkUnavailable condition never being cleared; poke the API server
    # to clear it and report not-ready so @retry tries again.
    all_pending = all(pod['status']['phase'] == 'Pending'
                      for pod in result['items'])
    if is_state('endpoint.gcp.ready') and all_pending:
        poke_network_unavailable()
        return False
    # All pods must be Running or Evicted (which should re-spawn)
    all_running = all(pod['status']['phase'] == 'Running' or
                      pod['status'].get('reason', '') == 'Evicted'
                      for pod in result['items'])
    return all_running
def poke_network_unavailable():
    """
    Work around https://github.com/kubernetes/kubernetes/issues/44254 by
    manually poking the status into the API server to tell the nodes they have
    a network route.

    This is needed because kubelet sets the NetworkUnavailable flag and expects
    the network plugin to clear it, which only kubenet does. There is some
    discussion about refactoring the affected code but nothing has happened
    in a while.
    """
    cmd = ['kubectl', 'get', 'nodes', '-o', 'json']
    try:
        output = check_output(cmd).decode('utf-8')
        nodes = json.loads(output)['items']
    except CalledProcessError:
        hookenv.log('failed to get kube-system nodes')
        return
    except (KeyError, json.JSONDecodeError) as e:
        hookenv.log('failed to parse kube-system node status '
                    '({}): {}'.format(e, output), hookenv.ERROR)
        return
    for node in nodes:
        node_name = node['metadata']['name']
        url = 'http://localhost:8080/api/v1/nodes/{}/status'.format(node_name)
        # GET the node's current status subresource from the local apiserver.
        with urlopen(url) as response:
            code = response.getcode()
            body = response.read().decode('utf8')
        if code != 200:
            hookenv.log('failed to get node status from {} [{}]: {}'.format(
                url, code, body), hookenv.ERROR)
            return
        try:
            node_info = json.loads(body)
            conditions = node_info['status']['conditions']
            # NOTE(review): .index() raises ValueError for a node without a
            # NetworkUnavailable condition, which this except clause does not
            # catch -- confirm whether that can occur here.
            i = [c['type'] for c in conditions].index('NetworkUnavailable')
            if conditions[i]['status'] == 'True':
                hookenv.log('Clearing NetworkUnavailable from {}'.format(
                    node_name))
                conditions[i] = {
                    "type": "NetworkUnavailable",
                    "status": "False",
                    "reason": "RouteCreated",
                    "message": "Manually set through k8s api",
                }
                # PUT the modified status subresource back to the apiserver.
                req = Request(url, method='PUT',
                              data=json.dumps(node_info).encode('utf8'),
                              headers={'Content-Type': 'application/json'})
                with urlopen(req) as response:
                    code = response.getcode()
                    body = response.read().decode('utf8')
                if code not in (200, 201, 202):
                    hookenv.log('failed to update node status [{}]: {}'.format(
                        code, body), hookenv.ERROR)
                    return
        except (json.JSONDecodeError, KeyError):
            hookenv.log('failed to parse node status: {}'.format(body),
                        hookenv.ERROR)
            return
def apiserverVersion():
    """Return the local kube-apiserver version as an (x, y, z) int tuple."""
    raw = check_output('kube-apiserver --version'.split()).decode('utf-8')
    # The first three numeric groups in the banner are major.minor.patch.
    parts = re.findall("[0-9]+", raw)[:3]
    return tuple(int(part) for part in parts)
def touch(fname):
    """Bump *fname*'s atime/mtime, creating an empty file when it is missing."""
    try:
        os.utime(fname, None)
    except OSError:
        # utime failed (typically: no such file) -- create it instead.
        with open(fname, 'a'):
            pass
def getStorageBackend():
    """Return the configured storage backend, resolving 'auto' via leader data."""
    backend = hookenv.config('storage-backend')
    if backend == 'auto':
        return leader_get('auto_storage_backend')
    return backend
@when('leadership.is_leader')
@when_not('leadership.set.cluster_tag')
def create_cluster_tag():
    # Generate a one-time random cluster tag and persist it in leader data
    # so that all units (and workers) share the same tag.
    cluster_tag = 'kubernetes-{}'.format(token_generator().lower())
    leader_set(cluster_tag=cluster_tag)
@when('leadership.set.cluster_tag',
      'kube-control.connected')
@when_not('kubernetes-master.cluster-tag-sent')
def send_cluster_tag():
    # Publish the leader-chosen cluster tag to workers over kube-control,
    # then latch the sent state so this runs only once per connection.
    cluster_tag = leader_get('cluster_tag')
    kube_control = endpoint_from_flag('kube-control.connected')
    kube_control.set_cluster_tag(cluster_tag)
    set_state('kubernetes-master.cluster-tag-sent')
@when_not('kube-control.connected')
def clear_cluster_tag_sent():
    # Relation dropped: clear the latch so the tag is re-sent on reconnect.
    remove_state('kubernetes-master.cluster-tag-sent')
@when_any('endpoint.aws.joined',
          'endpoint.gcp.joined')
@when('leadership.set.cluster_tag')
@when_not('kubernetes-master.cloud-request-sent')
def request_integration():
    """Ask the joined cloud integrator to tag/label this master's resources."""
    hookenv.status_set('maintenance', 'requesting cloud integration')
    cluster_tag = leader_get('cluster_tag')
    if is_state('endpoint.aws.joined'):
        cloud = endpoint_from_flag('endpoint.aws.joined')
        # Tag instance, security group and subnet with the cluster tag so the
        # AWS cloud provider can discover the cluster's resources.
        cloud.tag_instance({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
            'k8s.io/role/master': 'true',
        })
        cloud.tag_instance_security_group({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_subnet({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.enable_object_storage_management(['kubernetes-*'])
        cloud.enable_load_balancer_management()
    elif is_state('endpoint.gcp.joined'):
        cloud = endpoint_from_flag('endpoint.gcp.joined')
        cloud.label_instance({
            'k8s-io-cluster-name': cluster_tag,
            'k8s-io-role-master': 'master',
        })
        cloud.enable_object_storage_management()
        cloud.enable_security_management()
        cloud.enable_instance_inspection()
        cloud.enable_network_management()
        cloud.enable_dns_management()
        cloud.enable_block_storage_management()
    # Latch so the request is only sent once per joined relation.
    set_state('kubernetes-master.cloud-request-sent')
@when_none('endpoint.aws.joined',
           'endpoint.gcp.joined')
@when('kubernetes-master.cloud-request-sent')
def clear_requested_integration():
    # Cloud relation went away: clear the latch so a fresh request can be
    # made if the relation is re-established.
    remove_state('kubernetes-master.cloud-request-sent')
@when_any('endpoint.aws.ready',
          'endpoint.gcp.ready',
          'endpoint.openstack.ready')
@when_not('kubernetes-master.restarted-for-cloud')
def restart_for_cloud():
    """Write cloud-specific snap config (gcp/openstack) and force a restart."""
    if is_state('endpoint.gcp.ready'):
        _write_gcp_snap_config('kube-apiserver')
        _write_gcp_snap_config('kube-controller-manager')
    elif is_state('endpoint.openstack.ready'):
        _write_openstack_snap_config('kube-apiserver')
        _write_openstack_snap_config('kube-controller-manager')
    set_state('kubernetes-master.restarted-for-cloud')
    remove_state('kubernetes-master.components.started')  # force restart
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
    """Return the path of *component*'s snap cloud-config file."""
    return _snap_common_path(component).joinpath('cloud-config.conf')
def _gcp_creds_path(component):
    """Return the path of *component*'s GCP credentials file."""
    return _snap_common_path(component).joinpath('gcp-creds.json')
def _daemon_env_path(component):
    """Return the path of *component*'s snap daemon environment file."""
    return _snap_common_path(component).joinpath('environment')
def _write_gcp_snap_config(component):
    # gcp requires additional credentials setup
    gcp = endpoint_from_flag('endpoint.gcp.ready')
    creds_path = _gcp_creds_path(component)
    with creds_path.open('w') as fp:
        # Credentials are sensitive: restrict the file to owner read/write
        # before writing any content into it.
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)
    # create a cloud-config file that sets token-url to nil to make the
    # services use the creds env var instead of the metadata server, as
    # well as making the cluster multizone
    cloud_config_path = _cloud_config_path(component)
    cloud_config_path.write_text('[Global]\n'
                                 'token-url = nil\n'
                                 'multizone = true\n')
    # Append the creds env var to the snap daemon environment file exactly
    # once (re-running this function must stay idempotent).
    # NOTE(review): gcp_creds_env_key is a module-level constant defined
    # elsewhere in this file.
    daemon_env_path = _daemon_env_path(component)
    if daemon_env_path.exists():
        daemon_env = daemon_env_path.read_text()
        if not daemon_env.endswith('\n'):
            daemon_env += '\n'
    else:
        daemon_env = ''
    if gcp_creds_env_key not in daemon_env:
        daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
        daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
        daemon_env_path.write_text(daemon_env)
def _write_openstack_snap_config(component):
    """Write an OpenStack [Global] cloud-config file for *component*'s snap."""
    # openstack requires additional credentials setup
    openstack = endpoint_from_flag('endpoint.openstack.ready')
    config_lines = [
        '[Global]',
        'auth-url = {}'.format(openstack.auth_url),
        'username = {}'.format(openstack.username),
        'password = {}'.format(openstack.password),
        'tenant-name = {}'.format(openstack.project_name),
        'domain-name = {}'.format(openstack.user_domain_name),
    ]
    _cloud_config_path(component).write_text('\n'.join(config_lines))
| apache-2.0 |
tinfoil/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py | 117 | 39195 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for interacting with Bugzilla
import logging
import mimetypes
import re
import StringIO
import socket
import urllib
from datetime import datetime # used in timestamp()
from .attachment import Attachment
from .bug import Bug
from webkitpy.common.config import committers
import webkitpy.common.config.urls as config_urls
from webkitpy.common.net.credentials import Credentials
from webkitpy.common.system.user import User
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, SoupStrainer
_log = logging.getLogger(__name__)
class EditUsersParser(object):
    """Scrapes user information out of Bugzilla's editusers.cgi admin pages."""

    def __init__(self):
        # Maps group name -> bugzilla's per-install "group_NUMBER" string.
        self._group_name_to_group_string_cache = {}

    def _login_and_uid_from_row(self, row):
        """Return (login, userid) parsed from one result-table <tr>, or None."""
        first_cell = row.find("td")
        # The first row is just headers, we skip it.
        if not first_cell:
            return None
        # When there were no results, we have a fake "<none>" entry in the table.
        if first_cell.find(text="<none>"):
            return None
        # Otherwise the <td> contains a single <a> which contains the login name or a single <i> with the string "<none>".
        anchor_tag = first_cell.find("a")
        login = unicode(anchor_tag.string).strip()
        user_id = int(re.search(r"userid=(\d+)", str(anchor_tag['href'])).group(1))
        return (login, user_id)

    def login_userid_pairs_from_edit_user_results(self, results_page):
        """Return a list of (login, userid) pairs from an editusers.cgi list page."""
        soup = BeautifulSoup(results_page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
        results_table = soup.find(id="admin_table")
        login_userid_pairs = [self._login_and_uid_from_row(row) for row in results_table('tr')]
        # Filter out None from the logins.
        return filter(lambda pair: bool(pair), login_userid_pairs)

    def _group_name_and_string_from_row(self, row):
        # Each group row holds <label for="group_N"><strong>name:</strong>...
        label_element = row.find('label')
        group_string = unicode(label_element['for'])
        group_name = unicode(label_element.find('strong').string).rstrip(':')
        return (group_name, group_string)

    def user_dict_from_edit_user_page(self, page):
        """Return a dict of user fields (with 'groups' as a set) from an edit page."""
        soup = BeautifulSoup(page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
        user_table = soup.find("table", {'class': 'main'})
        user_dict = {}
        for row in user_table('tr'):
            label_element = row.find('label')
            if not label_element:
                continue  # This must not be a row we know how to parse.
            if row.find('table'):
                continue  # Skip the <tr> holding the groups table.
            key = label_element['for']
            if "group" in key:
                key = "groups"
                value = user_dict.get('groups', set())
                # We must be parsing a "tr" inside the inner group table.
                (group_name, _) = self._group_name_and_string_from_row(row)
                if row.find('input', {'type': 'checkbox', 'checked': 'checked'}):
                    value.add(group_name)
            else:
                value = unicode(row.find('td').string).strip()
            user_dict[key] = value
        return user_dict

    def _group_rows_from_edit_user_page(self, edit_user_page):
        # Every group checkbox cell carries class="groupname".
        soup = BeautifulSoup(edit_user_page, convertEntities=BeautifulSoup.HTML_ENTITIES)
        return soup('td', {'class': 'groupname'})

    def group_string_from_name(self, edit_user_page, group_name):
        """Return bugzilla's form-control id (e.g. "group_7") for *group_name*."""
        # Bugzilla uses "group_NUMBER" strings, which may be different per install
        # so we just look them up once and cache them.
        if not self._group_name_to_group_string_cache:
            rows = self._group_rows_from_edit_user_page(edit_user_page)
            name_string_pairs = map(self._group_name_and_string_from_row, rows)
            self._group_name_to_group_string_cache = dict(name_string_pairs)
        return self._group_name_to_group_string_cache[group_name]
def timestamp():
    """Return the current local time formatted as YYYYMMDDHHMMSS."""
    now = datetime.now()
    return now.strftime("%Y%m%d%H%M%S")
# A container for all of the logic for making and parsing bugzilla queries.
class BugzillaQueries(object):

    def __init__(self, bugzilla):
        self._bugzilla = bugzilla

    def _is_xml_bugs_form(self, form):
        # ClientForm.HTMLForm.find_control throws if the control is not found,
        # so we do a manual search instead:
        return "xml" in [control.id for control in form.controls]

    # This is kinda a hack. There is probably a better way to get this information from bugzilla.
    def _parse_result_count(self, results_page):
        """Return the number of bugs a buglist results page reports."""
        result_count_text = BeautifulSoup(results_page).find(attrs={'class': 'bz_result_count'}).string
        result_count_parts = result_count_text.strip().split(" ")
        # Bugzilla spells zero as "Zarro" (Zarro Boogs) and one as "One".
        if result_count_parts[0] == "Zarro":
            return 0
        if result_count_parts[0] == "One":
            return 1
        return int(result_count_parts[0])

    # Note: _load_query, _fetch_bug and _fetch_bugs_from_advanced_query
    # are the only methods which access self._bugzilla.
    def _load_query(self, query):
        """Authenticate and open *query* (a path relative to the bug server)."""
        self._bugzilla.authenticate()
        full_url = "%s%s" % (config_urls.bug_server_url, query)
        return self._bugzilla.browser.open(full_url)

    def _fetch_bugs_from_advanced_query(self, query):
        """Return Bug objects for every result of an advanced-search query."""
        results_page = self._load_query(query)
        # Some simple searches can return a single result.
        results_url = results_page.geturl()
        if results_url.find("/show_bug.cgi?id=") != -1:
            bug_id = int(results_url.split("=")[-1])
            return [self._fetch_bug(bug_id)]
        if not self._parse_result_count(results_page):
            return []
        # Bugzilla results pages have an "XML" submit button at the bottom
        # which can be used to get an XML page containing all of the <bug> elements.
        # This is slighty lame that this assumes that _load_query used
        # self._bugzilla.browser and that it's in an acceptable state.
        self._bugzilla.browser.select_form(predicate=self._is_xml_bugs_form)
        bugs_xml = self._bugzilla.browser.submit()
        return self._bugzilla._parse_bugs_from_xml(bugs_xml)

    def _fetch_bug(self, bug_id):
        return self._bugzilla.fetch_bug(bug_id)

    def _fetch_bug_ids_advanced_query(self, query):
        """Return the list of bug ids (ints) from an advanced-search query."""
        soup = BeautifulSoup(self._load_query(query))
        # The contents of the <a> inside the cells in the first column happen
        # to be the bug id.
        return [int(bug_link_cell.find("a").string)
                for bug_link_cell in soup('td', "first-child")]

    def _parse_attachment_ids_request_query(self, page):
        # Extract attachment ids from the review-request links on the page.
        digits = re.compile("\d+")
        attachment_href = re.compile("attachment.cgi\?id=\d+&action=review")
        attachment_links = SoupStrainer("a", href=attachment_href)
        return [int(digits.search(tag["href"]).group(0))
                for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)]

    def _fetch_attachment_ids_request_query(self, query):
        return self._parse_attachment_ids_request_query(self._load_query(query))

    def _parse_quips(self, page):
        # Quips live in the <ul> following the "Existing quips:" heading.
        soup = BeautifulSoup(page, convertEntities=BeautifulSoup.HTML_ENTITIES)
        quips = soup.find(text=re.compile(r"Existing quips:")).findNext("ul").findAll("li")
        return [unicode(quip_entry.string) for quip_entry in quips]

    def fetch_quips(self):
        return self._parse_quips(self._load_query("/quips.cgi?action=show"))

    # List of all r+'d bugs.
    def fetch_bug_ids_from_pending_commit_list(self):
        needs_commit_query_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B"
        return self._fetch_bug_ids_advanced_query(needs_commit_query_url)

    def fetch_bugs_matching_quicksearch(self, search_string):
        # We may want to use a more explicit query than "quicksearch".
        # If quicksearch changes we should probably change to use
        # a normal buglist.cgi?query_format=advanced query.
        quicksearch_url = "buglist.cgi?quicksearch=%s" % urllib.quote(search_string)
        return self._fetch_bugs_from_advanced_query(quicksearch_url)

    # Currently this returns all bugs across all components.
    # In the future we may wish to extend this API to construct more restricted searches.
    def fetch_bugs_matching_search(self, search_string):
        query = "buglist.cgi?query_format=advanced"
        if search_string:
            query += "&short_desc_type=allwordssubstr&short_desc=%s" % urllib.quote(search_string)
        return self._fetch_bugs_from_advanced_query(query)

    def fetch_patches_from_pending_commit_list(self):
        # sum(..., []) flattens the per-bug patch lists into a single list.
        return sum([self._fetch_bug(bug_id).reviewed_patches()
                    for bug_id in self.fetch_bug_ids_from_pending_commit_list()], [])

    def fetch_bugs_from_review_queue(self, cc_email=None):
        query = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
        if cc_email:
            query += "&emailcc1=1&emailtype1=substring&email1=%s" % urllib.quote(cc_email)
        return self._fetch_bugs_from_advanced_query(query)

    def fetch_bug_ids_from_commit_queue(self):
        commit_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B&order=Last+Changed"
        return self._fetch_bug_ids_advanced_query(commit_queue_url)

    def fetch_patches_from_commit_queue(self):
        # This function will only return patches which have valid committers
        # set. It won't reject patches with invalid committers/reviewers.
        return sum([self._fetch_bug(bug_id).commit_queued_patches()
                    for bug_id in self.fetch_bug_ids_from_commit_queue()], [])

    def fetch_bug_ids_from_review_queue(self):
        review_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
        return self._fetch_bug_ids_advanced_query(review_queue_url)

    # This method will make several requests to bugzilla.
    def fetch_patches_from_review_queue(self, limit=None):
        # [:None] returns the whole array.
        return sum([self._fetch_bug(bug_id).unreviewed_patches()
                    for bug_id in self.fetch_bug_ids_from_review_queue()[:limit]], [])

    # NOTE: This is the only client of _fetch_attachment_ids_request_query
    # This method only makes one request to bugzilla.
    def fetch_attachment_ids_from_review_queue(self):
        review_queue_url = "request.cgi?action=queue&type=review&group=type"
        return self._fetch_attachment_ids_request_query(review_queue_url)

    # This only works if your account has edituser privileges.
    # We could easily parse https://bugs.webkit.org/userprefs.cgi?tab=permissions to
    # check permissions, but bugzilla will just return an error if we don't have them.
    def fetch_login_userid_pairs_matching_substring(self, search_string):
        review_queue_url = "editusers.cgi?action=list&matchvalue=login_name&matchstr=%s&matchtype=substr" % urllib.quote(search_string)
        results_page = self._load_query(review_queue_url)
        # We could pull the EditUsersParser off Bugzilla if needed.
        return EditUsersParser().login_userid_pairs_from_edit_user_results(results_page)

    # FIXME: We should consider adding a BugzillaUser class.
    def fetch_logins_matching_substring(self, search_string):
        pairs = self.fetch_login_userid_pairs_matching_substring(search_string)
        return map(lambda pair: pair[0], pairs)
class Bugzilla(object):
    def __init__(self, committers=committers.CommitterList()):
        # NOTE(review): the CommitterList default argument is evaluated once
        # at import time and shared across instances -- confirm intended.
        self.authenticated = False
        self.queries = BugzillaQueries(self)
        self.committers = committers
        self.cached_quips = []
        self.edit_user_parser = EditUsersParser()
        self._browser = None
    def _get_browser(self):
        # Lazily construct the mechanize Browser on first access.
        if not self._browser:
            self.setdefaulttimeout(600)
            from webkitpy.thirdparty.autoinstalled.mechanize import Browser
            self._browser = Browser()
            # Ignore bugs.webkit.org/robots.txt until we fix it to allow this script.
            self._browser.set_handle_robots(False)
        return self._browser
    def _set_browser(self, value):
        # Setter exists mainly so tests can inject a mock browser.
        self._browser = value

    # Expose the lazily-created mechanize browser as a plain attribute.
    browser = property(_get_browser, _set_browser)
    def setdefaulttimeout(self, value):
        # Process-wide: affects every socket in this process, not just ours.
        socket.setdefaulttimeout(value)
    def fetch_user(self, user_id):
        """Return a dict describing a bugzilla user (requires edituser privileges)."""
        self.authenticate()
        edit_user_page = self.browser.open(self.edit_user_url_for_id(user_id))
        return self.edit_user_parser.user_dict_from_edit_user_page(edit_user_page)
    def add_user_to_groups(self, user_id, group_names):
        """Tick the named group checkboxes on the user's edit page and submit."""
        self.authenticate()
        user_edit_page = self.browser.open(self.edit_user_url_for_id(user_id))
        # NOTE(review): nr=1 assumes the user-edit form is the second form on
        # the page -- confirm against the live editusers.cgi markup.
        self.browser.select_form(nr=1)
        for group_name in group_names:
            group_string = self.edit_user_parser.group_string_from_name(user_edit_page, group_name)
            self.browser.find_control(group_string).items[0].selected = True
        self.browser.submit()
    def quips(self):
        """Return the cached list of quips, fetching it on first call."""
        # We only fetch and parse the list of quips once per instantiation
        # so that we do not burden bugs.webkit.org.
        if not self.cached_quips:
            self.cached_quips = self.queries.fetch_quips()
        return self.cached_quips
def bug_url_for_bug_id(self, bug_id, xml=False):
if not bug_id:
return None
content_type = "&ctype=xml&excludefield=attachmentdata" if xml else ""
return "%sshow_bug.cgi?id=%s%s" % (config_urls.bug_server_url, bug_id, content_type)
def short_bug_url_for_bug_id(self, bug_id):
if not bug_id:
return None
return "http://webkit.org/b/%s" % bug_id
def add_attachment_url(self, bug_id):
return "%sattachment.cgi?action=enter&bugid=%s" % (config_urls.bug_server_url, bug_id)
def attachment_url_for_id(self, attachment_id, action="view"):
if not attachment_id:
return None
action_param = ""
if action and action != "view":
action_param = "&action=%s" % action
return "%sattachment.cgi?id=%s%s" % (config_urls.bug_server_url,
attachment_id,
action_param)
def edit_user_url_for_id(self, user_id):
return "%seditusers.cgi?action=edit&userid=%s" % (config_urls.bug_server_url, user_id)
    def _parse_attachment_flag(self,
                               element,
                               flag_name,
                               attachment,
                               result_key):
        """Copy a flag's status into *attachment*; record the setter when '+'."""
        flag = element.find('flag', attrs={'name': flag_name})
        if flag:
            attachment[flag_name] = flag['status']
            if flag['status'] == '+':
                attachment[result_key] = flag['setter']
        # Sadly show_bug.cgi?ctype=xml does not expose the flag modification date.
    def _string_contents(self, soup):
        """Return the element's text as a plain unicode object."""
        # WebKit's bugzilla instance uses UTF-8.
        # BeautifulStoneSoup always returns Unicode strings, however
        # the .string method returns a (unicode) NavigableString.
        # NavigableString can confuse other parts of the code, so we
        # convert from NavigableString to a real unicode() object using unicode().
        return unicode(soup.string)
    # Example: 2010-01-20 14:31 PST
    # FIXME: Some bugzilla dates seem to have seconds in them?
    # Python does not support timezones out of the box.
    # Assume that bugzilla always uses PST (which is true for bugs.webkit.org)
    _bugzilla_date_format = "%Y-%m-%d %H:%M:%S"

    @classmethod
    def _parse_date(cls, date_string):
        """Parse 'YYYY-MM-DD HH:MM[:SS] TZ' into a naive datetime (TZ dropped)."""
        (date, time, time_zone) = date_string.split(" ")
        if time.count(':') == 1:
            # Add seconds into the time.
            time += ':0'
        # Ignore the timezone because python doesn't understand timezones out of the box.
        date_string = "%s %s" % (date, time)
        return datetime.strptime(date_string, cls._bugzilla_date_format)
    def _date_contents(self, soup):
        # Convenience wrapper: unwrap the NavigableString, then parse the date.
        return self._parse_date(self._string_contents(soup))
    def _parse_attachment_element(self, element, bug_id):
        """Build an attachment dict from one <attachment> XML element."""
        attachment = {}
        attachment['bug_id'] = bug_id
        # isobsolete/ispatch arrive as "1"/"0" string attributes in the XML.
        attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1")
        attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1")
        attachment['id'] = int(element.find('attachid').string)
        # FIXME: No need to parse out the url here.
        attachment['url'] = self.attachment_url_for_id(attachment['id'])
        attachment["attach_date"] = self._date_contents(element.find("date"))
        attachment['name'] = self._string_contents(element.find('desc'))
        attachment['attacher_email'] = self._string_contents(element.find('attacher'))
        attachment['type'] = self._string_contents(element.find('type'))
        self._parse_attachment_flag(
            element, 'review', attachment, 'reviewer_email')
        self._parse_attachment_flag(
            element, 'commit-queue', attachment, 'committer_email')
        return attachment
    def _parse_log_descr_element(self, element):
        """Build a comment dict from one <long_desc> XML element."""
        comment = {}
        comment['comment_email'] = self._string_contents(element.find('who'))
        comment['comment_date'] = self._date_contents(element.find('bug_when'))
        comment['text'] = self._string_contents(element.find('thetext'))
        return comment
    def _parse_bugs_from_xml(self, page):
        """Return Bug objects for every <bug> element in a buglist XML page."""
        soup = BeautifulSoup(page)
        # Without the unicode() call, BeautifulSoup occasionally complains of being
        # passed None for no apparent reason.
        return [Bug(self._parse_bug_dictionary_from_xml(unicode(bug_xml)), self) for bug_xml in soup('bug')]
    def _parse_bug_dictionary_from_xml(self, page):
        """Parse show_bug.cgi?ctype=xml output into a plain bug dictionary."""
        soup = BeautifulStoneSoup(page, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
        bug = {}
        bug["id"] = int(soup.find("bug_id").string)
        bug["title"] = self._string_contents(soup.find("short_desc"))
        bug["bug_status"] = self._string_contents(soup.find("bug_status"))
        # dup_id is only present when this bug was resolved as a duplicate.
        dup_id = soup.find("dup_id")
        if dup_id:
            bug["dup_id"] = self._string_contents(dup_id)
        bug["reporter_email"] = self._string_contents(soup.find("reporter"))
        bug["assigned_to_email"] = self._string_contents(soup.find("assigned_to"))
        bug["cc_emails"] = [self._string_contents(element) for element in soup.findAll('cc')]
        bug["attachments"] = [self._parse_attachment_element(element, bug["id"]) for element in soup.findAll('attachment')]
        bug["comments"] = [self._parse_log_descr_element(element) for element in soup.findAll('long_desc')]
        return bug
    # Makes testing fetch_*_from_bug() possible until we have a better
    # BugzillaNetwork abstration.
    def _fetch_bug_page(self, bug_id):
        """Open (and log) the XML page for *bug_id*; returns a file-like response."""
        bug_url = self.bug_url_for_bug_id(bug_id, xml=True)
        _log.info("Fetching: %s" % bug_url)
        return self.browser.open(bug_url)
def fetch_bug_dictionary(self, bug_id):
try:
return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
except KeyboardInterrupt:
raise
except:
self.authenticate()
return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
    # FIXME: A BugzillaCache object should provide all these fetch_ methods.
    def fetch_bug(self, bug_id):
        # Thin wrapper turning the parsed dictionary into a Bug object.
        return Bug(self.fetch_bug_dictionary(bug_id), self)
    def fetch_attachment_contents(self, attachment_id):
        """Return the raw contents of the attachment with *attachment_id*."""
        attachment_url = self.attachment_url_for_id(attachment_id)
        # We need to authenticate to download patches from security bugs.
        self.authenticate()
        return self.browser.open(attachment_url).read()
    def _parse_bug_id_from_attachment_page(self, page):
        """Return the owning bug id parsed from an attachment edit page, or None."""
        # The "Up" relation happens to point to the bug.
        up_link = BeautifulSoup(page).find('link', rel='Up')
        if not up_link:
            # This attachment does not exist (or you don't have permissions to
            # view it).
            return None
        match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href'])
        return int(match.group('bug_id'))
    def bug_id_for_attachment_id(self, attachment_id):
        """Fetch the attachment's edit page and parse out its owning bug id."""
        self.authenticate()
        attachment_url = self.attachment_url_for_id(attachment_id, 'edit')
        _log.info("Fetching: %s" % attachment_url)
        page = self.browser.open(attachment_url)
        return self._parse_bug_id_from_attachment_page(page)
    # FIXME: This should just return Attachment(id), which should be able to
    # lazily fetch needed data.
    def fetch_attachment(self, attachment_id):
        """Return the Attachment with *attachment_id*, or None if not visible."""
        # We could grab all the attachment details off of the attachment edit
        # page but we already have working code to do so off of the bugs page,
        # so re-use that.
        bug_id = self.bug_id_for_attachment_id(attachment_id)
        if not bug_id:
            return None
        attachments = self.fetch_bug(bug_id).attachments(include_obsolete=True)
        for attachment in attachments:
            if attachment.id() == int(attachment_id):
                return attachment
        return None  # This should never be hit.
def authenticate(self):
    """Log in to Bugzilla interactively, retrying on bad credentials.

    No-op when already authenticated.  Reads a username/password via
    Credentials, submits the login form, and inspects the response title
    to detect a failed login.  Raises after the fifth failed attempt.
    """
    if self.authenticated:
        return
    credentials = Credentials(config_urls.bug_server_host, git_prefix="bugzilla")
    attempts = 0
    while not self.authenticated:
        attempts += 1
        username, password = credentials.read_credentials()
        _log.info("Logging in as %s..." % username)
        self.browser.open(config_urls.bug_server_url +
                          "index.cgi?GoAheadAndLogIn=1")
        self.browser.select_form(name="login")
        self.browser['Bugzilla_login'] = username
        self.browser['Bugzilla_password'] = password
        # Uncheck the "restrict login" box so the session is not IP-bound.
        self.browser.find_control("Bugzilla_restrictlogin").items[0].selected = False
        response = self.browser.submit()
        match = re.search("<title>(.+?)</title>", response.read())
        # If the resulting page has a title, and it contains the word
        # "invalid" assume it's the login failure page.
        if match and re.search("Invalid", match.group(1), re.IGNORECASE):
            errorMessage = "Bugzilla login failed: %s" % match.group(1)
            # raise an exception only if this was the last attempt
            if attempts < 5:
                _log.error(errorMessage)
            else:
                raise Exception(errorMessage)
        else:
            self.authenticated = True
            self.username = username
# FIXME: Use enum instead of two booleans
def _commit_queue_flag(self, mark_for_landing, mark_for_commit_queue):
if mark_for_landing:
user = self.committers.contributor_by_email(self.username)
mark_for_commit_queue = True
if not user:
_log.warning("Your Bugzilla login is not listed in committers.py. Uploading with cq? instead of cq+")
mark_for_landing = False
elif not user.can_commit:
_log.warning("You're not a committer yet or haven't updated committers.py yet. Uploading with cq? instead of cq+")
mark_for_landing = False
if mark_for_landing:
return '+'
if mark_for_commit_queue:
return '?'
return 'X'
# FIXME: mark_for_commit_queue and mark_for_landing should be joined into a single commit_flag argument.
def _fill_attachment_form(self,
                          description,
                          file_object,
                          mark_for_review=False,
                          mark_for_commit_queue=False,
                          mark_for_landing=False,
                          is_patch=False,
                          filename=None,
                          mimetype=None):
    """Populate the currently selected attachment entry form.

    Assumes the caller has already select_form()-ed the right form.  Sets
    description, the ispatch checkbox, the review/commit-queue flags, and
    attaches *file_object* under *filename* with a best-guess MIME type.
    """
    self.browser['description'] = description
    if is_patch:
        self.browser['ispatch'] = ("1",)
    # FIXME: Should this use self._find_select_element_for_flag?
    # flag_type-1 carries the review flag, flag_type-3 the commit-queue flag.
    self.browser['flag_type-1'] = ('?',) if mark_for_review else ('X',)
    self.browser['flag_type-3'] = (self._commit_queue_flag(mark_for_landing, mark_for_commit_queue),)
    filename = filename or "%s.patch" % timestamp()
    if not mimetype:
        mimetypes.add_type('text/plain', '.patch')  # Make sure mimetypes knows about .patch
        mimetype, _ = mimetypes.guess_type(filename)
        if not mimetype:
            mimetype = "text/plain"  # Bugzilla might auto-guess for us and we might not need this?
    self.browser.add_file(file_object, mimetype, filename, 'data')
def _file_object_for_upload(self, file_or_string):
    """Return a readable file-like object for *file_or_string*.

    Anything with a .read attribute passes through untouched; strings are
    wrapped in a StringIO, UTF-8 encoding unicode first.  (Python 2:
    ``unicode`` and the ``StringIO`` module are py2-only names.)
    """
    if hasattr(file_or_string, 'read'):
        return file_or_string
    # Only if file_or_string is not already encoded do we want to encode it.
    if isinstance(file_or_string, unicode):
        file_or_string = file_or_string.encode('utf-8')
    return StringIO.StringIO(file_or_string)
# timestamp argument is just for unittests.
def _filename_for_upload(self, file_object, bug_id, extension="txt", timestamp=timestamp):
    """Pick an upload filename: the file's own .name when present, otherwise a generated bug-<id>-<timestamp>.<extension> name."""
    _missing = object()
    existing_name = getattr(file_object, "name", _missing)
    if existing_name is not _missing:
        return existing_name
    return "bug-%s-%s.%s" % (bug_id, timestamp(), extension)
def add_attachment_to_bug(self, bug_id, file_or_string, description, filename=None, comment_text=None, mimetype=None):
    """Upload a (non-patch) attachment to a bug, with an optional comment.

    file_or_string may be a file-like object or a string; a filename is
    generated from the bug id and a timestamp when none is given.
    """
    self.authenticate()
    _log.info('Adding attachment "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
    self.browser.open(self.add_attachment_url(bug_id))
    self.browser.select_form(name="entryform")
    file_object = self._file_object_for_upload(file_or_string)
    filename = filename or self._filename_for_upload(file_object, bug_id)
    self._fill_attachment_form(description, file_object, filename=filename, mimetype=mimetype)
    if comment_text:
        _log.info(comment_text)
        self.browser['comment'] = comment_text
    self.browser.submit()
# FIXME: The arguments to this function should be simplified and then
# this should be merged into add_attachment_to_bug
def add_patch_to_bug(self,
                     bug_id,
                     file_or_string,
                     description,
                     comment_text=None,
                     mark_for_review=False,
                     mark_for_commit_queue=False,
                     mark_for_landing=False):
    """Upload a patch attachment (ispatch) to a bug.

    Review/commit-queue/landing flags are applied by _fill_attachment_form;
    the upload filename always uses a .patch extension.
    """
    self.authenticate()
    _log.info('Adding patch "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
    self.browser.open(self.add_attachment_url(bug_id))
    self.browser.select_form(name="entryform")
    file_object = self._file_object_for_upload(file_or_string)
    filename = self._filename_for_upload(file_object, bug_id, extension="patch")
    self._fill_attachment_form(description,
                               file_object,
                               mark_for_review=mark_for_review,
                               mark_for_commit_queue=mark_for_commit_queue,
                               mark_for_landing=mark_for_landing,
                               is_patch=True,
                               filename=filename)
    if comment_text:
        _log.info(comment_text)
        self.browser['comment'] = comment_text
    self.browser.submit()
# FIXME: There has to be a more concise way to write this method.
def _check_create_bug_response(self, response_html):
match = re.search("<title>Bug (?P<bug_id>\d+) Submitted[^<]*</title>",
response_html)
if match:
return match.group('bug_id')
match = re.search(
'<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">',
response_html,
re.DOTALL)
error_message = "FAIL"
if match:
text_lines = BeautifulSoup(
match.group('error_message')).findAll(text=True)
error_message = "\n" + '\n'.join(
[" " + line.strip()
for line in text_lines if line.strip()])
raise Exception("Bug not created: %s" % error_message)
def create_bug(self,
               bug_title,
               bug_description,
               component=None,
               diff=None,
               patch_description=None,
               cc=None,
               blocked=None,
               assignee=None,
               mark_for_review=False,
               mark_for_commit_queue=False):
    """Create a new WebKit bug, optionally attaching an initial patch.

    Returns the new bug id (a string scraped from the response page).
    Prompts the user to pick a component when the given one is unknown and
    defaults the assignee to the logged-in user.  Raises (via
    _check_create_bug_response) when Bugzilla reports an error.
    """
    self.authenticate()
    _log.info('Creating bug with title "%s"' % bug_title)
    self.browser.open(config_urls.bug_server_url + "enter_bug.cgi?product=WebKit")
    self.browser.select_form(name="Create")
    component_items = self.browser.find_control('component').items
    component_names = map(lambda item: item.name, component_items)
    if not component:
        component = "New Bugs"
    if component not in component_names:
        component = User.prompt_with_list("Please pick a component:", component_names)
    self.browser["component"] = [component]
    if cc:
        self.browser["cc"] = cc
    if blocked:
        self.browser["blocked"] = unicode(blocked)
    if not assignee:
        assignee = self.username
    # The assigned_to control is disabled for users without EditBugs rights.
    if assignee and not self.browser.find_control("assigned_to").disabled:
        self.browser["assigned_to"] = assignee
    self.browser["short_desc"] = bug_title
    self.browser["comment"] = bug_description
    if diff:
        # _fill_attachment_form expects a file-like object
        # Patch files are already binary, so no encoding needed.
        assert(isinstance(diff, str))
        patch_file_object = StringIO.StringIO(diff)
        self._fill_attachment_form(
            patch_description,
            patch_file_object,
            mark_for_review=mark_for_review,
            mark_for_commit_queue=mark_for_commit_queue,
            is_patch=True)
    response = self.browser.submit()
    bug_id = self._check_create_bug_response(response.read())
    _log.info("Bug %s created." % bug_id)
    _log.info("%sshow_bug.cgi?id=%s" % (config_urls.bug_server_url, bug_id))
    return bug_id
def _find_select_element_for_flag(self, flag_name):
# FIXME: This will break if we ever re-order attachment flags
if flag_name == "review":
return self.browser.find_control(type='select', nr=0)
elif flag_name == "commit-queue":
return self.browser.find_control(type='select', nr=1)
raise Exception("Don't know how to find flag named \"%s\"" % flag_name)
def clear_attachment_flags(self,
                           attachment_id,
                           additional_comment_text=None):
    """Set both the review and commit-queue flags on an attachment to 'X' (cleared)."""
    self.authenticate()
    comment_text = "Clearing flags on attachment: %s" % attachment_id
    if additional_comment_text:
        comment_text += "\n\n%s" % additional_comment_text
    _log.info(comment_text)
    self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
    self.browser.select_form(nr=1)
    # nr=0 selects the first of the page's two 'comment' textareas.
    self.browser.set_value(comment_text, name='comment', nr=0)
    self._find_select_element_for_flag('review').value = ("X",)
    self._find_select_element_for_flag('commit-queue').value = ("X",)
    self.browser.submit()
def set_flag_on_attachment(self,
                           attachment_id,
                           flag_name,
                           flag_value,
                           comment_text=None):
    """Set a single flag (e.g. review, commit-queue) on an attachment, optionally with a comment."""
    # FIXME: We need a way to test this function on a live bugzilla
    # instance.
    self.authenticate()
    self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
    self.browser.select_form(nr=1)
    if comment_text:
        # Only log (and submit) the comment when one was actually provided;
        # the original unconditionally logged "None".
        _log.info(comment_text)
        self.browser.set_value(comment_text, name='comment', nr=0)
    self._find_select_element_for_flag(flag_name).value = (flag_value,)
    self.browser.submit()
# FIXME: All of these bug editing methods have a ridiculous amount of
# copy/paste code.
def obsolete_attachment(self, attachment_id, comment_text=None):
    """Mark an attachment obsolete and clear its review/commit-queue flags."""
    self.authenticate()
    _log.info("Obsoleting attachment: %s" % attachment_id)
    self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
    self.browser.select_form(nr=1)
    self.browser.find_control('isobsolete').items[0].selected = True
    # Also clear any review flag (to remove it from review/commit queues)
    self._find_select_element_for_flag('review').value = ("X",)
    self._find_select_element_for_flag('commit-queue').value = ("X",)
    if comment_text:
        _log.info(comment_text)
        # Bugzilla has two textareas named 'comment', one is somehow
        # hidden. We want the first.
        self.browser.set_value(comment_text, name='comment', nr=0)
    self.browser.submit()
def add_cc_to_bug(self, bug_id, email_address_list):
    """Add a list of email addresses to a bug's CC field (comma-joined)."""
    self.authenticate()
    _log.info("Adding %s to the CC list for bug %s" % (email_address_list, bug_id))
    self.browser.open(self.bug_url_for_bug_id(bug_id))
    self.browser.select_form(name="changeform")
    self.browser["newcc"] = ", ".join(email_address_list)
    self.browser.submit()
def post_comment_to_bug(self, bug_id, comment_text, cc=None):
    """Post a comment to a bug, optionally CC-ing additional addresses."""
    self.authenticate()
    _log.info("Adding comment to bug %s" % bug_id)
    self.browser.open(self.bug_url_for_bug_id(bug_id))
    self.browser.select_form(name="changeform")
    self.browser["comment"] = comment_text
    if cc:
        self.browser["newcc"] = ", ".join(cc)
    self.browser.submit()
def close_bug_as_fixed(self, bug_id, comment_text=None):
    """Set a bug's status to RESOLVED/FIXED, optionally adding a comment."""
    self.authenticate()
    _log.info("Closing bug %s as fixed" % bug_id)
    self.browser.open(self.bug_url_for_bug_id(bug_id))
    self.browser.select_form(name="changeform")
    if comment_text:
        self.browser['comment'] = comment_text
    self.browser['bug_status'] = ['RESOLVED']
    self.browser['resolution'] = ['FIXED']
    self.browser.submit()
def _has_control(self, form, id):
return id in [control.id for control in form.controls]
def reassign_bug(self, bug_id, assignee=None, comment_text=None):
    """Reassign a bug (default: to the logged-in user), optionally commenting.

    Warns and returns without submitting when the changeform has no
    assigned_to control (users without EditBugs privileges).
    """
    self.authenticate()
    if not assignee:
        assignee = self.username
    _log.info("Assigning bug %s to %s" % (bug_id, assignee))
    self.browser.open(self.bug_url_for_bug_id(bug_id))
    self.browser.select_form(name="changeform")
    if not self._has_control(self.browser, "assigned_to"):
        _log.warning("""Failed to assign bug to you (can't find assigned_to) control.
Ignore this message if you don't have EditBugs privileges (https://bugs.webkit.org/userprefs.cgi?tab=permissions)""")
        return
    if comment_text:
        _log.info(comment_text)
        self.browser["comment"] = comment_text
    self.browser["assigned_to"] = assignee
    self.browser.submit()
def reopen_bug(self, bug_id, comment_text):
    """Re-open a bug, picking REOPENED or UNCONFIRMED as the target state.

    When neither re-open state is offered by the form the bug is assumed to
    be open already and only the comment is posted.
    """
    self.authenticate()
    _log.info("Re-opening bug %s" % bug_id)
    # Bugzilla requires a comment when re-opening a bug, so we know it will
    # never be None.
    _log.info(comment_text)
    self.browser.open(self.bug_url_for_bug_id(bug_id))
    self.browser.select_form(name="changeform")
    bug_status = self.browser.find_control("bug_status", type="select")
    # This is a hack around the fact that ClientForm.ListControl seems to
    # have no simpler way to ask if a control has an item named "REOPENED"
    # without using exceptions for control flow.
    possible_bug_statuses = map(lambda item: item.name, bug_status.items)
    if "REOPENED" in possible_bug_statuses:
        bug_status.value = ["REOPENED"]
    # If the bug was never confirmed it will not have a "REOPENED"
    # state, but only an "UNCONFIRMED" state.
    elif "UNCONFIRMED" in possible_bug_statuses:
        bug_status.value = ["UNCONFIRMED"]
    else:
        # FIXME: This logic is slightly backwards. We won't print this
        # message if the bug is already open with state "UNCONFIRMED".
        _log.info("Did not reopen bug %s, it appears to already be open with status %s." % (bug_id, bug_status.value))
    self.browser['comment'] = comment_text
    self.browser.submit()
| bsd-3-clause |
ENCODE-DCC/dxencode | scrub.py | 1 | 34431 | #!/usr/bin/env python2.7
# scrub.py 1.0.0
#
# Scrub.py will remove all files for an experiment [replicate] and genome/annotation
#
# 1) Lookup experiment type from encoded, based on accession
# 2) Locate the experiment accession named folder
# 3) Given the experiment type, determine the expected results
# 4) Given expected results locate any files (by glob) that should be removed
# a) each single replicate (in replicate sub-folders named as reN_N/
# b) combined replicates in the experiment folder itself
# 5) For each file that should be removed, determine if the file has already been posted
# 6) For each file that needs to be removed and has already been posted, remove
import argparse,os, sys
import json, urlparse, subprocess, itertools, logging, time
from datetime import datetime
from base64 import b64encode
import commands
import dxpy
import dx
import encd
class Scrub(object):
'''
Scrub module removes posted experiment files from dnanexus.
'''
TOOL_IS = 'scrub'
HELP_BANNER = "Scrubs posted files from DX. "
''' This help banner is displayed by get_args.'''
SERVER_DEFAULT = 'www'
'''This the server to makes files have been posed to.'''
FOLDER_DEFAULT = "/"
'''Where to start the search for experiment folders.'''
EXPERIMENT_TYPES_SUPPORTED = [ 'long-rna-seq', 'small-rna-seq', 'rampage', 'dna-me', 'dnase-seq' ]
'''This module supports only these experiment (pipeline) types.'''
SKIP_VALIDATE = {"transcription start sites":'bed'}
'''Some output_types cannot currently be validated, theoretically'''
# Pipeline specifications include order of steps, steps per replicate, combined steps and
# within steps, the output_type: file_glob that define expected results.
# Note: that some steps have multiple files with the same output_type (e.g. hotspot: bed & bb).
# When this happens, key on "output_type|format|format_type": file_glob
# (e.g. "hotspot|bed|narrowPeak": "*_hotspot.bed" and "hotspot|bb|narrowPeak": "*_hotspot.bb")
# TODO: This could be done more gracefully.
PIPELINE_SPECS = {
"long-rna-seq": {
"step-order": [ "align-tophat","signals-top-se","signals-top-pe",
"align-star","signals-star-se","signals-star-pe","quant-rsem","mad-qc"],
"replicate": {
"align-tophat": { "alignments": "*_tophat.bam" ,
"QC_only": "*_flagstat.txt" },
"signals-top-se": { "signal of all reads": "*_tophat_all.bw",
"signal of unique reads": "*_tophat_uniq.bw" },
"signals-top-pe": { "minus strand signal of all reads": "*_tophat_minusAll.bw",
"plus strand signal of all reads": "*_tophat_plusAll.bw",
"minus strand signal of unique reads": "*_tophat_minusUniq.bw",
"plus strand signal of unique reads": "*_tophat_plusUniq.bw" },
"signals-star-se": { "signal of all reads": "*_star_genome_all.bw",
"signal of unique reads": "*_star_genome_uniq.bw" },
"signals-star-pe": { "minus strand signal of all reads": "*_star_genome_minusAll.bw",
"plus strand signal of all reads": "*_star_genome_plusAll.bw",
"minus strand signal of unique reads": "*_star_genome_minusUniq.bw",
"plus strand signal of unique reads": "*_star_genome_plusUniq.bw" },
"align-star": { "alignments": "*_star_genome.bam",
"transcriptome alignments": "*_star_anno.bam",
"QC_only": "*_star_Log.final.out" },
"quant-rsem": { "gene quantifications": "*_rsem.genes.results",
"transcript quantifications": "*_rsem.isoforms.results" } },
"combined": {
"mad-qc": { "QC_only": "*_mad_plot.png" } },
},
"small-rna-seq": {
"step-order": [ "align","signals","mad_qc"],
"replicate": {
"align": { "alignments": "*_srna_star.bam",
"gene quantifications": "*_srna_star_quant.tsv" },
"signals": { "plus strand signal of all reads": "*_srna_star_plusAll.bw",
"minus strand signal of all reads": "*_srna_star_minusAll.bw",
"plus strand signal of unique reads": "*_srna_star_plusUniq.bw",
"minus strand signal of unique reads": "*_srna_star_minusUniq.bw" } },
"combined": {
"mad_qc": { "QC_only": "*_mad_plot.png" } },
},
"rampage": {
"step-order": [ "align","signals","peaks","idr","mad_qc"],
"replicate": {
"align": { "alignments": "*_star_marked.bam",
"QC_only": "*_flagstat.txt" },
"signals": { "plus strand signal of all reads": "*_5p_plusAll.bw",
"minus strand signal of all reads": "*_5p_minusAll.bw",
"plus strand signal of unique reads": "*_5p_plusUniq.bw",
"minus strand signal of unique reads": "*_5p_minusUniq.bw" },
"peaks": { "transcription start sites|gff|gff3": "*_peaks.gff.gz",
"transcription start sites|bed|tss_peak": "*_peaks.bed.gz",
"transcription start sites|bigBed|tss_peak": "*_peaks.bb",
"gene quantifications": "*_peaks_quant.tsv" } },
"combined": {
"idr": { "transcription start sites|bed|idr_peak": "*_idr.bed.gz",
"transcription start sites|bigBed|idr_peak": "*_idr.bb" },
"mad_qc": { "QC_only": "*_mad_plot.png" } },
},
"dna-me": {
"step-order": [ "align","quantification","corr"], # How to: 1) combine 3 steps into 1; 2) tech lvl, bio lvl, exp lvl
"replicate": {
"align": { "alignments": [ "*_techrep_bismark_pe.bam", "*_bismark.bam" ] }, # *may* have samtools_flagstat, samtools_stats, Don't wan't bismark_map
"quantification": { "methylation state at CpG|bigBed|bedMethyl": "*_bismark_biorep_CpG.bb", # All have: samtools_flagstat, bismark_map
"methylation state at CpG|bed|bedMethyl": "*_bismark_biorep_CpG.bed.gz", # All have: samtools_flagstat, bismark_map
"methylation state at CHG|bigBed|bedMethyl": "*_bismark_biorep_CHG.bb", # All have: samtools_flagstat, bismark_map
"methylation state at CHG|bed|bedMethyl": "*_bismark_biorep_CHG.bed.gz", # All have: samtools_flagstat, bismark_map
"methylation state at CHH|bigBed|bedMethyl": "*_bismark_biorep_CHH.bb", # All have: samtools_flagstat, bismark_map
"methylation state at CHH|bed|bedMethyl": "*_bismark_biorep_CHH.bed.gz", # All have: samtools_flagstat, bismark_map
"signal": "*_bismark_biorep.bw" } }, # All have: samtools_flagstat, bismark_map
"combined": {
"corr": { "QC_only": "*_CpG_corr.txt" } }, # Not yet defined in encodeD
},
"dnase-seq": {
"step-order": [ "dnase-align-bwa","dnase-filter","dnase-call-hotspots"],
"replicate": {
"dnase-align-bwa": { "unfiltered alignments": "*_bwa_techrep.bam" },
"dnase-filter": { "alignments": "*_bwa_biorep_filtered.bam" },
"dnase-call-hotspots": { "hotspots|bed|broadPeak": "*_hotspots.bed.gz",
"hotspots|bigBed|broadPeak": "*_hotspots.bb",
"peaks|bed|narrowPeak": "*_peaks.bed.gz",
"peaks|bigBed|narrowPeak": "*_peaks.bb",
"signal of unique reads": "*_density.bw" } },
"combined": {
},
},
}
# Step children are steps that should be combined with their parent step rather than be treated as a separate job
STEP_CHILDREN = {
"dme-cx-to-bed": "dme-extract-pe",
"dme-cx-to-bed-alt": "dme-extract-se",
"dme-bg-to-signal": "dme-extract-pe",
"dme-bg-to-signal-alt": "dme-extract-se",
}
ASSEMBLIES_SUPPORTED = { "hg19": "hg19", "GRCh38": "GRCh38", "mm10": "mm10" }
'''This module supports only these assemblies.'''
ANNOTATIONS_SUPPORTED = [ 'V24', 'V19', 'M2', 'M3', 'M4' ]
'''This module supports only these annotations.'''
REQUIRE_ANNOTATION = [ 'long-rna-seq','small-rna-seq','rampage' ]
'''These assays require an annotation.'''
FORMATS_SUPPORTED = ["bam","bed","bigBed","bigWig","fasta","fastq","gff","gtf","hdf5","idat","rcc","CEL",
"tsv","csv","sam","tar","wig","txt"]
EXTENSION_TO_FORMAT = {
"2bit": "2bit",
"cel.gz": "CEL",
"bam": "bam",
"bed.gz": "bed", "bed": "bed",
"bigBed": "bigBed", "bb": "bigBed",
"bigWig": "bigWig", "bw": "bigWig",
"csfasta.gz": "csfasta",
"csqual.gz": "csqual",
"fasta.gz": "fasta", "fa.gz": "fasta", "fa": "fasta",
"fastq.gz": "fastq", "fq.gz": "fastq", "fq": "fastq",
"gff.gz": "gff", "gff": "gff",
"gtf.gz": "gtf", "gtf": "gtf",
"h5": "hdf5",
"idat": "idat",
"rcc": "rcc",
"tar.gz": "tar", "tgz": "tar",
"tsv": "tsv", "results": "tsv",
"csv": "csv",
"wig.gz": "wig", "wig": "wig",
"sam.gz": "sam", "sam": "sam"
}
'''List of supported formats, and means of recognizing with file extensions.'''
PRIMARY_INPUT_EXTENSION = [ "fastq","fq"]
'''List of file extensions used to recognize primary inputs to parse accessions.'''
def __init__(self):
    '''
    Scrub expects one or more experiment ids as arguments and will find files that
    should be removed from the associated directory.
    '''
    self.args = {}  # run time arguments
    self.server_key = 'www'  # TODO: replace with self.encd.server_key when Encd class is created
    self.server = None  # TODO: replace with self.encd.server() when Encd class is created
    self.acc_prefix = "TSTFF"  # accession prefix; run() switches to "ENCFF" for the 'www' server
    self.proj_name = None  # dnanexus project name (resolved in run())
    self.project = None  # dxpy project object
    self.proj_id = None  # dnanexus project id
    self.exp = {}  # Will hold the encoded exp json
    self.exp_id = None  # ENCSR accession currently being processed
    self.exp_type = {}  # Will hold the experiment's assay_type, normalized to known tokens.
    self.genome = None  # genome should be required
    self.annotation = None  # if appropriate (mice), points the way to the sub-dir
    self.pipeline = None  # pipeline definitions (filled in when experiment type is known)
    self.replicates = None  # lost replicate folders currently found beneath experiment folder
    self.fastqs_too = False  # also remove fastq files when --fastqs_too is given
    self.test = True  # assume Test until told otherwise
    self.force = False  # remove files whether posted or not
    self.remove_all = False  # Removes experiment dir and all files beneath it recursively (Requires force!)
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
    encd.logger = logging.getLogger(__name__ + '.dxe')  # I need this to avoid some errors
    encd.logger.addHandler(logging.StreamHandler())  # logging.NullHandler
    print  # py2: emit a blank line before any further output
def get_args(self,parse=True):
    '''Parse the input arguments.

    Builds the argument parser and returns either the parsed namespace
    (parse=True) or the parser itself.  The parser is also saved as
    self.ap so that run() can call self.ap.print_help() on its
    no-experiments error path (previously self.ap was never set, making
    that path raise AttributeError).
    '''
    ### PIPELINE SPECIFIC
    ap = argparse.ArgumentParser(description=self.HELP_BANNER + "All results " +
                "are expected to be in folder /<resultsLoc>/<experiment> and any replicate " +
                "sub-folders named as " +
                "<experiment>/rep<biological-replicate>_<technical-replicate>.")
    ### PIPELINE SPECIFIC
    ap.add_argument('-e', '--experiments',
                    help='One or more ENCODED experiment accessions, or file containing list',
                    nargs='+',
                    required=True)
    ap.add_argument('--project',
                    help="Project to run analysis in (default: '" + \
                                                    dx.env_get_current_project() + "')",
                    required=False)
    ap.add_argument('-f','--folder',
                    help="The location to search for experiment folders (default: " + \
                                            "'<project>:" + self.FOLDER_DEFAULT + "')",
                    default=self.FOLDER_DEFAULT,
                    required=False)
    ap.add_argument('--server',
                    help="Server that files should have been posted to (default: '" + self.SERVER_DEFAULT + "')",
                    default=self.SERVER_DEFAULT,
                    required=False)
    ap.add_argument('-g','--genome',
                    help="The genome assembly that files were aligned to (default: discovered if possible)",
                    default=None,
                    required=True)
    ap.add_argument('--fastqs_too',
                    help='Remove fastqs too.',
                    action='store_true',
                    required=False)
    ap.add_argument('--test',
                    help='Test run only, do not launch anything.',
                    action='store_true',
                    required=False)
    ap.add_argument('--start_at',
                    help="Start processing with this file name (or possibly accession).",
                    default=None,
                    required=False)
    ap.add_argument('--files',
                    help="Just delete this number of files (default: all)",
                    type=int,
                    default=0,
                    required=False)
    ap.add_argument('--remove_all',
                    help='Remove all files and directory (default is to leave fastqs and workflows) Requires force!',
                    action='store_true',
                    required=False)
    ap.add_argument('--force',
                    help='Remove files regardless of whether they have been posted or not.',
                    action='store_true',
                    required=False)
    ap.add_argument('--verbose',
                    help='More debugging output.',
                    action='store_true',
                    required=False)
    # Keep a handle on the parser so run() can print usage on error.
    self.ap = ap
    if parse:
        return ap.parse_args()
    else:
        return ap
def pipeline_specification(self,args,exp_type,exp_folder,verbose=False):
    '''Sets the pipeline specification object for this experiment type.

    Returns the PIPELINE_SPECS entry for exp_type (None when the type is
    unknown) and resets self.annotation.
    '''
    # Start with dict containing common variables
    #self.expected = copy.deepcopy(self.PIPELINE_SPECS[exp_type])
    pipeline_specs = self.PIPELINE_SPECS.get(exp_type)
    self.annotation = None  # TODO: if appropriate, need way to determine annotation
    if verbose:
        print >> sys.stderr, "Pipeline specification:"
        print >> sys.stderr, json.dumps(pipeline_specs,indent=4)
    return pipeline_specs
def strip_comments(self,line,ws_too=False):
    """Strips a '#' comment from a line (and optionally leading/trailing whitespace).

    A '#' preceded by a backslash is treated as escaped (the backslash is
    kept) and scanning continues after it.  Fixes the original scanning bug
    where the find() restart offset was an absolute index applied to a
    sliced string, so a real '#' following an escaped one was never found.
    """
    ix = 0
    while True:
        bam = line.find('#', ix)
        if bam == -1:
            break
        if bam == 0:
            return ''
        if line[bam - 1] != '\\':
            line = line[:bam]
            break
        ix = bam + 1  # escaped '#': keep the backslash and keep looking
    if ws_too:
        line = line.strip()
    return line
def load_exp_list(self,exp_ids,verbose=False):
    '''Returns a sorted list of experiment accessions from command-line args.

    exp_ids is either one or more ENCSR accessions, or a single file name
    whose lines each hold an accession ('#' comments allowed).  Values that
    do not look like an 11-character ENCSR accession are skipped.
    '''
    #verbose=True
    id_list = []
    file_of_ids = None
    # If only one, it could be a file
    if len(exp_ids) == 1:
        candidate = exp_ids[0]
        if candidate.startswith("ENCSR") and len(candidate) == 11:
            id_list.append(candidate)
            return id_list
        else:
            file_of_ids = candidate
    if file_of_ids != None:
        with open(file_of_ids, 'r') as fh:
            #line = fh.readline()
            for line in fh:
                line = self.strip_comments(line,True)
                if line == '':
                    continue
                candidate = line.split()[0]
                if candidate.startswith("ENCSR") and len(candidate) == 11:
                    id_list.append(candidate)
                elif verbose:
                    print >> sys.stderr, "Value is not experiment id: '"+candidate+"'"
    elif len(exp_ids) > 0:
        for candidate in exp_ids:
            if candidate.startswith("ENCSR") and len(candidate) == 11:
                id_list.append(candidate)
            elif verbose:
                print >> sys.stderr, "Value is not experiment id: '"+candidate+"'"
    if len(id_list) > 0:
        sorted_exp_ids = sorted(id_list)
        if verbose:
            print >> sys.stderr, "Experiment ids: "
            print >> sys.stderr, json.dumps(sorted_exp_ids)
        print "Requested scrubbing %d experiments" % len(sorted_exp_ids)
        return sorted_exp_ids
    return []
def file_format(self,file_name):
    '''Try to determine file format from file name extension.

    Returns a token from FORMATS_SUPPORTED or None when unrecognized.
    '''
    name_parts = file_name.split(".")
    ext = name_parts[-1]
    if ext in ("gz", "tgz"):
        # Compressed: the real extension is the piece before the .gz/.tgz.
        ext = name_parts[-2]
    ext = self.EXTENSION_TO_FORMAT.get(ext, ext)
    return ext if ext in self.FORMATS_SUPPORTED else None
def find_step_files(self,file_globs,result_folder,rep_tech,verbose=False):
    '''Returns tuple list of (type,rep_tech,fid) of ALL files expected for a single step.

    file_globs maps "output_type[|format|format_type]" tokens to a glob (or
    list of alternative globs, first match wins); each glob is looked up
    non-recursively in result_folder and every hit yields a
    (token, rep_tech, fid, QC_only) tuple.
    '''
    step_files = []
    for token in file_globs.keys():
        if type(file_globs[token]) == list:
            # Alternative globs: stop at the first glob that finds anything.
            for file_glob in file_globs[token]:
                if token != "QC_only":
                    if self.file_format(file_glob) == None:
                        print "Error: file glob %s has unknown file format! Please fix" % file_glob
                        sys.exit(1)
                if verbose:
                    print >> sys.stderr, "-- Looking for %s" % (result_folder + file_glob)
                fids = dx.find_file(result_folder + file_glob,self.proj_id, verbose=verbose, multiple=True, recurse=False)
                if fids != None:
                    if not isinstance(fids, list):
                        fids = [ fids ]
                    QC_only = (token == "QC_only")  # Use new qc_object posting methods
                    for fid in fids:
                        step_files.append( (token,rep_tech,fid,QC_only) )
                    break  # Only looking for the first hit
        else:
            if token != "QC_only":
                if self.file_format(file_globs[token]) == None:
                    print "Error: file glob %s has unknown file format! Please fix" % file_globs[token]
                    sys.exit(1)
            if verbose:
                print >> sys.stderr, "-- Looking for %s" % (result_folder + file_globs[token])
            fids = dx.find_file(result_folder + file_globs[token],self.proj_id, verbose=verbose, multiple=True, recurse=False)
            if fids != None:
                if not isinstance(fids, list):
                    fids = [ fids ]
                #if verbose:
                #    print >> sys.stderr, "-- Found %d files for %s" % (len(fids),result_folder + file_globs[token])
                QC_only = (token == "QC_only")  # Use new qc_object posting methods
                for fid in fids:
                    step_files.append( (token,rep_tech,fid,QC_only) )
            #else:
            #    return []  # Only include files from completed steps!
    return step_files
def find_expected_files(self,exp_folder,replicates,verbose=False):
    '''Returns tuple list of (type,rep_tech,fid) of files expected to have been posted to ENCODE.

    Walks the pipeline's step-order twice: first the per-replicate steps
    (searching each replicate sub-folder), then the combined steps
    (searching the experiment folder itself).  When --fastqs_too was
    requested, a fastq glob is added to the first replicate step.
    '''
    expected = []
    # First find replicate step files
    added_fastqs = False
    for step in self.pipeline["step-order"]:
        if step not in self.pipeline["replicate"]:
            continue
        if self.fastqs_too and not added_fastqs and 'fastqs' not in self.pipeline["replicate"][step]:
            self.pipeline["replicate"][step]['fastqs'] = "*.fastq.gz"
            added_fastqs = True  # Just adding this to the first step is all that is needed.
        for rep_tech in replicates:
            step_files = self.find_step_files(self.pipeline["replicate"][step], \
                                              exp_folder + rep_tech + '/',rep_tech,verbose)
            if verbose:
                print >> sys.stderr, "-- Found %d files for %s" % (len(step_files),step)
            if len(step_files) > 0:
                expected.extend(step_files)  # keep them in order!
    # Now add combined step files
    for step in self.pipeline["step-order"]:
        if step not in self.pipeline["combined"]:
            continue
        step_files = self.find_step_files(self.pipeline["combined"][step], \
                                          exp_folder,"combined",verbose)
        if len(step_files) > 0:
            expected.extend(step_files)  # keep them in order!
    if verbose:
        print >> sys.stderr, "Expected files:"
        print >> sys.stderr, json.dumps(expected,indent=4)
    return expected
def input_exception(self,inp_fid):
    '''Returns True if this is one of a limited number of input files we do not track in encodeD.'''
    # TODO: move specifics to json at top of file.
    # Unfortunate special case: the map_report is essentially a QC_only file but is an input to a step in order to
    # combine multiple map_reports into a single qc_metric.
    try:
        if self.exp_type != "dna-me" or not dx.file_path_from_fid(inp_fid).endswith("_map_report.txt"):
            #print "** Ignoring file: " + dx.file_path_from_fid(inp_fid)
            return False
    except Exception:
        # Narrowed from a bare except; best-effort lookup failures still
        # fall through to True (treat the file as an untracked exception).
        pass
    return True
def find_removable_files(self,files_expected,test=True,verbose=False):
    '''Returns the tuple list of files that may be removed from DX.

    A non-QC file is removable when it carries the posted-accession
    property (or --force is set, or it is an input_exception).  If any
    expected non-QC file has not been posted, nothing is removable;
    QC-only files are appended only once all non-QC files qualify.
    '''
    removable = []
    not_posted = 0
    acc_key = dx.property_accesion_key(self.server_key)  # 'accession'
    for (out_type, rep_tech, fid, QC_only) in files_expected:
        if not QC_only:
            acc = dx.file_get_property(acc_key,fid)
            if acc != None or self.force:
                removable.append( (out_type,rep_tech,fid, False) )
            elif self.input_exception(fid):
                removable.append( (out_type,rep_tech,fid, False) )
            else:
                # TODO: back up plan, look on encodeD?
                not_posted += 1
                print >> sys.stderr, "* WARNING: file '" + dx.file_path_from_fid(fid) + \
                                     "' has not been posted, and will not be scrubbed."
        #else: # TODO: How to handle qc_only files (other than just removing them)?
    if not_posted > 0 and not self.force:  # If even one file is not posted, then none are removable
        return []
    # if all expected non-QC files are removable, then go ahead and remove the qc ones as well
    for (out_type, rep_tech, fid, QC_only) in files_expected:
        if QC_only:
            removable.append( (out_type,rep_tech,fid, True) )
    if verbose:
        print >> sys.stderr, "Removable files:"
        print >> sys.stderr, json.dumps(removable,indent=4)
    return removable
def run(self):
'''Runs scrub from start to finish using command line arguments.'''
args = self.get_args()
self.test = args.test
self.genome = args.genome
self.force = args.force
self.remove_all = args.remove_all
self.fastqs_too = args.fastqs_too
self.server_key = args.server
encd.set_server_key(self.server_key) # TODO: change to self.encd = Encd(self.server_key)
self.server = encd.get_server()
if self.server_key == "www":
self.acc_prefix = "ENCFF"
self.proj_name = dx.env_get_current_project()
if self.proj_name == None or args.project != None:
self.proj_name = args.project
if self.proj_name == None:
print "Please enter a '--project' to run in."
sys.exit(1)
self.project = dx.get_project(self.proj_name)
self.proj_id = self.project.get_id()
print "== Running in project [%s] and expect files already posted to the [%s] server ==" % \
(self.proj_name,self.server_key)
self.exp_ids = self.load_exp_list(args.experiments,verbose=args.verbose)
if len(self.exp_ids) == 0:
print >> sys.stderr, "No experiment id's requested."
self.ap.print_help()
sys.exit(1)
exp_count = 0
exp_removed = 0
exp_kept = 0
deprecates_removed = 0
total_removed = 0
for exp_id in self.exp_ids:
dx.clear_cache()
sys.stdout.flush() # Slow running job should flush to piped log
self.exp_id = exp_id
# 1) Lookup experiment type from encoded, based on accession
print "Working on %s..." % self.exp_id
self.exp = encd.get_exp(self.exp_id,must_find=True)
if self.exp == None or self.exp["status"] == "error":
print "Unable to locate experiment %s in encoded (%s)" % (self.exp_id, self.server_key)
continue
self.exp_type = encd.get_exp_type(self.exp_id,self.exp,self.EXPERIMENT_TYPES_SUPPORTED)
if self.exp_type == None:
continue
# 2) Locate the experiment accession named folder
# NOTE: genome and annotation are not known for this exp yet, so the umbrella folder is just based on exp_type
self.umbrella_folder = dx.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,self.exp_type,"posted",self.genome)
if args.test:
print "- Umbrella folder: " + self.umbrella_folder
self.exp_folder = dx.find_exp_folder(self.project,exp_id,self.umbrella_folder,warn=True)
if self.exp_folder == None:
continue
exp_count += 1
print "- Examining %s:%s for '%s' results..." % \
(self.proj_name, self.exp_folder, self.exp_type)
# Could be quick... remove everything!
if self.remove_all and self.force:
exp_removed += 1
if self.test:
print "* Would remove %s:%s and all results within..." % (self.proj_name, self.exp_folder)
else:
print "* Removing %s:%s and all results within..." % (self.proj_name, self.exp_folder)
dxpy.api.project_remove_folder(self.proj_id,{'folder':self.exp_folder,'recurse':True})
continue
# Remove any 'deprecated' subfolder
deprecated_folder = self.exp_folder + "deprecated/"
if dx.project_has_folder(self.project, deprecated_folder):
deprecates_removed += 1
if self.test:
print "* Would remove %s:%s and all results within..." % (self.proj_name, deprecated_folder)
else:
print "* Removing %s:%s and all results within..." % (self.proj_name, deprecated_folder)
dxpy.api.project_remove_folder(self.proj_id,{'folder':deprecated_folder,'recurse':True})
# 3) Given the experiment type, determine the expected results
self.pipeline = self.pipeline_specification(args,self.exp_type,self.exp_folder)
self.replicates = dx.find_replicate_folders(self.project,self.exp_folder, verbose=args.verbose)
# 4) Given expected results locate any files (by glob) that should have been posted for
# a) each single replicate (in replicate sub-folders named as reN_N/
# b) combined replicates in the experiment folder itself
files_expected = self.find_expected_files(self.exp_folder, self.replicates, verbose=args.verbose)
print "- Found %d files that are available to remove." % len(files_expected)
if len(files_expected) == 0:
continue
# 5) For each file that is available to be removed, determine if the file has been posted first.
files_to_remove = self.find_removable_files(files_expected, test=self.test, verbose=args.verbose)
print "- Found %d files that may be removed" % len(files_to_remove)
if len(files_to_remove) == 0:
print "- KEEPING: If even one file has not been posted, no files may be removed without force."
exp_kept += 1
continue
# 6) For each file that needs to be removed:
files_removed = 0
for (out_type,rep_tech,fid,QC_only) in files_to_remove:
sys.stdout.flush() # Slow running job should flush to piped log
if args.files != 0 and file_count >= args.files: # Short circuit for test
print "- Just trying %d file(s) by request" % file_count
partial = True
break
try:
# prove it exists before continuing.
file_name = dx.file_path_from_fid(fid)
except:
continue
if args.start_at != None:
if not file_name.endswith(args.start_at):
continue
else:
print "- Starting at %s" % (file_name)
args.start_at = None
if self.test:
print " * Would remove file %s..." % file_name
else:
print " * Removing file %s..." % file_name
dxpy.api.project_remove_objects(self.proj_id,{'objects':[fid]})
files_removed += 1
if not args.test:
print "- For %s processed %d file(s), removed %d files" % (self.exp_id, len(files_expected), files_removed)
else:
print "- For %s processed %d file(s), would remove %d files" % (self.exp_id, len(files_expected), files_removed)
total_removed += files_removed
if not args.test:
print "Processed %d experiment(s), erased %d, kept %d, removed %d deprecate folder(s) and %d file(s)" % \
(exp_count, exp_removed, exp_kept, deprecates_removed, total_removed)
else:
print "Processed %d experiment(s), would erase %d, would keep %d, would remove %d deprecate folder(s) and %d file(s)" % \
(exp_count, exp_removed, exp_kept, deprecates_removed, total_removed)
print "(finished)"
if __name__ == '__main__':
    # Command-line entry point: build the scrubber and run it end to end.
    Scrub().run()
| mit |
nemaniarjun/coala | tests/misc/ContextManagersTest.py | 31 | 5560 | import os
import subprocess
import sys
from tempfile import TemporaryDirectory
import unittest
from coala_utils.ContextManagers import (
change_directory, make_temp, prepare_file, retrieve_stdout,
retrieve_stderr, simulate_console_inputs, subprocess_timeout,
suppress_stdout)
from coalib.processes.Processing import create_process_group
process_group_timeout_test_code = """
import time, subprocess, sys;
p = subprocess.Popen([sys.executable,
"-c",
"import time; time.sleep(100)"]);
time.sleep(100);
"""
class ContextManagersTest(unittest.TestCase):
    """Tests for the context managers provided by coala_utils.ContextManagers."""
    def test_subprocess_timeout(self):
        """Check timeout flagging for single processes and process groups."""
        # A child sleeping longer than the timeout must be flagged and killed.
        p = subprocess.Popen([sys.executable,
                              '-c',
                              'import time; time.sleep(0.5);'],
                             stderr=subprocess.PIPE)
        with subprocess_timeout(p, 0.2) as timedout:
            retval = p.wait()
            p.stderr.close()
        self.assertEqual(timedout.value, True)
        self.assertNotEqual(retval, 0)
        # kill_pg=True must take down the whole process group.
        # NOTE(review): this 'with' has no 'as' clause, so the two asserts
        # below re-check the *previous* timedout flag -- looks unintended.
        p = create_process_group([sys.executable,
                                  "-c",
                                  process_group_timeout_test_code])
        with subprocess_timeout(p, 0.5, kill_pg=True):
            retval = p.wait()
        self.assertEqual(timedout.value, True)
        self.assertNotEqual(retval, 0)
        # A process that exits before the timeout must not be flagged.
        p = subprocess.Popen([sys.executable,
                              '-c',
                              'import time'])
        with subprocess_timeout(p, 0.5) as timedout:
            retval = p.wait()
        self.assertEqual(timedout.value, False)
        self.assertEqual(retval, 0)
        # A timeout of 0 disables the watchdog entirely.
        p = subprocess.Popen([sys.executable,
                              '-c',
                              'import time'])
        with subprocess_timeout(p, 0) as timedout:
            retval = p.wait()
        self.assertEqual(timedout.value, False)
        self.assertEqual(retval, 0)
    def test_suppress_stdout(self):
        """suppress_stdout() must swallow print() even with a broken stdout."""
        def print_func():
            print('func')
            raise NotImplementedError
        def no_print_func():
            with suppress_stdout():
                print('func')
            raise NotImplementedError
        old_stdout = sys.stdout
        sys.stdout = False
        # With stdout replaced by False, printing raises AttributeError ...
        self.assertRaises(AttributeError, print_func)
        # ... unless stdout is suppressed, letting the later raise surface.
        self.assertRaises(NotImplementedError, no_print_func)
        sys.stdout = old_stdout
    def test_retrieve_stdout(self):
        """retrieve_stdout() must capture text printed to sys.stdout."""
        with retrieve_stdout() as sio:
            print('test', file=sys.stdout)
            self.assertEqual(sio.getvalue(), 'test\n')
    def test_retrieve_stderr(self):
        """retrieve_stderr() must capture text printed to sys.stderr."""
        with retrieve_stderr() as sio:
            print('test', file=sys.stderr)
            self.assertEqual(sio.getvalue(), 'test\n')
    def test_simulate_console_inputs(self):
        """simulate_console_inputs() must feed queued values to input()."""
        with simulate_console_inputs(0, 1, 2) as generator:
            self.assertEqual(input(), 0)
            self.assertEqual(generator.last_input, 0)
            # Inputs may be appended while the simulation is running.
            generator.inputs.append(3)
            self.assertEqual(input(), 1)
            self.assertEqual(input(), 2)
            self.assertEqual(input(), 3)
            self.assertEqual(generator.last_input, 3)
        # Exhausting the queued inputs raises ValueError.
        with simulate_console_inputs('test'), self.assertRaises(ValueError):
            self.assertEqual(input(), 'test')
            input()
    def test_make_temp(self):
        """make_temp() must create a file that vanishes after the block."""
        with make_temp() as f_a:
            self.assertTrue(os.path.isfile(f_a))
            self.assertTrue(os.path.basename(f_a).startswith('tmp'))
        # The temporary file is removed on exit.
        self.assertFalse(os.path.isfile(f_a))
        with make_temp(suffix='.orig', prefix='pre') as f_b:
            self.assertTrue(f_b.endswith('.orig'))
            self.assertTrue(os.path.basename(f_b).startswith('pre'))
    def test_prepare_file(self):
        """prepare_file() must normalize lines and honor tempfile options."""
        with prepare_file(['line1', 'line2\n'],
                          '/file/name',
                          force_linebreaks=True,
                          create_tempfile=True) as (lines, filename):
            self.assertEqual(filename, '/file/name')
            self.assertEqual(lines, ['line1\n', 'line2\n'])
        with prepare_file(['line1', 'line2\n'],
                          None,
                          force_linebreaks=False,
                          create_tempfile=True) as (lines, filename):
            self.assertTrue(os.path.isfile(filename))
            self.assertEqual(lines, ['line1', 'line2\n'])
        with prepare_file(['line1', 'line2\n'],
                          None,
                          tempfile_kwargs={'suffix': '.test',
                                           'prefix': 'test_'},
                          force_linebreaks=False,
                          create_tempfile=True) as (lines, filename):
            self.assertTrue(os.path.isfile(filename))
            basename = os.path.basename(filename)
            self.assertTrue(basename.endswith('.test'))
            self.assertTrue(basename.startswith('test_'))
        with prepare_file(['line1', 'line2\n'],
                          None,
                          force_linebreaks=False,
                          create_tempfile=False) as (lines, filename):
            self.assertEqual(filename, 'dummy_file_name')
    def test_change_directory(self):
        """change_directory() must chdir in and restore the old cwd after."""
        old_dir = os.getcwd()
        with TemporaryDirectory('temp') as tempdir:
            tempdir = os.path.realpath(tempdir)
            with change_directory(tempdir):
                self.assertEqual(os.getcwd(), tempdir)
        self.assertEqual(os.getcwd(), old_dir)
| agpl-3.0 |
mrkm4ntr/incubator-airflow | airflow/migrations/versions/bbf4a7ad0465_remove_id_column_from_xcom.py | 6 | 1965 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Remove id column from xcom
Revision ID: bbf4a7ad0465
Revises: cf5dc11e79ad
Create Date: 2019-10-29 13:53:09.445943
"""
from alembic import op
from sqlalchemy import Column, Integer
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = 'bbf4a7ad0465'       # this migration
down_revision = 'cf5dc11e79ad'  # previous migration in the chain
branch_labels = None            # no named branch
depends_on = None               # no cross-branch dependency
def upgrade():
    """Apply Remove id column from xcom"""
    bind = op.get_bind()
    inspector = Inspector.from_engine(bind)
    with op.batch_alter_table('xcom') as batch_op:
        # Only rewrite the table when the surrogate 'id' column still exists,
        # making the migration safe to re-run.
        column_names = [column.get('name')
                        for column in inspector.get_columns("xcom")]
        if "id" in column_names:
            batch_op.drop_column('id')
            batch_op.drop_index('idx_xcom_dag_task_date')
            # The natural key replaces the dropped surrogate key.
            batch_op.create_primary_key('pk_xcom', ['dag_id', 'task_id', 'key', 'execution_date'])
def downgrade():
    """Unapply Remove id column from xcom"""
    with op.batch_alter_table('xcom') as batch_op:
        # Restore the surrogate key and the old secondary index.
        batch_op.drop_constraint('pk_xcom', type_='primary')
        batch_op.add_column(Column('id', Integer, primary_key=True))
        batch_op.create_index('idx_xcom_dag_task_date', ['dag_id', 'task_id', 'key', 'execution_date'])
| apache-2.0 |
tinloaf/home-assistant | homeassistant/components/fan/zwave.py | 4 | 2832 | """
Z-Wave platform that handles fans.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/fan.zwave/
"""
import logging
import math
from homeassistant.core import callback
from homeassistant.components.fan import (
DOMAIN, FanEntity, SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH,
SUPPORT_SET_SPEED)
from homeassistant.components import zwave
from homeassistant.helpers.dispatcher import async_dispatcher_connect
_LOGGER = logging.getLogger(__name__)
# Named speeds exposed to Home Assistant, in increasing order.
SPEED_LIST = [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
SUPPORTED_FEATURES = SUPPORT_SET_SPEED
# Value will first be divided to an integer
# (the raw 0-99 dimmer level is bucketed into 0-3, see ZwaveFan.update_properties)
VALUE_TO_SPEED = {
    0: SPEED_OFF,
    1: SPEED_LOW,
    2: SPEED_MEDIUM,
    3: SPEED_HIGH,
}
# Dimmer level (0-99) sent to the node for each named speed.
SPEED_TO_VALUE = {
    SPEED_OFF: 0,
    SPEED_LOW: 1,
    SPEED_MEDIUM: 50,
    SPEED_HIGH: 99,
}
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Old method of setting up Z-Wave fans."""
    # Kept for backward compatibility; setup now happens via config entry.
    pass
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Z-Wave Fan from Config Entry."""
    @callback
    def async_add_fan(fan):
        """Add Z-Wave Fan."""
        async_add_entities([fan])
    # Fans are discovered by the zwave component and delivered via dispatcher.
    async_dispatcher_connect(hass, 'zwave_new_fan', async_add_fan)
def get_device(values, **kwargs):
    """Create Z-Wave entity device."""
    # Called by the zwave discovery machinery; extra kwargs are ignored here.
    return ZwaveFan(values)
class ZwaveFan(zwave.ZWaveDeviceEntity, FanEntity):
    """Representation of a Z-Wave fan."""
    def __init__(self, values):
        """Initialize the Z-Wave fan device."""
        zwave.ZWaveDeviceEntity.__init__(self, values, DOMAIN)
        self.update_properties()
    def update_properties(self):
        """Handle data changes for node values."""
        # Bucket the raw dimmer level (0-99) into the 4 named speeds (0-3).
        value = math.ceil(self.values.primary.data * 3 / 100)
        self._state = VALUE_TO_SPEED[value]
    def set_speed(self, speed):
        """Set the speed of the fan."""
        self.node.set_dimmer(
            self.values.primary.value_id, SPEED_TO_VALUE[speed])
    def turn_on(self, speed=None, **kwargs):
        """Turn the device on."""
        if speed is None:
            # Value 255 tells device to return to previous value
            self.node.set_dimmer(self.values.primary.value_id, 255)
        else:
            self.set_speed(speed)
    def turn_off(self, **kwargs):
        """Turn the device off."""
        self.node.set_dimmer(self.values.primary.value_id, 0)
    @property
    def speed(self):
        """Return the current speed."""
        return self._state
    @property
    def speed_list(self):
        """Get the list of available speeds."""
        return SPEED_LIST
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORTED_FEATURES
| apache-2.0 |
qwefi/nova | smoketests/base.py | 15 | 7137 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import boto
from boto.ec2 import regioninfo
import commands
import httplib
import os
import paramiko
import sys
import time
import unittest
from smoketests import flags
# Human-readable list of suite names accepted by --suite.
SUITE_NAMES = '[image, instance, volume]'
FLAGS = flags.FLAGS
flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
flags.DEFINE_integer('ssh_tries', 3, 'Numer of times to try ssh')
class SmokeTestCase(unittest.TestCase):
    """Base class with EC2-API helpers for nova smoke tests (Python 2 / boto)."""
    def connect_ssh(self, ip, key_name):
        # Open an SSH connection as root using /tmp/<key_name>.pem, retrying up
        # to FLAGS.ssh_tries times on authentication/SSH errors.
        key = paramiko.RSAKey.from_private_key_file('/tmp/%s.pem' % key_name)
        tries = 0
        while(True):
            try:
                client = paramiko.SSHClient()
                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                client.connect(ip, username='root', pkey=key, timeout=5)
                return client
            except (paramiko.AuthenticationException, paramiko.SSHException):
                tries += 1
                if tries == FLAGS.ssh_tries:
                    raise
    def can_ping(self, ip, command="ping"):
        """Attempt to ping the specified IP, and give up after 1 second."""
        # NOTE(devcamcar): ping timeout flag is different in OSX.
        if sys.platform == 'darwin':
            timeout_flag = 't'
        else:
            timeout_flag = 'w'
        status, output = commands.getstatusoutput('%s -c1 -%s1 %s' %
                                                  (command, timeout_flag, ip))
        return status == 0
    def wait_for_running(self, instance, tries=60, wait=1):
        """Wait for instance to be running."""
        for x in xrange(tries):
            instance.update()
            if instance.state.startswith('running'):
                return True
            time.sleep(wait)
        else:
            # for/else: reached only when all tries are exhausted.
            return False
    def wait_for_deleted(self, instance, tries=60, wait=1):
        """Wait for instance to be deleted."""
        for x in xrange(tries):
            try:
                #NOTE(dprince): raises exception when instance id disappears
                instance.update(validate=True)
            except ValueError:
                return True
            time.sleep(wait)
        else:
            return False
    def wait_for_ping(self, ip, command="ping", tries=120):
        """Wait for ip to be pingable."""
        # NOTE(review): the 'else: return False' is attached to the *if*, so
        # this returns after the first attempt and 'tries' is never used --
        # probably meant to be a for/else like the other wait_for_* helpers.
        for x in xrange(tries):
            if self.can_ping(ip, command):
                return True
            else:
                return False
    def wait_for_ssh(self, ip, key_name, tries=30, wait=5):
        """Wait for ip to be sshable."""
        for x in xrange(tries):
            try:
                conn = self.connect_ssh(ip, key_name)
                conn.close()
            except Exception:
                time.sleep(wait)
            else:
                return True
        else:
            return False
    def connection_for_env(self, **kwargs):
        """
        Returns a boto ec2 connection for the current environment.
        """
        access_key = os.getenv('EC2_ACCESS_KEY')
        secret_key = os.getenv('EC2_SECRET_KEY')
        clc_url = os.getenv('EC2_URL')
        if not access_key or not secret_key or not clc_url:
            raise Exception('Missing EC2 environment variables. Please source '
                            'the appropriate novarc file before running this '
                            'test.')
        parts = self.split_clc_url(clc_url)
        # boto_v6 is only imported at module load when FLAGS.use_ipv6 is set.
        if FLAGS.use_ipv6:
            return boto_v6.connect_ec2(aws_access_key_id=access_key,
                                       aws_secret_access_key=secret_key,
                                       is_secure=parts['is_secure'],
                                       region=regioninfo.RegionInfo(None,
                                                                    'nova',
                                                                    parts['ip']),
                                       port=parts['port'],
                                       path='/services/Cloud',
                                       **kwargs)
        return boto.connect_ec2(aws_access_key_id=access_key,
                                aws_secret_access_key=secret_key,
                                is_secure=parts['is_secure'],
                                region=regioninfo.RegionInfo(None,
                                                             'nova',
                                                             parts['ip']),
                                port=parts['port'],
                                path='/services/Cloud',
                                **kwargs)
    def split_clc_url(self, clc_url):
        """Splits a cloud controller endpoint url."""
        # NOTE(review): httplib has no urlsplit; this line would raise
        # AttributeError -- urlparse.urlsplit is presumably intended.
        parts = httplib.urlsplit(clc_url)
        is_secure = parts.scheme == 'https'
        ip, port = parts.netloc.split(':')
        return {'ip': ip, 'port': int(port), 'is_secure': is_secure}
    def create_key_pair(self, conn, key_name):
        # Create a fresh keypair, saving the private key under /tmp.
        try:
            os.remove('/tmp/%s.pem' % key_name)
        except Exception:
            pass
        key = conn.create_key_pair(key_name)
        key.save('/tmp/')
        return key
    def delete_key_pair(self, conn, key_name):
        # Delete the keypair remotely and remove the local private key.
        conn.delete_key_pair(key_name)
        try:
            os.remove('/tmp/%s.pem' % key_name)
        except Exception:
            pass
    def bundle_image(self, image, tempdir='/tmp', kernel=False):
        # Shell out to euca-bundle-image; raises with the tool output on failure.
        cmd = 'euca-bundle-image -i %s -d %s' % (image, tempdir)
        if kernel:
            cmd += ' --kernel true'
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            raise Exception(output)
        return True
    def upload_image(self, bucket_name, image, tempdir='/tmp'):
        # Shell out to euca-upload-bundle; raises with the tool output on failure.
        cmd = 'euca-upload-bundle -b '
        cmd += '%s -m %s/%s.manifest.xml' % (bucket_name, tempdir, image)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            raise Exception(output)
        return True
    def delete_bundle_bucket(self, bucket_name):
        # Shell out to euca-delete-bundle; raises with the tool output on failure.
        cmd = 'euca-delete-bundle --clear -b %s' % (bucket_name)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            raise Exception(output)
        return True
# Shared scratch dictionary used by UserSmokeTestCase instances.
TEST_DATA = {}
if FLAGS.use_ipv6:
    # NOTE(review): 'global' at module scope is a no-op; the __import__ makes
    # boto_v6 available as a module-level name for connection_for_env().
    global boto_v6
    boto_v6 = __import__('boto_v6')
class UserSmokeTestCase(SmokeTestCase):
    """Smoke test base that binds an EC2 connection and shared test data."""
    def setUp(self):
        # All instances share the module-level TEST_DATA dict so state can be
        # passed between ordered smoke tests.
        global TEST_DATA
        self.conn = self.connection_for_env()
        self.data = TEST_DATA
| apache-2.0 |
edx-solutions/edx-platform | cms/djangoapps/course_creators/migrations/0001_initial.py | 5 | 1191 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the CourseCreator table that
    # tracks each Studio user's course-creation access state.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='CourseCreator',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('state_changed', models.DateTimeField(help_text='The date when state was last updated', verbose_name=u'state last updated', auto_now_add=True)),
                ('state', models.CharField(default=u'unrequested', help_text='Current course creator state', max_length=24, choices=[(u'unrequested', 'unrequested'), (u'pending', 'pending'), (u'granted', 'granted'), (u'denied', 'denied')])),
                ('note', models.CharField(help_text='Optional notes about this user (for example, why course creation access was denied)', max_length=512, blank=True)),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, help_text='Studio user', on_delete=models.CASCADE)),
            ],
        ),
    ]
| agpl-3.0 |
jmartinm/invenio | modules/oaiharvest/lib/oai_harvest_dblayer.py | 17 | 17170 | ## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import time
import zlib
from invenio.dbquery import run_sql, serialize_via_marshal
from invenio.bibrecord import create_records, record_extract_oai_id
class HistoryEntry:
    """
    Plain value object describing one row of the oaiHARVESTLOG table,
    i.e. one harvested (and possibly uploaded) record.
    """
    # Class-level defaults mirror the table columns.
    date_harvested = None
    date_inserted = None
    oai_id = ""
    record_id = 0
    bibupload_task_id = ""
    inserted_to_db = ""
    oai_src_id = 0
    def __init__(self, date_harvested, date_inserted, oai_src_id, oai_id, record_id, inserted_to_db, bibupload_task_id):
        self.date_harvested = date_harvested
        self.date_inserted = date_inserted
        self.oai_src_id = oai_src_id
        self.oai_id = oai_id
        self.record_id = record_id
        self.inserted_to_db = inserted_to_db
        self.bibupload_task_id = bibupload_task_id
    def __repr__(self):
        return str(self)
    def __str__(self):
        # Keep the historic "HistoryEntry(key: value, ..., )" formatting,
        # including the trailing ", )" -- log consumers may depend on it.
        parts = (("date_harvested", self.date_harvested),
                 ("date_inserted", self.date_inserted),
                 ("oai_id", self.oai_id),
                 ("record_id", self.record_id),
                 ("bibupload_task_id", self.bibupload_task_id),
                 ("inserted_to_db", self.inserted_to_db),
                 ("oai_src_id", self.oai_src_id))
        body = "".join("%s: %s, " % pair for pair in parts)
        return "HistoryEntry(" + body + ")"
def update_lastrun(index, runtime=None):
    """ A method that updates the lastrun of a repository
        successfully harvested.
        @param index: id of the oaiHARVEST row to update
        @param runtime: timestamp string; defaults to 'now'
        NOTE(review): returns 1 on success but a (0, error) tuple on failure --
        callers must handle both shapes. """
    try:
        if not runtime:
            runtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        sql = 'UPDATE oaiHARVEST SET lastrun=%s WHERE id=%s'
        run_sql(sql, (runtime, index))
        return 1
    except StandardError, e:
        return (0, e)
def create_oaiharvest_log_str(task_id, oai_src_id, xml_content):
    """
    Function which creates the harvesting logs: one oaiHARVESTLOG row per
    record found in the harvested MARCXML.
    @param task_id bibupload task id
    @param oai_src_id id of the harvesting source
    @param xml_content harvested MARCXML to extract OAI ids from
    """
    try:
        records = create_records(xml_content)
        for record in records:
            oai_id = record_extract_oai_id(record[0])
            query = "INSERT INTO oaiHARVESTLOG (id_oaiHARVEST, oai_id, date_harvested, bibupload_task_id) VALUES (%s, %s, NOW(), %s)"
            run_sql(query, (str(oai_src_id), str(oai_id), str(task_id)))
    except Exception, msg:
        # Best-effort logging: a failure to log must not abort harvesting.
        print "Logging exception : %s " % (str(msg),)
def get_history_entries_raw(query_suffix, sqlparameters):
    """
    Run a harvesting-history query assembled from a suffix (starting at
    WHERE) plus bind parameters, wrapping every row in a HistoryEntry.
    """
    query = ("SELECT date_harvested, date_inserted, id_oaiHARVEST, oai_id, id_bibrec, inserted_to_db, bibupload_task_id FROM oaiHARVESTLOG "
             + query_suffix)
    return [HistoryEntry(row[0], row[1],
                         int(row[2]), str(row[3]), int(row[4]),
                         str(row[5]), int(row[6]))
            for row in run_sql(query, sqlparameters)]
def get_history_entries(oai_src_id, monthdate, method = "harvested"):
    """Return HistoryEntry objects for one source and one month, ordered by
    either the harvesting date (default) or the insertion date."""
    sql_column = "date_inserted" if method == "inserted" else "date_harvested"
    query_suffix = ("WHERE id_oaiHARVEST = %s AND MONTH(" + sql_column
                    + ") = %s AND YEAR(" + sql_column + ") = %s ORDER BY "
                    + sql_column)
    params = (str(oai_src_id), str(monthdate.month), str(monthdate.year))
    return get_history_entries_raw(query_suffix, params)
def get_history_entries_for_day(oai_src_id, date, limit = -1, start = 0, method = "harvested"):
    """
    Returns harvesting history entries for a given day
    @param oai_src_id: harvesting source identifier
    @param date: Date designing the deserved day
    @param limit: How many records (at most) do we want to get
    @param start: From which index do we want to start ?
    @param method: method of getting data (two possible values "harvested" and "inserted")
       Describes if the harvesting or inserting data should be used
    """
    sql_column = "date_harvested"
    if method == "inserted":
        sql_column = "date_inserted"
    query_suffix = "WHERE id_oaiHARVEST = %s AND MONTH(" + sql_column + ") = %s AND YEAR(" + sql_column + ") = %s AND DAY(" + sql_column + ") = %s ORDER BY " + sql_column
    # limit <= 0 means "no LIMIT clause"; start/limit are ints built from code.
    if limit > 0:
        query_suffix += " LIMIT " + str(start) + "," + str(limit)
    return get_history_entries_raw(query_suffix, (str(oai_src_id), str(date.month), str(date.year), str(date.day)))
def get_entry_history(oai_id, start = 0, limit = -1 , method = "harvested"):
    """
    Returns harvesting history entries for a given OAI identifier ( Show results from multiple sources )
    @limit - How many records (at most) do we want to get
    @start - From which index do we want to start ?
    @method - method of getting data (two possible values "harvested" and "inserted")
       Describes if the harvesting or inserting data should be used
    """
    sql_column = "date_harvested"
    if method == "inserted":
        sql_column = "date_inserted"
    query_suffix = "WHERE oai_id = %s ORDER BY " + sql_column
    # limit <= 0 means "no LIMIT clause".
    if limit > 0:
        query_suffix += " LIMIT " + str(start) + "," + str(limit)
    return get_history_entries_raw(query_suffix, (str(oai_id),))
def get_month_logs_size(oai_src_id, date, method = "harvested"):
    """
    Function which returns number of inserts which took place in given month (splited into days)
    @param oai_src_id: harvesting source identifier
    @return: Dictionary of harvesting statistics - keys describe days. values - numbers of inserted recordds
    """
    sql_column = "date_harvested"
    if method == "inserted":
        sql_column = "date_inserted"
    query = "SELECT DAY(" + sql_column + "), COUNT(*) FROM oaiHARVESTLOG WHERE id_oaiHARVEST = %s AND MONTH(" + sql_column + ") = %s AND YEAR(" + sql_column + ")= %s GROUP BY DAY(" + sql_column+ ")"
    query_result = run_sql(query, (str(oai_src_id), str(date.month), str(date.year)))
    result = {}
    for entry in query_result:
        # Skip the 0 bucket that MySQL can return for NULL/zero dates.
        if int(entry[0]) != 0:
            result[int(entry[0])] = int(entry[1])
    return result
def get_day_logs_size(oai_src_id, date, method = "harvested"):
    """
    Function which returns number of inserts which took place in given day
    @param oai_src_id: harvesting source identifier
    @return: Number of inserts during the given day
    """
    sql_column = "date_harvested"
    if method == "inserted":
        sql_column = "date_inserted"
    query = "SELECT COUNT(*) FROM oaiHARVESTLOG WHERE id_oaiHARVEST = %s AND MONTH(" + sql_column + ") = %s AND YEAR(" + sql_column+ ")= %s AND DAY(" + sql_column + ") = %s"
    query_result = run_sql(query, (str(oai_src_id), str(date.month), str(date.year), str(date.day)))
    # COUNT(*) always yields one row; the loop just extracts it.
    for entry in query_result:
        return int(entry[0])
    return 0
def get_entry_logs_size(oai_id):
    """
    Return the number of harvesting-history rows recorded for one OAI id.
    """
    rows = run_sql("SELECT COUNT(*) FROM oaiHARVESTLOG WHERE oai_id = %s",
                   (str(oai_id),))
    if rows:
        return int(rows[0][0])
    return 0
##################################################################
### Here the functions to retrieve, modify, delete and add sources
##################################################################
def get_oai_src_by_id(oai_src_id):
    """
    Return the source-parameter dictionaries for the source with this id.
    """
    selector = {'id': oai_src_id}
    return get_oai_src(selector)
def get_oai_src_by_name(oai_src_name):
    """
    Return the source-parameter dictionaries for the source with this name.
    """
    selector = {'name': oai_src_name}
    return get_oai_src(selector)
def get_all_oai_src():
    """
    Return the source-parameter dictionaries for every configured source.
    """
    # No filter dict -> no WHERE clause -> all rows.
    return get_oai_src()
def get_oai_src(params=None):
    """
    Returns a list of dictionaries each representing a DB row for a OAI source.

    @param params: dict of column name -> value used to build the WHERE clause
        (e.g. {'id': 5}); None or empty selects every source.  Column names
        must come from trusted code, as they are spliced into the SQL text.
    @return: list of dicts with keys id, baseurl, metadataprefix, arguments,
        comment, name, lastrun, frequency, postprocess, setspecs.  A NULL
        'arguments' becomes {}, any other NULL becomes "".
    """
    # FIX: default changed from a shared mutable {} (classic Python pitfall)
    # to None; observable behavior for callers is unchanged.
    if params is None:
        params = {}
    sql = """SELECT id, baseurl, metadataprefix, arguments,
                    comment, name, lastrun,
                    frequency, postprocess, setspecs
             FROM oaiHARVEST"""
    sql_params = []
    if params:
        for key, value in params.items():
            if "WHERE" not in sql:
                sql += " WHERE"
            else:
                sql += " AND"
            sql += " " + key + "=%s"
            sql_params.append(value)
    new_res = []
    res = run_sql(sql, sql_params, with_dict=True)
    if res:
        for result in res:
            for key, value in result.iteritems():
                if value is None:
                    if key == "arguments":
                        value = {}
                    else:
                        value = ""
                    result[key] = value
            new_res.append(result)
    return new_res
def modify_oai_src(oai_src_id, oai_src_name, oai_src_baseurl, oai_src_prefix,
                   oai_src_frequency, oai_src_post, oai_src_comment,
                   oai_src_sets=None, oai_src_args=None):
    """Modifies a row's parameters.
       @return: (1, "") on success or (0, error) on failure.
       Lists are stored flattened: postprocess joined with '-', setspecs with ' ';
       the arguments dict is marshal-serialized."""
    if oai_src_sets is None:
        oai_src_sets = []
    if oai_src_post is None:
        oai_src_post = []
    if oai_src_args is None:
        oai_src_args = {}
    sql = """UPDATE oaiHARVEST
             SET baseurl=%s, metadataprefix=%s, arguments=%s, comment=%s,
             name=%s, frequency=%s, postprocess=%s, setspecs=%s
             WHERE id=%s"""
    try:
        run_sql(sql, (oai_src_baseurl,
                      oai_src_prefix,
                      serialize_via_marshal(oai_src_args),
                      oai_src_comment,
                      oai_src_name,
                      oai_src_frequency,
                      '-'.join(oai_src_post),
                      ' '.join(oai_src_sets),
                      oai_src_id))
        return (1, "")
    except StandardError, e:
        return (0, e)
def add_oai_src(oai_src_name, oai_src_baseurl, oai_src_prefix, oai_src_frequency,
oai_src_lastrun, oai_src_post, oai_src_comment,
oai_src_sets=None, oai_src_args=None):
"""Adds a new row to the database with the given parameters"""
if oai_src_sets is None:
oai_src_sets = []
if oai_src_args is None:
oai_src_args = {}
#return (0, str(serialize_via_marshal(oai_src_args)))
try:
if oai_src_lastrun in [0, "0"]: lastrun_mode = 'NULL'
else:
lastrun_mode = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# lastrun_mode = "'"+lastrun_mode+"'"
run_sql("INSERT INTO oaiHARVEST "
"(baseurl, metadataprefix, arguments, comment, name, lastrun, "
"frequency, postprocess, setspecs) VALUES "
"(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
(oai_src_baseurl, oai_src_prefix, serialize_via_marshal(oai_src_args), \
oai_src_comment, oai_src_name, lastrun_mode, oai_src_frequency, \
"-".join(oai_src_post), " ".join(oai_src_sets)))
return (1, "")
except StandardError, e:
return (0, e)
def delete_oai_src(oai_src_id):
"""Deletes a row from the database according to its id"""
try:
res = run_sql("DELETE FROM oaiHARVEST WHERE id=%s" % oai_src_id)
return (1, "")
except StandardError, e:
return (0, e)
def get_tot_oai_src():
    """Returns number of rows in the database.
       NOTE(review): returns "" on error while success returns an int --
       callers must cope with both."""
    try:
        sql = "SELECT COUNT(*) FROM oaiHARVEST"
        res = run_sql(sql)
        return res[0][0]
    except StandardError, e:
        return ""
def get_update_status():
    """Returns a table showing a list of all rows and their LastUpdate status.
       @return: tuple of (name, lastrun) rows, newest first; "" on error."""
    try:
        sql = "SELECT name,lastrun FROM oaiHARVEST ORDER BY lastrun desc"
        res = run_sql(sql)
        return res
    except StandardError, e:
        return ""
def get_next_schedule():
    """Returns the next scheduled oaiharvestrun tasks"""
    query = ("SELECT runtime,status FROM schTASK"
             " WHERE proc='oaiharvest' AND runtime > now()"
             " ORDER by runtime LIMIT 1")
    try:
        rows = run_sql(query)
    except StandardError:
        return ("", "")
    # Either the single soonest task, or an empty placeholder pair.
    if rows:
        return rows[0]
    return ("", "")
##################################################################
###### Here the functions related to Holding Pen operations ######
##################################################################
def get_holdingpen_entries(start=0, limit=0):
    """Return (oai_id, changeset_date, update_id) holding-pen rows by date.

    When either *start* or *limit* is positive, the result is windowed
    with "LIMIT start, limit" (note: a positive start with limit == 0
    yields an empty window, as in the original).
    """
    query = "SELECT oai_id, changeset_date, update_id FROM bibHOLDINGPEN ORDER BY changeset_date"
    if start > 0 or limit > 0:
        query += " LIMIT %s,%s" % (start, limit)
    return run_sql(query)
def get_holdingpen_entry(oai_id, date_inserted):
    """Return the changeset XML stored for (oai_id, date_inserted).

    Compressed entries are transparently decompressed; legacy rows that
    stored plain TEXT are returned unchanged.
    """
    rows = run_sql("SELECT changeset_xml FROM bibHOLDINGPEN WHERE changeset_date = %s AND oai_id = %s",
                   (str(date_inserted), str(oai_id)))
    changeset_xml = rows[0][0]
    try:
        return zlib.decompress(changeset_xml)
    except zlib.error:
        # Legacy: the xml can be in TEXT format; hand it back as-is.
        return changeset_xml
def delete_holdingpen_entry(hpupdate_id):
    """Remove the holding-pen entry whose changeset_id matches hpupdate_id."""
    query = "DELETE FROM bibHOLDINGPEN WHERE changeset_id=%s"
    run_sql(query, (hpupdate_id, ))
def get_holdingpen_day_fragment(year, month, day, limit, start, filter_key):
    """Return (oai_id, changeset_date, changeset_id) holding-pen rows for
    one day, optionally filtered by a substring of oai_id, windowed with
    "LIMIT start, limit".
    """
    filter_sql = ""
    params = ()
    if filter_key != "":
        # Bind the user-supplied filter as a query parameter instead of
        # splicing it into the SQL text (prevents SQL injection).
        filter_sql = " and oai_id like %s "
        params = ("%" + filter_key + "%", )
    # The numeric fields are interpolated with %i, which forces integers
    # and is therefore injection-safe.
    query = ("SELECT oai_id, changeset_date, changeset_id FROM bibHOLDINGPEN"
             " WHERE changeset_date > '%i-%i-%i 00:00:00'"
             " and changeset_date <= '%i-%i-%i 23:59:59' %s"
             " ORDER BY changeset_date LIMIT %i, %i"
             % (year, month, day, year, month, day, filter_sql, start, limit))
    if params:
        return run_sql(query, params)
    return run_sql(query)
def get_holdingpen_day_size(year, month, day, filter_key):
    """Return the number of holding-pen entries for the given day,
    optionally filtered by a substring of oai_id.
    """
    filter_sql = ""
    params = ()
    if filter_key != "":
        # Bind the user-supplied filter as a query parameter instead of
        # splicing it into the SQL text (prevents SQL injection).
        filter_sql = " and oai_id like %s "
        params = ("%" + filter_key + "%", )
    query = ("SELECT count(*) FROM bibHOLDINGPEN WHERE"
             " year(changeset_date) = '%i' and month(changeset_date) = '%i'"
             " and day(changeset_date) = '%i' %s"
             % (year, month, day, filter_sql))
    if params:
        res = run_sql(query, params)
    else:
        res = run_sql(query)
    return int(res[0][0])
def get_holdingpen_month(year, month, filter_key):
    """Return (day, count) statistics for holding-pen entries in a month,
    optionally filtered by a substring of oai_id.
    """
    filter_sql = ""
    params = ()
    if filter_key != "":
        # Bind the user-supplied filter as a query parameter instead of
        # splicing it into the SQL text (prevents SQL injection).
        filter_sql = " and oai_id like %s "
        params = ("%" + filter_key + "%", )
    query = ("select day(changeset_date), count(*) from bibHOLDINGPEN"
             " where year(changeset_date) = '%i' and month(changeset_date) = '%i' %s"
             " group by day(changeset_date)" % (year, month, filter_sql))
    if params:
        return run_sql(query, params)
    return run_sql(query)
def get_holdingpen_year(year, filter_key):
    """Return (month, count) statistics for holding-pen entries in a year,
    optionally filtered by a substring of oai_id.
    """
    filter_sql = ""
    params = ()
    if filter_key != "":
        # Bind the user-supplied filter as a query parameter instead of
        # splicing it into the SQL text (prevents SQL injection).
        filter_sql = " and oai_id like %s "
        params = ("%" + filter_key + "%", )
    query = ("select month(changeset_date), count(*) from bibHOLDINGPEN"
             " where year(changeset_date) = '%i' %s"
             " group by month(changeset_date)" % (year, filter_sql))
    if params:
        return run_sql(query, params)
    return run_sql(query)
def get_holdingpen_years(filter_key):
    """Return (year, count) statistics for all records in the holding pen,
    optionally filtered by a substring of oai_id.
    """
    filter_sql = ""
    params = ()
    if filter_key != "":
        # Bind the user-supplied filter as a query parameter instead of
        # splicing it into the SQL text (prevents SQL injection).  Note
        # this clause starts the WHERE (no prior conditions here).
        filter_sql = " where oai_id like %s "
        params = ("%" + filter_key + "%", )
    query = ("select year(changeset_date), count(*) changeset_date"
             " from bibHOLDINGPEN %s group by year(changeset_date)"
             % (filter_sql,))
    if params:
        return run_sql(query, params)
    return run_sql(query)
def get_holdingpen_entry_details(hpupdate_id):
    """Return the details of a Holding Pen entry as a tuple:
    (oai_id, record_id, date_inserted, content).

    The stored XML is transparently decompressed; legacy rows that stored
    plain TEXT are returned unchanged.  Returns None when no entry matches
    hpupdate_id (the original raised IndexError in that case).
    """
    query = "SELECT oai_id, id_bibrec, changeset_date, changeset_xml FROM bibHOLDINGPEN WHERE changeset_id=%s"
    res = run_sql(query, (hpupdate_id,))
    if not res:
        # No such entry: fail soft instead of crashing on res[0].
        return None
    oai_id, recid, date_inserted, changeset_xml = res[0]
    try:
        changeset_xml = zlib.decompress(changeset_xml)
    except zlib.error:
        # Legacy: the xml can be in TEXT format; leave it unchanged.
        pass
    return (oai_id, recid, date_inserted, changeset_xml)
| gpl-2.0 |
amondot/QGIS | python/console/console.py | 3 | 34587 | # -*- coding:utf-8 -*-
"""
/***************************************************************************
Python Console for QGIS
-------------------
begin : 2012-09-10
copyright : (C) 2012 by Salvatore Larosa
email : lrssvtml (at) gmail (dot) com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Some portions of code were taken from https://code.google.com/p/pydee/
"""
from PyQt4.QtCore import Qt, QTimer, QSettings, QCoreApplication, QSize, QByteArray, QFileInfo, SIGNAL
from PyQt4.QtGui import QDockWidget, QToolBar, QToolButton, QWidget, QSplitter, QTreeWidget, QAction, QFileDialog, QCheckBox, QSizePolicy, QMenu, QGridLayout, QApplication
from PyQt4 import pyqtconfig
from qgis.utils import iface
from console_sci import ShellScintilla
from console_output import ShellOutputScintilla
from console_editor import EditorTabWidget
from console_settings import optionsDialog
from qgis.core import QgsApplication, QgsContextHelp
from qgis.gui import QgsFilterLineEdit
import sys
_console = None
def show_console():
    """Open (or toggle) the QGIS Python console dock; called from QGIS."""
    global _console
    if _console is not None:
        # Toggle visibility, focusing the console when it becomes visible
        # so the user can start typing immediately.
        visible = not _console.isVisible()
        _console.setVisible(visible)
        if visible:
            _console.activate()
    else:
        # First request: create the dock lazily and force it to show even
        # if the saved window state restored it as hidden.
        _console = PythonConsole(iface.mainWindow() if iface else None)
        _console.show()
        # Defer focusing so it also works while the dock is not yet visible.
        QTimer.singleShot(0, _console.activate)
    ## Shows help on the first launch of the console only.
    settings = QSettings()
    if settings.value('pythonConsole/contextHelpOnFirstLaunch', True, type=bool):
        QgsContextHelp.run( "PythonConsole" )
        settings.setValue('pythonConsole/contextHelpOnFirstLaunch', False)
_old_stdout = sys.stdout
_console_output = None
# hook for python console so all output will be redirected
# and then shown in console
def console_displayhook(obj):
    """Display hook for the console: remember the last evaluated object so
    the redirected output can be shown inside the console."""
    global _console_output
    _console_output = obj
class PythonConsole(QDockWidget):
    """Dock widget hosting the Python console inside the QGIS main window."""

    def __init__(self, parent=None):
        QDockWidget.__init__(self, parent)
        self.setWindowTitle(QCoreApplication.translate("PythonConsole", "Python Console"))
        self.setObjectName("PythonConsole")
        #self.setAllowedAreas(Qt.BottomDockWidgetArea)
        self.console = PythonConsoleWidget(self)
        self.setWidget(self.console)
        self.setFocusProxy(self.console)
        # Restore the dock position from the stored main-window state,
        # falling back to the bottom dock area on first run.
        if iface and not iface.mainWindow().restoreDockWidget(self):
            iface.mainWindow().addDockWidget(Qt.BottomDockWidgetArea, self)

    def activate(self):
        """Raise the dock and give it keyboard focus."""
        self.activateWindow()
        self.raise_()
        QDockWidget.setFocus(self)

    def closeEvent(self, event):
        """Persist the console settings before the dock closes."""
        self.console.saveSettingsConsole()
        QWidget.closeEvent(self, event)
class PythonConsoleWidget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.setWindowTitle(QCoreApplication.translate("PythonConsole", "Python Console"))
self.settings = QSettings()
self.shell = ShellScintilla(self)
self.setFocusProxy(self.shell)
self.shellOut = ShellOutputScintilla(self)
self.tabEditorWidget = EditorTabWidget(self)
##------------ UI -------------------------------
self.splitterEditor = QSplitter(self)
self.splitterEditor.setOrientation(Qt.Horizontal)
self.splitterEditor.setHandleWidth(6)
self.splitterEditor.setChildrenCollapsible(True)
self.splitter = QSplitter(self.splitterEditor)
self.splitter.setOrientation(Qt.Vertical)
self.splitter.setHandleWidth(3)
self.splitter.setChildrenCollapsible(False)
self.splitter.addWidget(self.shellOut)
self.splitter.addWidget(self.shell)
#self.splitterEditor.addWidget(self.tabEditorWidget)
self.splitterObj = QSplitter(self.splitterEditor)
self.splitterObj.setHandleWidth(3)
self.splitterObj.setOrientation(Qt.Horizontal)
#self.splitterObj.setSizes([0, 0])
#self.splitterObj.setStretchFactor(0, 1)
self.widgetEditor = QWidget(self.splitterObj)
self.widgetFind = QWidget(self)
self.listClassMethod = QTreeWidget(self.splitterObj)
self.listClassMethod.setColumnCount(2)
objInspLabel = QCoreApplication.translate("PythonConsole", "Object Inspector")
self.listClassMethod.setHeaderLabels([objInspLabel, ''])
self.listClassMethod.setColumnHidden(1, True)
self.listClassMethod.setAlternatingRowColors(True)
#self.splitterEditor.addWidget(self.widgetEditor)
#self.splitterObj.addWidget(self.listClassMethod)
#self.splitterObj.addWidget(self.widgetEditor)
# Hide side editor on start up
self.splitterObj.hide()
self.listClassMethod.hide()
# Hide search widget on start up
self.widgetFind.hide()
sizes = self.splitter.sizes()
self.splitter.setSizes(sizes)
##----------------Restore Settings------------------------------------
self.restoreSettingsConsole()
##------------------Toolbar Editor-------------------------------------
## Action for Open File
openFileBt = QCoreApplication.translate("PythonConsole", "Open file")
self.openFileButton = QAction(self)
self.openFileButton.setCheckable(False)
self.openFileButton.setEnabled(True)
self.openFileButton.setIcon(QgsApplication.getThemeIcon("console/iconOpenConsole.png"))
self.openFileButton.setMenuRole(QAction.PreferencesRole)
self.openFileButton.setIconVisibleInMenu(True)
self.openFileButton.setToolTip(openFileBt)
self.openFileButton.setText(openFileBt)
## Action for Save File
saveFileBt = QCoreApplication.translate("PythonConsole", "Save")
self.saveFileButton = QAction(self)
self.saveFileButton.setCheckable(False)
self.saveFileButton.setEnabled(False)
self.saveFileButton.setIcon(QgsApplication.getThemeIcon("console/iconSaveConsole.png"))
self.saveFileButton.setMenuRole(QAction.PreferencesRole)
self.saveFileButton.setIconVisibleInMenu(True)
self.saveFileButton.setToolTip(saveFileBt)
self.saveFileButton.setText(saveFileBt)
## Action for Save File As
saveAsFileBt = QCoreApplication.translate("PythonConsole", "Save As...")
self.saveAsFileButton = QAction(self)
self.saveAsFileButton.setCheckable(False)
self.saveAsFileButton.setEnabled(True)
self.saveAsFileButton.setIcon(QgsApplication.getThemeIcon("console/iconSaveAsConsole.png"))
self.saveAsFileButton.setMenuRole(QAction.PreferencesRole)
self.saveAsFileButton.setIconVisibleInMenu(True)
self.saveAsFileButton.setToolTip(saveAsFileBt)
self.saveAsFileButton.setText(saveAsFileBt)
## Action Cut
cutEditorBt = QCoreApplication.translate("PythonConsole", "Cut")
self.cutEditorButton = QAction(self)
self.cutEditorButton.setCheckable(False)
self.cutEditorButton.setEnabled(True)
self.cutEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconCutEditorConsole.png"))
self.cutEditorButton.setMenuRole(QAction.PreferencesRole)
self.cutEditorButton.setIconVisibleInMenu(True)
self.cutEditorButton.setToolTip(cutEditorBt)
self.cutEditorButton.setText(cutEditorBt)
## Action Copy
copyEditorBt = QCoreApplication.translate("PythonConsole", "Copy")
self.copyEditorButton = QAction(self)
self.copyEditorButton.setCheckable(False)
self.copyEditorButton.setEnabled(True)
self.copyEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconCopyEditorConsole.png"))
self.copyEditorButton.setMenuRole(QAction.PreferencesRole)
self.copyEditorButton.setIconVisibleInMenu(True)
self.copyEditorButton.setToolTip(copyEditorBt)
self.copyEditorButton.setText(copyEditorBt)
## Action Paste
pasteEditorBt = QCoreApplication.translate("PythonConsole", "Paste")
self.pasteEditorButton = QAction(self)
self.pasteEditorButton.setCheckable(False)
self.pasteEditorButton.setEnabled(True)
self.pasteEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconPasteEditorConsole.png"))
self.pasteEditorButton.setMenuRole(QAction.PreferencesRole)
self.pasteEditorButton.setIconVisibleInMenu(True)
self.pasteEditorButton.setToolTip(pasteEditorBt)
self.pasteEditorButton.setText(pasteEditorBt)
## Action Run Script (subprocess)
runScriptEditorBt = QCoreApplication.translate("PythonConsole", "Run script")
self.runScriptEditorButton = QAction(self)
self.runScriptEditorButton.setCheckable(False)
self.runScriptEditorButton.setEnabled(True)
self.runScriptEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconRunScriptConsole.png"))
self.runScriptEditorButton.setMenuRole(QAction.PreferencesRole)
self.runScriptEditorButton.setIconVisibleInMenu(True)
self.runScriptEditorButton.setToolTip(runScriptEditorBt)
self.runScriptEditorButton.setText(runScriptEditorBt)
## Action Run Script (subprocess)
commentEditorBt = QCoreApplication.translate("PythonConsole", "Comment")
self.commentEditorButton = QAction(self)
self.commentEditorButton.setCheckable(False)
self.commentEditorButton.setEnabled(True)
self.commentEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconCommentEditorConsole.png"))
self.commentEditorButton.setMenuRole(QAction.PreferencesRole)
self.commentEditorButton.setIconVisibleInMenu(True)
self.commentEditorButton.setToolTip(commentEditorBt)
self.commentEditorButton.setText(commentEditorBt)
## Action Run Script (subprocess)
uncommentEditorBt = QCoreApplication.translate("PythonConsole", "Uncomment")
self.uncommentEditorButton = QAction(self)
self.uncommentEditorButton.setCheckable(False)
self.uncommentEditorButton.setEnabled(True)
self.uncommentEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconUncommentEditorConsole.png"))
self.uncommentEditorButton.setMenuRole(QAction.PreferencesRole)
self.uncommentEditorButton.setIconVisibleInMenu(True)
self.uncommentEditorButton.setToolTip(uncommentEditorBt)
self.uncommentEditorButton.setText(uncommentEditorBt)
## Action for Object browser
objList = QCoreApplication.translate("PythonConsole", "Object Inspector")
self.objectListButton = QAction(self)
self.objectListButton.setCheckable(True)
self.objectListButton.setEnabled(self.settings.value("pythonConsole/enableObjectInsp",
False, type=bool))
self.objectListButton.setIcon(QgsApplication.getThemeIcon("console/iconClassBrowserConsole.png"))
self.objectListButton.setMenuRole(QAction.PreferencesRole)
self.objectListButton.setIconVisibleInMenu(True)
self.objectListButton.setToolTip(objList)
self.objectListButton.setText(objList)
## Action for Find text
findText = QCoreApplication.translate("PythonConsole", "Find Text")
self.findTextButton = QAction(self)
self.findTextButton.setCheckable(True)
self.findTextButton.setEnabled(True)
self.findTextButton.setIcon(QgsApplication.getThemeIcon("console/iconSearchEditorConsole.png"))
self.findTextButton.setMenuRole(QAction.PreferencesRole)
self.findTextButton.setIconVisibleInMenu(True)
self.findTextButton.setToolTip(findText)
self.findTextButton.setText(findText)
##----------------Toolbar Console-------------------------------------
## Action Show Editor
showEditor = QCoreApplication.translate("PythonConsole", "Show editor")
self.showEditorButton = QAction(self)
self.showEditorButton.setEnabled(True)
self.showEditorButton.setCheckable(True)
self.showEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconShowEditorConsole.png"))
self.showEditorButton.setMenuRole(QAction.PreferencesRole)
self.showEditorButton.setIconVisibleInMenu(True)
self.showEditorButton.setToolTip(showEditor)
self.showEditorButton.setText(showEditor)
## Action for Clear button
clearBt = QCoreApplication.translate("PythonConsole", "Clear console")
self.clearButton = QAction(self)
self.clearButton.setCheckable(False)
self.clearButton.setEnabled(True)
self.clearButton.setIcon(QgsApplication.getThemeIcon("console/iconClearConsole.png"))
self.clearButton.setMenuRole(QAction.PreferencesRole)
self.clearButton.setIconVisibleInMenu(True)
self.clearButton.setToolTip(clearBt)
self.clearButton.setText(clearBt)
## Action for settings
optionsBt = QCoreApplication.translate("PythonConsole", "Settings")
self.optionsButton = QAction(self)
self.optionsButton.setCheckable(False)
self.optionsButton.setEnabled(True)
self.optionsButton.setIcon(QgsApplication.getThemeIcon("console/iconSettingsConsole.png"))
self.optionsButton.setMenuRole(QAction.PreferencesRole)
self.optionsButton.setIconVisibleInMenu(True)
self.optionsButton.setToolTip(optionsBt)
self.optionsButton.setText(optionsBt)
## Action menu for class
actionClassBt = QCoreApplication.translate("PythonConsole", "Import Class")
self.actionClass = QAction(self)
self.actionClass.setCheckable(False)
self.actionClass.setEnabled(True)
self.actionClass.setIcon(QgsApplication.getThemeIcon("console/iconClassConsole.png"))
self.actionClass.setMenuRole(QAction.PreferencesRole)
self.actionClass.setIconVisibleInMenu(True)
self.actionClass.setToolTip(actionClassBt)
self.actionClass.setText(actionClassBt)
## Import Processing class
loadProcessingBt = QCoreApplication.translate("PythonConsole", "Import Processing class")
self.loadProcessingButton = QAction(self)
self.loadProcessingButton.setCheckable(False)
self.loadProcessingButton.setEnabled(True)
self.loadProcessingButton.setIcon(QgsApplication.getThemeIcon("console/iconProcessingConsole.png"))
self.loadProcessingButton.setMenuRole(QAction.PreferencesRole)
self.loadProcessingButton.setIconVisibleInMenu(True)
self.loadProcessingButton.setToolTip(loadProcessingBt)
self.loadProcessingButton.setText(loadProcessingBt)
## Import QtCore class
loadQtCoreBt = QCoreApplication.translate("PythonConsole", "Import PyQt.QtCore class")
self.loadQtCoreButton = QAction(self)
self.loadQtCoreButton.setCheckable(False)
self.loadQtCoreButton.setEnabled(True)
self.loadQtCoreButton.setIcon(QgsApplication.getThemeIcon("console/iconQtCoreConsole.png"))
self.loadQtCoreButton.setMenuRole(QAction.PreferencesRole)
self.loadQtCoreButton.setIconVisibleInMenu(True)
self.loadQtCoreButton.setToolTip(loadQtCoreBt)
self.loadQtCoreButton.setText(loadQtCoreBt)
## Import QtGui class
loadQtGuiBt = QCoreApplication.translate("PythonConsole", "Import PyQt.QtGui class")
self.loadQtGuiButton = QAction(self)
self.loadQtGuiButton.setCheckable(False)
self.loadQtGuiButton.setEnabled(True)
self.loadQtGuiButton.setIcon(QgsApplication.getThemeIcon("console/iconQtGuiConsole.png"))
self.loadQtGuiButton.setMenuRole(QAction.PreferencesRole)
self.loadQtGuiButton.setIconVisibleInMenu(True)
self.loadQtGuiButton.setToolTip(loadQtGuiBt)
self.loadQtGuiButton.setText(loadQtGuiBt)
## Action for Run script
runBt = QCoreApplication.translate("PythonConsole", "Run command")
self.runButton = QAction(self)
self.runButton.setCheckable(False)
self.runButton.setEnabled(True)
self.runButton.setIcon(QgsApplication.getThemeIcon("console/iconRunConsole.png"))
self.runButton.setMenuRole(QAction.PreferencesRole)
self.runButton.setIconVisibleInMenu(True)
self.runButton.setToolTip(runBt)
self.runButton.setText(runBt)
## Help action
helpBt = QCoreApplication.translate("PythonConsole", "Help")
self.helpButton = QAction(self)
self.helpButton.setCheckable(False)
self.helpButton.setEnabled(True)
self.helpButton.setIcon(QgsApplication.getThemeIcon("console/iconHelpConsole.png"))
self.helpButton.setMenuRole(QAction.PreferencesRole)
self.helpButton.setIconVisibleInMenu(True)
self.helpButton.setToolTip(helpBt)
self.helpButton.setText(helpBt)
self.toolBar = QToolBar()
self.toolBar.setEnabled(True)
self.toolBar.setFocusPolicy(Qt.NoFocus)
self.toolBar.setContextMenuPolicy(Qt.DefaultContextMenu)
self.toolBar.setLayoutDirection(Qt.LeftToRight)
self.toolBar.setIconSize(QSize(16, 16))
self.toolBar.setOrientation(Qt.Vertical)
self.toolBar.setMovable(True)
self.toolBar.setFloatable(True)
self.toolBar.addAction(self.clearButton)
self.toolBar.addAction(self.actionClass)
self.toolBar.addAction(self.runButton)
self.toolBar.addSeparator()
self.toolBar.addAction(self.showEditorButton)
self.toolBar.addSeparator()
self.toolBar.addAction(self.optionsButton)
self.toolBar.addAction(self.helpButton)
self.toolBarEditor = QToolBar()
# self.toolBarEditor.setStyleSheet('QToolBar{background-color: rgb(%s, %s, %s' % tuple(bkgrcolor) + ');\
# border-right: 1px solid rgb(%s, %s, %s' % tuple(bordercl) + ');}')
self.toolBarEditor.setEnabled(False)
self.toolBarEditor.setFocusPolicy(Qt.NoFocus)
self.toolBarEditor.setContextMenuPolicy(Qt.DefaultContextMenu)
self.toolBarEditor.setLayoutDirection(Qt.LeftToRight)
self.toolBarEditor.setIconSize(QSize(16, 16))
self.toolBarEditor.setOrientation(Qt.Vertical)
self.toolBarEditor.setMovable(True)
self.toolBarEditor.setFloatable(True)
self.toolBarEditor.addAction(self.openFileButton)
self.toolBarEditor.addSeparator()
self.toolBarEditor.addAction(self.saveFileButton)
self.toolBarEditor.addAction(self.saveAsFileButton)
self.toolBarEditor.addSeparator()
self.toolBarEditor.addAction(self.findTextButton)
self.toolBarEditor.addSeparator()
self.toolBarEditor.addAction(self.cutEditorButton)
self.toolBarEditor.addAction(self.copyEditorButton)
self.toolBarEditor.addAction(self.pasteEditorButton)
self.toolBarEditor.addSeparator()
self.toolBarEditor.addAction(self.commentEditorButton)
self.toolBarEditor.addAction(self.uncommentEditorButton)
self.toolBarEditor.addSeparator()
self.toolBarEditor.addAction(self.objectListButton)
self.toolBarEditor.addSeparator()
self.toolBarEditor.addAction(self.runScriptEditorButton)
## Menu Import Class
self.classMenu = QMenu()
self.classMenu.addAction(self.loadProcessingButton)
self.classMenu.addAction(self.loadQtCoreButton)
self.classMenu.addAction(self.loadQtGuiButton)
cM = self.toolBar.widgetForAction(self.actionClass)
cM.setMenu(self.classMenu)
cM.setPopupMode(QToolButton.InstantPopup)
self.widgetButton = QWidget()
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widgetButton.sizePolicy().hasHeightForWidth())
self.widgetButton.setSizePolicy(sizePolicy)
self.widgetButtonEditor = QWidget(self.widgetEditor)
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widgetButtonEditor.sizePolicy().hasHeightForWidth())
self.widgetButtonEditor.setSizePolicy(sizePolicy)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.shellOut.sizePolicy().hasHeightForWidth())
self.shellOut.setSizePolicy(sizePolicy)
self.shellOut.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.shell.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
##------------ Layout -------------------------------
self.mainLayout = QGridLayout(self)
self.mainLayout.setMargin(0)
self.mainLayout.setSpacing(0)
self.mainLayout.addWidget(self.widgetButton, 0, 0, 1, 1)
self.mainLayout.addWidget(self.splitterEditor, 0, 1, 1, 1)
self.layoutEditor = QGridLayout(self.widgetEditor)
self.layoutEditor.setMargin(0)
self.layoutEditor.setSpacing(0)
self.layoutEditor.addWidget(self.widgetButtonEditor, 0, 0, 2, 1)
self.layoutEditor.addWidget(self.tabEditorWidget, 0, 1, 1, 1)
self.layoutEditor.addWidget(self.widgetFind, 1, 1, 1, 1)
self.toolBarLayout = QGridLayout(self.widgetButton)
self.toolBarLayout.setMargin(0)
self.toolBarLayout.setSpacing(0)
self.toolBarLayout.addWidget(self.toolBar)
self.toolBarEditorLayout = QGridLayout(self.widgetButtonEditor)
self.toolBarEditorLayout.setMargin(0)
self.toolBarEditorLayout.setSpacing(0)
self.toolBarEditorLayout.addWidget(self.toolBarEditor)
## Layout for the find widget
self.layoutFind = QGridLayout(self.widgetFind)
self.layoutFind.setContentsMargins(0, 0, 0, 0)
self.lineEditFind = QgsFilterLineEdit()
placeHolderTxt = QCoreApplication.translate("PythonConsole", "Enter text to find...")
if pyqtconfig.Configuration().qt_version >= 0x40700:
self.lineEditFind.setPlaceholderText(placeHolderTxt)
else:
self.lineEditFind.setToolTip(placeHolderTxt)
self.findNextButton = QToolButton()
self.findNextButton.setEnabled(False)
toolTipfindNext = QCoreApplication.translate("PythonConsole", "Find Next")
self.findNextButton.setToolTip(toolTipfindNext)
self.findNextButton.setIcon(QgsApplication.getThemeIcon("console/iconSearchNextEditorConsole.png"))
self.findNextButton.setIconSize(QSize(24, 24))
self.findNextButton.setAutoRaise(True)
self.findPrevButton = QToolButton()
self.findPrevButton.setEnabled(False)
toolTipfindPrev = QCoreApplication.translate("PythonConsole", "Find Previous")
self.findPrevButton.setToolTip(toolTipfindPrev)
self.findPrevButton.setIcon(QgsApplication.getThemeIcon("console/iconSearchPrevEditorConsole.png"))
self.findPrevButton.setIconSize(QSize(24, 24))
self.findPrevButton.setAutoRaise(True)
self.caseSensitive = QCheckBox()
caseSensTr = QCoreApplication.translate("PythonConsole", "Case Sensitive")
self.caseSensitive.setText(caseSensTr)
self.wholeWord = QCheckBox()
wholeWordTr = QCoreApplication.translate("PythonConsole", "Whole Word")
self.wholeWord.setText(wholeWordTr)
self.wrapAround = QCheckBox()
self.wrapAround.setChecked(True)
wrapAroundTr = QCoreApplication.translate("PythonConsole", "Wrap Around")
self.wrapAround.setText(wrapAroundTr)
self.layoutFind.addWidget(self.lineEditFind, 0, 1, 1, 1)
self.layoutFind.addWidget(self.findPrevButton, 0, 2, 1, 1)
self.layoutFind.addWidget(self.findNextButton, 0, 3, 1, 1)
self.layoutFind.addWidget(self.caseSensitive, 0, 4, 1, 1)
self.layoutFind.addWidget(self.wholeWord, 0, 5, 1, 1)
self.layoutFind.addWidget(self.wrapAround, 0, 6, 1, 1)
##------------ Add first Tab in Editor -------------------------------
#self.tabEditorWidget.newTabEditor(tabName='first', filename=None)
##------------ Signal -------------------------------
self.findTextButton.toggled.connect(self.findTextEditor)
self.objectListButton.toggled.connect(self.toggleObjectListWidget)
self.commentEditorButton.triggered.connect(self.commentCode)
self.uncommentEditorButton.triggered.connect(self.uncommentCode)
self.runScriptEditorButton.triggered.connect(self.runScriptEditor)
self.cutEditorButton.triggered.connect(self.cutEditor)
self.copyEditorButton.triggered.connect(self.copyEditor)
self.pasteEditorButton.triggered.connect(self.pasteEditor)
self.showEditorButton.toggled.connect(self.toggleEditor)
self.clearButton.triggered.connect(self.shellOut.clearConsole)
self.optionsButton.triggered.connect(self.openSettings)
self.loadProcessingButton.triggered.connect(self.processing)
self.loadQtCoreButton.triggered.connect(self.qtCore)
self.loadQtGuiButton.triggered.connect(self.qtGui)
self.runButton.triggered.connect(self.shell.entered)
self.openFileButton.triggered.connect(self.openScriptFile)
self.saveFileButton.triggered.connect(self.saveScriptFile)
self.saveAsFileButton.triggered.connect(self.saveAsScriptFile)
self.helpButton.triggered.connect(self.openHelp)
self.connect(self.listClassMethod, SIGNAL('itemClicked(QTreeWidgetItem*, int)'),
self.onClickGoToLine)
self.lineEditFind.returnPressed.connect(self._findText)
self.findNextButton.clicked.connect(self._findNext)
self.findPrevButton.clicked.connect(self._findPrev)
self.lineEditFind.textChanged.connect(self._textFindChanged)
def _findText(self):
    """Search forward in the current editor (triggered on Return in the find bar)."""
    self.tabEditorWidget.currentWidget().newEditor.findText(True)
def _findNext(self):
    """Search forward in the current editor for the find-bar text."""
    self.tabEditorWidget.currentWidget().newEditor.findText(True)
def _findPrev(self):
    """Search backward in the current editor for the find-bar text."""
    self.tabEditorWidget.currentWidget().newEditor.findText(False)
def _textFindChanged(self):
    """Enable/disable the find buttons as the search text changes."""
    if self.lineEditFind.text():
        self.findNextButton.setEnabled(True)
        self.findPrevButton.setEnabled(True)
    else:
        # Empty pattern: reset any styling applied to the line edit and
        # disable searching.
        self.lineEditFind.setStyleSheet('')
        self.findNextButton.setEnabled(False)
        self.findPrevButton.setEnabled(False)
def onClickGoToLine(self, item, column):
    """Jump the editor to the item clicked in the Object Inspector tree.

    Column 1 of the item holds either the literal string 'syntaxError'
    (re-run the syntax check and save if it now passes) or the target
    line number; column 0 holds the object name.
    """
    tabEditor = self.tabEditorWidget.currentWidget().newEditor
    if item.text(1) == 'syntaxError':
        check = tabEditor.syntaxCheck(fromContextMenu=False)
        if check and not tabEditor.isReadOnly():
            self.tabEditorWidget.currentWidget().save()
        return
    linenr = int(item.text(1))
    itemName = str(item.text(0))
    charPos = itemName.find(' ')
    if charPos != -1:
        # Keep only the first token of the item label as the object name.
        objName = itemName[0:charPos]
    else:
        objName = itemName
    tabEditor.goToLine(objName, linenr)
def processing(self):
    """Run the console command that imports the Processing class."""
    self.shell.commandConsole('processing')
def qtCore(self):
    """Run the console command that imports the PyQt QtCore classes."""
    self.shell.commandConsole('qtCore')
def qtGui(self):
    """Run the console command that imports the PyQt QtGui classes."""
    self.shell.commandConsole('qtGui')
def toggleEditor(self, checked):
    """Show or hide the editor pane and sync its toolbar and tabs."""
    self.splitterObj.show() if checked else self.splitterObj.hide()
    # The original tested `if not self.tabEditorWidget:`, which made this
    # branch unreachable (a live widget object is always truthy) and would
    # have crashed on a missing widget; the intent is clearly the opposite.
    if self.tabEditorWidget:
        self.tabEditorWidget.enableToolBarEditor(checked)
        self.tabEditorWidget.restoreTabsOrAddNew()
def toggleObjectListWidget(self, checked):
    """Show or hide the Object Inspector tree."""
    self.listClassMethod.show() if checked else self.listClassMethod.hide()
def findTextEditor(self, checked):
    """Show or hide the find-text widget."""
    self.widgetFind.show() if checked else self.widgetFind.hide()
def pasteEditor(self):
    """Paste the clipboard into the current editor tab."""
    self.tabEditorWidget.currentWidget().newEditor.paste()
def cutEditor(self):
    """Cut the selection from the current editor tab."""
    self.tabEditorWidget.currentWidget().newEditor.cut()
def copyEditor(self):
    """Copy the selection from the current editor tab."""
    self.tabEditorWidget.currentWidget().newEditor.copy()
def runScriptEditor(self):
    """Run the script contained in the current editor tab."""
    self.tabEditorWidget.currentWidget().newEditor.runScriptCode()
def commentCode(self):
    """Comment out the selected lines in the current editor tab."""
    self.tabEditorWidget.currentWidget().newEditor.commentEditorCode(True)
def uncommentCode(self):
    """Uncomment the selected lines in the current editor tab."""
    self.tabEditorWidget.currentWidget().newEditor.commentEditorCode(False)
def openScriptFile(self):
    """Prompt for .py files and open each one in the editor, focusing an
    already-open tab when the same path is selected again."""
    lastDirPath = self.settings.value("pythonConsole/lastDirPath", "")
    openFileTr = QCoreApplication.translate("PythonConsole", "Open File")
    fileList = QFileDialog.getOpenFileNames(
        self, openFileTr, lastDirPath, "Script file (*.py)")
    if fileList:
        for pyFile in fileList:
            for i in range(self.tabEditorWidget.count()):
                tabWidget = self.tabEditorWidget.widget(i)
                if tabWidget.path == pyFile:
                    # Already open: just focus its tab.
                    self.tabEditorWidget.setCurrentWidget(tabWidget)
                    break
            else:
                # for-else: no open tab matched, so open a new one.
                tabName = QFileInfo(pyFile).fileName()
                self.tabEditorWidget.newTabEditor(tabName, pyFile)
                # Remember the *directory* for the next file dialog.  The
                # original computed it into lastDirPath but then stored the
                # full file path, leaving the computed value unused.
                lastDirPath = QFileInfo(pyFile).path()
                self.settings.setValue("pythonConsole/lastDirPath", lastDirPath)
                self.updateTabListScript(pyFile, action='append')
def saveScriptFile(self):
    """Save the current editor tab, reporting I/O failures in the editor
    message bar instead of raising."""
    tabWidget = self.tabEditorWidget.currentWidget()
    try:
        tabWidget.save()
    except (IOError, OSError), error:
        msgText = QCoreApplication.translate('PythonConsole',
                                             'The file <b>{0}</b> could not be saved. Error: {1}').format(tabWidget.path,
                                                                                                          error.strerror)
        # Level 2, non-timed message in the editor's message bar.
        self.callWidgetMessageBarEditor(msgText, 2, False)
def saveAsScriptFile(self, index=None):
    """Save the current tab under a new name chosen in a file dialog.

    The suggested name is the tab's existing path, or the tab title plus
    '.py' when the tab was never saved.  On success the old path (if any)
    is removed from the persisted script list; on failure the tab's
    previous path is restored and an error is shown in the message bar.
    """
    tabWidget = self.tabEditorWidget.currentWidget()
    if not index:
        index = self.tabEditorWidget.currentIndex()
    if not tabWidget.path:
        # Unsaved tab: suggest "<tab title>.py" and remember there was
        # no previous path to fall back to.
        pathFileName = self.tabEditorWidget.tabText(index) + '.py'
        fileNone = True
    else:
        pathFileName = tabWidget.path
        fileNone = False
    saveAsFileTr = QCoreApplication.translate("PythonConsole", "Save File As")
    filename = QFileDialog.getSaveFileName(self,
                                           saveAsFileTr,
                                           pathFileName, "Script file (*.py)")
    if filename:
        try:
            tabWidget.save(filename)
        except (IOError, OSError), error:
            msgText = QCoreApplication.translate('PythonConsole',
                                                 'The file <b>{0}</b> could not be saved. Error: {1}').format(tabWidget.path,
                                                                                                              error.strerror)
            self.callWidgetMessageBarEditor(msgText, 2, False)
            # Saving failed: keep the tab pointing at its previous path.
            if fileNone:
                tabWidget.path = None
            else:
                tabWidget.path = pathFileName
            return
        if not fileNone:
            # The script now lives at a new path; drop the old one from
            # the persisted tab-script list.
            self.updateTabListScript(pathFileName, action='remove')
def openHelp(self):
    """Open the context help page for the Python console."""
    QgsContextHelp.run( "PythonConsole" )
def openSettings(self):
    """Show the settings dialog and, if accepted, refresh the shell,
    the output pane and the editor with the new settings."""
    if optionsDialog(self).exec_():
        self.shell.refreshSettingsShell()
        self.shellOut.refreshSettingsOutput()
        self.tabEditorWidget.refreshSettingsEditor()
def callWidgetMessageBar(self, text):
    """Show *text* in the console output's message bar."""
    self.shellOut.widgetMessageBar(iface, text)
def callWidgetMessageBarEditor(self, text, level, timed):
    """Show *text* in the editor's message bar with the given level and
    timed/untimed behaviour."""
    self.tabEditorWidget.widgetMessageBar(iface, text, level, timed)
def updateTabListScript(self, script, action=None):
    """Keep the persisted list of open editor scripts in sync.

    action == 'remove' drops *script* from the list, 'append' adds it
    (once), and any other value resets the list.  The result is written
    back to the "pythonConsole/tabScripts" setting.
    """
    if action == 'remove':
        # Tolerate scripts that are no longer tracked; the original
        # raised ValueError when the script was absent from the list.
        if script in self.tabListScript:
            self.tabListScript.remove(script)
    elif action == 'append':
        if not self.tabListScript:
            self.tabListScript = []
        if script not in self.tabListScript:
            self.tabListScript.append(script)
    else:
        self.tabListScript = []
    self.settings.setValue("pythonConsole/tabScripts",
                           self.tabListScript)
def saveSettingsConsole(self):
    """Persist the splitter layouts and flush the shell command history."""
    self.settings.setValue("pythonConsole/splitterConsole", self.splitter.saveState())
    self.settings.setValue("pythonConsole/splitterObj", self.splitterObj.saveState())
    self.settings.setValue("pythonConsole/splitterEditor", self.splitterEditor.saveState())
    self.shell.writeHistoryFile(True)
def restoreSettingsConsole(self):
    # Reload the list of scripts that were open in editor tabs
    # (empty list when nothing was stored yet).
    storedTabScripts = self.settings.value("pythonConsole/tabScripts", [])
    self.tabListScript = storedTabScripts
    # Restore the saved splitter geometry; an empty QByteArray default
    # leaves the layout untouched when no state was stored.
    self.splitter.restoreState(self.settings.value("pythonConsole/splitterConsole", QByteArray()))
    self.splitterEditor.restoreState(self.settings.value("pythonConsole/splitterEditor", QByteArray()))
    self.splitterObj.restoreState(self.settings.value("pythonConsole/splitterObj", QByteArray()))
# Allow running this module stand-alone (outside of QGIS) for quick
# manual testing of the console widget.
if __name__ == '__main__':
    a = QApplication(sys.argv)
    console = PythonConsoleWidget()
    console.show()
    a.exec_()
| gpl-2.0 |
sadleader/odoo | addons/l10n_co/__openerp__.py | 256 | 1794 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) David Arnold (devCO).
# Author David Arnold (devCO), dar@devco.co
# Co-Authors Juan Pablo Aries (devCO), jpa@devco.co
# Hector Ivan Valencia Muñoz (TIX SAS)
# Nhomar Hernandez (Vauxoo)
# Humberto Ochoa (Vauxoo)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo module manifest (this file consists of a single dict
# literal that the module loader evaluates).
{
    'name': 'Colombian - Accounting',
    'version': '0.8',
    'category': 'Localization/Account Charts',
    'description': 'Colombian Accounting and Tax Preconfiguration',
    'author': 'David Arnold BA HSG (devCO)',
    # Modules that must be installed before this one.
    'depends': [
        'account',
        'base_vat',
        'account_chart',
    ],
    # Data files loaded on installation, in this order.
    'data': [
        'data/account.account.type.csv',
        'data/account.account.template.csv',
        'data/account.tax.code.template.csv',
        'data/account_chart_template.xml',
        'data/account.tax.template.csv',
        'wizard/account_wizard.xml',
    ],
    'demo': [],
    'installable': True,
}
| agpl-3.0 |
ipfire/collecty | src/collecty/plugins/processor.py | 1 | 7032 | #!/usr/bin/python3
###############################################################################
# #
# collecty - A system statistics collection daemon for IPFire #
# Copyright (C) 2012 IPFire development team #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import multiprocessing
from . import base
from ..colours import *
from ..constants import *
from ..i18n import _
class GraphTemplateProcessor(base.GraphTemplate):
    """Stacked-area graph of CPU time, split by state."""

    name = "processor"

    @property
    def rrd_graph(self):
        """Assemble the rrdtool graph arguments.

        Every non-idle CPU state gets one semi-transparent stacked area
        plus a legend row (current/average/min/max); idle fills the rest
        of the stack without a legend row, and contour lines are drawn
        on top.
        """
        # Per-state drawing info: (datasource, colour, translated label),
        # in stacking order.  Built at call time so _() is evaluated per
        # request, exactly as the hand-written list did.
        states = [
            ("user", CPU_USER, _("User")),
            ("nice", CPU_NICE, _("Nice")),
            ("sys", CPU_SYS, _("System")),
            ("wait", CPU_WAIT, _("Wait")),
            ("irq", CPU_IRQ, _("Interrupt")),
            ("sirq", CPU_SIRQ, _("Soft Interrupt")),
            ("steal", CPU_STEAL, _("Steal")),
            ("guest", CPU_GUEST, _("Guest")),
            ("guest_nice", CPU_GUEST_NICE, _("Guest Nice")),
        ]

        args = [
            # Sum of all used CPU cycles ...
            "CDEF:usage=user,nice,+,sys,+,wait,+,irq,+,sirq,+,steal,+,guest,+,guest_nice,+",
            # ... plus idle gives the total number of cycles.
            "CDEF:total=usage,idle,+",

            # Legend table headline.
            "COMMENT:%s" % EMPTY_LABEL,
            "COMMENT:%s" % (COLUMN % _("Current")),
            "COMMENT:%s" % (COLUMN % _("Average")),
            "COMMENT:%s" % (COLUMN % _("Minimum")),
            "COMMENT:%s\\j" % (COLUMN % _("Maximum")),

            # Summary row with the total usage.
            "CDEF:usage_p=100,usage,*,total,/",
            "COMMENT: %s" % (LABEL % _("Total")),
            "GPRINT:usage_p_cur:%s" % PERCENTAGE,
            "GPRINT:usage_p_avg:%s" % PERCENTAGE,
            "GPRINT:usage_p_min:%s" % PERCENTAGE,
            "GPRINT:usage_p_max:%s\\j" % PERCENTAGE,

            EMPTY_LINE,
        ]

        # One stacked area + legend row per state.  The first area opens
        # the stack and therefore carries no STACK flag.
        for i, (ds, colour, label) in enumerate(states):
            stack = ":STACK" if i else ""
            args += [
                "CDEF:%s_p=100,%s,*,total,/" % (ds, ds),
                "AREA:%s_p%s:%s%s" % (
                    ds, transparency(colour, AREA_OPACITY), LABEL % label, stack,
                ),
                "GPRINT:%s_p_cur:%s" % (ds, PERCENTAGE),
                "GPRINT:%s_p_avg:%s" % (ds, PERCENTAGE),
                "GPRINT:%s_p_min:%s" % (ds, PERCENTAGE),
                "GPRINT:%s_p_max:%s\\j" % (ds, PERCENTAGE),
            ]

        # Idle completes the stack to 100% but gets no legend row.
        args += [
            "CDEF:idle_p=100,idle,*,total,/",
            "AREA:idle_p%s::STACK" % CPU_IDLE,
        ]

        # Contour lines on top of the stacked areas, same order.
        args.append("LINE:user_p%s" % CPU_USER)
        args += [
            "LINE:%s_p%s::STACK" % (ds, colour)
            for ds, colour, _label in states[1:]
        ]

        return args

    upper_limit = 100
    lower_limit = 0

    @property
    def graph_title(self):
        """Localised graph headline."""
        return _("Processor Usage")

    @property
    def graph_vertical_label(self):
        """Localised y-axis caption."""
        return _("Percent")
class ProcessorObject(base.Object):
    """One CPU from /proc/stat — a single core or the aggregated line."""

    # Columns of a /proc/stat "cpu" line.  All of them are monotonically
    # increasing tick counters, hence DERIVE with a minimum of zero.
    rrd_schema = [
        "DS:user:DERIVE:0:U",
        "DS:nice:DERIVE:0:U",
        "DS:sys:DERIVE:0:U",
        "DS:idle:DERIVE:0:U",
        "DS:wait:DERIVE:0:U",
        "DS:irq:DERIVE:0:U",
        "DS:sirq:DERIVE:0:U",
        "DS:steal:DERIVE:0:U",
        "DS:guest:DERIVE:0:U",
        "DS:guest_nice:DERIVE:0:U",
    ]

    def init(self, cpu_id=None):
        # cpu_id is None for the aggregated "cpu" line, otherwise the
        # zero-based core index.
        self.cpu_id = cpu_id

    @property
    def id(self):
        # Identifier used to distinguish the per-core RRDs; the
        # aggregated object is called "default".
        if self.cpu_id is not None:
            return "%s" % self.cpu_id
        return "default"

    def collect(self):
        """
        Reads the CPU usage.

        Returns the raw counter values for the configured CPU line, in
        rrd_schema order.  Raises ValueError when /proc/stat does not
        provide exactly one value per datasource.
        """
        stat = self.read_proc_stat()
        if self.cpu_id is None:
            values = stat.get("cpu")
        else:
            values = stat.get("cpu%s" % self.cpu_id)
        # Convert values into a list
        values = values.split()
        # Idiom fix: "!=" instead of "not ... ==".
        if len(values) != len(self.rrd_schema):
            raise ValueError("Received unexpected output from /proc/stat: %s" % values)
        return values
class ProcessorPlugin(base.Plugin):
    # Collects CPU usage, both aggregated and per core.
    name = "processor"
    description = "Processor Usage Plugin"

    # Graph templates rendered from the data this plugin collects.
    templates = [GraphTemplateProcessor]

    @property
    def objects(self):
        # One object for the aggregated "cpu" line ...
        yield ProcessorObject(self)
        # ... and one per core.
        num = multiprocessing.cpu_count()
        for i in range(num):
            yield ProcessorObject(self, cpu_id=i)
| gpl-3.0 |
sinbazhou/odoo | addons/l10n_be/__openerp__.py | 251 | 3666 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo module manifest (this file consists of a single dict
# literal that the module loader evaluates).
{
    'name': 'Belgium - Accounting',
    'version': '1.1',
    'category': 'Localization/Account Charts',
    'description': """
This is the base module to manage the accounting chart for Belgium in OpenERP.
==============================================================================
After installing this module, the Configuration wizard for accounting is launched.
* We have the account templates which can be helpful to generate Charts of Accounts.
* On that particular wizard, you will be asked to pass the name of the company,
the chart template to follow, the no. of digits to generate, the code for your
account and bank account, currency to create journals.
Thus, the pure copy of Chart Template is generated.
Wizards provided by this module:
--------------------------------
* Partner VAT Intra: Enlist the partners with their related VAT and invoiced
amounts. Prepares an XML file format.
**Path to access :** Invoicing/Reporting/Legal Reports/Belgium Statements/Partner VAT Intra
* Periodical VAT Declaration: Prepares an XML file for Vat Declaration of
the Main company of the User currently Logged in.
**Path to access :** Invoicing/Reporting/Legal Reports/Belgium Statements/Periodical VAT Declaration
* Annual Listing Of VAT-Subjected Customers: Prepares an XML file for Vat
Declaration of the Main company of the User currently Logged in Based on
Fiscal year.
**Path to access :** Invoicing/Reporting/Legal Reports/Belgium Statements/Annual Listing Of VAT-Subjected Customers
""",
    'author': 'Noviat & OpenERP SA',
    # Modules that must be installed before this one.
    'depends': [
        'account',
        'base_vat',
        'base_iban',
        'account_chart',
        'l10n_be_coda',
        'l10n_multilang',
    ],
    # Data files loaded on installation, in this order.
    'data': [
        'account_financial_report.xml',
        'account_pcmn_belgium.xml',
        'account_tax_code_template.xml',
        'account_chart_template.xml',
        'account_chart_template.yml',
        'account_tax_template.xml',
        'wizard/l10n_be_account_vat_declaration_view.xml',
        'wizard/l10n_be_vat_intra_view.xml',
        'wizard/l10n_be_partner_vat_listing.xml',
        'wizard/account_wizard.xml',
        'l10n_be_sequence.xml',
        'l10n_be_reports.xml',
        'fiscal_templates.xml',
        'account_fiscal_position_tax_template.xml',
        'security/ir.model.access.csv',
        'views/report_vatintraprint.xml',
        'views/report_vatpartnerlisting.xml',
    ],
    'demo': [],
    'installable': True,
    'website': 'https://www.odoo.com/page/accounting',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jollyroger/debian-buildbot-slave | buildslave/test/unit/test_commands_p4.py | 3 | 6638 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from buildslave.commands import p4
from buildslave.test.fake.runprocess import Expect
from buildslave.test.util.sourcecommand import SourceCommandTestMixin
from buildslave.util import Obfuscated
class TestP4(SourceCommandTestMixin, unittest.TestCase):
    """Checks the P4 source step against a scripted sequence of fake
    subprocess invocations (clobber, client spec upload, sync, changes)."""

    def setUp(self):
        self.setUpCommand()

    def tearDown(self):
        self.tearDownCommand()

    def test_simple(self):
        # Plain ASCII configuration in "copy" mode.
        self.patch_getCommand('p4', 'path/to/p4')
        self.clean_environ()
        self.make_command(p4.P4, dict(
            workdir='workdir',
            mode='copy',
            revision=None,
            p4port='p4dserv:1666',
            p4client='buildbot_test_10',
            p4user='jimmy',
            p4passwd='hushnow',
            p4base='//mydepot/myproj/',
            branch='mytrunk',
            p4extra_views=[],
        ))
        exp_environ = dict(PWD='.', LC_MESSAGES='C')
        # can't use textwrap.dedent here, because in 2.4 it converts \t to 8x' '
        client_spec = """\
Client: buildbot_test_10
Owner: jimmy
Description:
\tCreated by jimmy
Root:\t%s
Options:\tallwrite rmdir
LineEnd:\tlocal
View:
\t//mydepot/myproj/mytrunk/... //buildbot_test_10/source/...
""" % self.basedir
        # Expected command sequence; the password must appear obfuscated.
        expects = [
            Expect(['clobber', 'workdir'],
                   self.basedir)
            + 0,
            Expect(['clobber', 'source'],
                   self.basedir)
            + 0,
            Expect(['p4', '-p', 'p4dserv:1666', '-u', 'jimmy', '-P',
                    Obfuscated('hushnow', 'XXXXXXXX'), 'client', '-i'],
                   self.basedir,
                   # TODO: empty env?
                   sendRC=False, timeout=120, usePTY=False, environ={},
                   initialStdin=client_spec)
            + 0,
            Expect(['p4', '-p', 'p4dserv:1666', '-u', 'jimmy', '-P',
                    Obfuscated('hushnow', 'XXXXXXXX'), '-c', 'buildbot_test_10', 'sync', '-f'],
                   self.basedir,
                   # TODO: empty env?
                   sendRC=False, timeout=120, usePTY=False, environ={})
            + 0,
            Expect(['p4', '-p', 'p4dserv:1666', '-u', 'jimmy', '-P',
                    Obfuscated('hushnow', 'XXXXXXXX'), '-c', 'buildbot_test_10', 'changes',
                    '-s', 'submitted', '-m', '1', '#have'],
                   self.basedir,
                   sendRC=False, timeout=120, usePTY=False, environ=exp_environ,
                   keepStdout=True)
            + {'stdout': 'Change 28147 on 2008/04/07 by p4user@hostname\n'}
            + 0,
            Expect(['copy', 'source', 'workdir'],
                   self.basedir)
            + 0,
        ]
        self.patch_runprocess(*expects)
        d = self.run_command()
        d.addCallback(self.check_sourcedata, "['p4dserv:1666', 'buildbot_test_10', " +
                      "'//mydepot/myproj/', 'mytrunk', [], None, %s, 'copy', 'workdir']"
                      % repr(self.basedir))
        return d

    def test_simple_unicode_args(self):
        # Same flow, but with non-ASCII (snowman) characters in port,
        # client, base and branch to exercise unicode handling.
        self.patch_getCommand('p4', 'path/to/p4')
        self.clean_environ()
        self.make_command(p4.P4, dict(
            workdir='workdir',
            mode='copy',
            revision=None,
            p4port=u'p4dserv:1666\N{SNOWMAN}',
            p4client=u'buildbot_test_10\N{SNOWMAN}',
            p4user='jimmy',
            p4passwd='hushnow',
            p4base=u'//mydepot/myproj/\N{SNOWMAN}',
            branch=u'mytrunk\N{SNOWMAN}',
            p4extra_views=[],
        ))
        exp_environ = dict(PWD='.', LC_MESSAGES='C')
        # can't use textwrap.dedent here, because in 2.4 it converts \t to 8x' '
        client_spec = """\
Client: buildbot_test_10
Owner: jimmy
Description:
\tCreated by jimmy
Root:\t%s
Options:\tallwrite rmdir
LineEnd:\tlocal
View:
\t//mydepot/myproj/mytrunk/... //buildbot_test_10/source/...
""" % self.basedir
        expects = [
            Expect(['clobber', 'workdir'],
                   self.basedir)
            + 0,
            Expect(['clobber', 'source'],
                   self.basedir)
            + 0,
            Expect(['p4', '-p', u'p4dserv:1666\N{SNOWMAN}', '-u', 'jimmy', '-P',
                    Obfuscated('hushnow', 'XXXXXXXX'), 'client', '-i'],
                   self.basedir,
                   # TODO: empty env?
                   sendRC=False, timeout=120, usePTY=False, environ={},
                   initialStdin=client_spec)
            + 0,
            Expect(['p4', '-p', u'p4dserv:1666\N{SNOWMAN}', '-u', 'jimmy', '-P',
                    Obfuscated('hushnow', 'XXXXXXXX'), '-c',
                    u'buildbot_test_10\N{SNOWMAN}', 'sync', '-f'],
                   self.basedir,
                   # TODO: empty env?
                   sendRC=False, timeout=120, usePTY=False, environ={})
            + 0,
            Expect(['p4', '-p', u'p4dserv:1666\N{SNOWMAN}', '-u', 'jimmy', '-P',
                    Obfuscated('hushnow', 'XXXXXXXX'), '-c',
                    u'buildbot_test_10\N{SNOWMAN}', 'changes',
                    '-s', 'submitted', '-m', '1', '#have'],
                   self.basedir,
                   sendRC=False, timeout=120, usePTY=False, environ=exp_environ,
                   keepStdout=True)
            + {'stdout': 'Change 28147 on 2008/04/07 by p4user@hostname\n'}
            + 0,
            Expect(['copy', 'source', 'workdir'],
                   self.basedir)
            + 0,
        ]
        self.patch_runprocess(*expects)
        d = self.run_command()
        # The sourcedata string carries the UTF-8 byte escapes of the
        # snowman character.
        d.addCallback(self.check_sourcedata,
                      "['p4dserv:1666\\xe2\\x98\\x83', "
                      "'buildbot_test_10\\xe2\\x98\\x83', "
                      "'//mydepot/myproj/\\xe2\\x98\\x83', "
                      "'mytrunk\\xe2\\x98\\x83', [], None, %s, 'copy', "
                      "'workdir']"
                      % repr(self.basedir))
        return d
| gpl-2.0 |
ovnicraft/server-tools | mail_environment/models/ir_mail_server.py | 2 | 1777 | # -*- coding: utf-8 -*-
# Copyright 2012-2016 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import api, fields, models
from odoo.addons.server_environment import serv_config
class IrMailServer(models.Model):
    """Outgoing mail server whose SMTP settings are taken from the
    server-environment configuration files rather than the database."""
    _inherit = "ir.mail_server"

    # All SMTP connection fields become computed and read-only so they
    # cannot be edited in the UI; values come from _compute_server_env.
    smtp_host = fields.Char(compute='_compute_server_env',
                            required=False,
                            readonly=True)
    smtp_port = fields.Integer(compute='_compute_server_env',
                               required=False,
                               readonly=True)
    smtp_user = fields.Char(compute='_compute_server_env',
                            required=False,
                            readonly=True)
    smtp_pass = fields.Char(compute='_compute_server_env',
                            required=False,
                            readonly=True)
    smtp_encryption = fields.Selection(compute='_compute_server_env',
                                       required=False,
                                       readonly=True)

    # Empty depends: the values have no record dependencies — they are
    # re-read from the environment config (presumably on each compute).
    @api.depends()
    def _compute_server_env(self):
        for server in self:
            global_section_name = 'outgoing_mail'
            # default vals
            config_vals = {'smtp_port': 587}
            # Global section first, then a per-server override section
            # named "outgoing_mail.<server name>".
            if serv_config.has_section(global_section_name):
                config_vals.update((serv_config.items(global_section_name)))
            custom_section_name = '.'.join((global_section_name, server.name))
            if serv_config.has_section(custom_section_name):
                config_vals.update(serv_config.items(custom_section_name))
            # Config values are strings; the port must be an integer.
            if config_vals.get('smtp_port'):
                config_vals['smtp_port'] = int(config_vals['smtp_port'])
            server.update(config_vals)
| agpl-3.0 |
rmfitzpatrick/ansible | lib/ansible/modules/windows/win_unzip.py | 29 | 4436 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_unzip
version_added: "2.0"
short_description: Unzips compressed files and archives on the Windows node
description:
- Unzips compressed files and archives.
- Supports .zip files natively
- Supports other formats supported by the Powershell Community Extensions (PSCX) module (basically everything 7zip supports)
- For non-Windows targets, use the M(unarchive) module instead.
requirements:
- PSCX
options:
src:
description:
- File to be unzipped (provide absolute path).
required: true
dest:
description:
- Destination of zip file (provide absolute path of directory). If it does not exist, the directory will be created.
required: true
delete_archive:
description:
- Remove the zip file, after unzipping.
type: bool
default: 'no'
aliases: [ rm ]
recurse:
description:
- Recursively expand zipped files within the src file.
type: bool
default: 'no'
creates:
description:
- If this file or directory exists the specified src will not be extracted.
notes:
- This module is not really idempotent, it will extract the archive every time, and report a change.
- For extracting any compression types other than .zip, the PowerShellCommunityExtensions (PSCX) Module is required. This module (in conjunction with PSCX)
has the ability to recursively unzip files within the src zip file provided and also functionality for many other compression types. If the destination
directory does not exist, it will be created before unzipping the file. Specifying rm parameter will force removal of the src file after extraction.
- For non-Windows targets, use the M(unarchive) module instead.
author:
- Phil Schwartz (@schwartzmx)
'''
EXAMPLES = r'''
# This unzips a library that was downloaded with win_get_url, and removes the file after extraction
# $ ansible -i hosts -m win_unzip -a "src=C:\LibraryToUnzip.zip dest=C:\Lib remove=true" all
- name: Unzip a bz2 (BZip) file
win_unzip:
src: C:\Users\Phil\Logs.bz2
dest: C:\Users\Phil\OldLogs
creates: C:\Users\Phil\OldLogs
- name: Unzip gz log
win_unzip:
src: C:\Logs\application-error-logs.gz
dest: C:\ExtractedLogs\application-error-logs
# Unzip .zip file, recursively decompresses the contained .gz files and removes all unneeded compressed files after completion.
- name: Unzip ApplicationLogs.zip and decompress all GZipped log files
hosts: all
gather_facts: false
tasks:
- name: Recursively decompress GZ files in ApplicationLogs.zip
win_unzip:
src: C:\Downloads\ApplicationLogs.zip
dest: C:\Application\Logs
recurse: yes
delete_archive: yes
# Install PSCX to use for extracting a gz file
- name: Grab PSCX msi
win_get_url:
url: http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959
dest: C:\Windows\Temp\pscx.msi
- name: Install PSCX
win_msi:
path: C:\Windows\Temp\pscx.msi
'''
RETURN = r'''
dest:
description: The provided destination path
returned: always
type: string
sample: C:\ExtractedLogs\application-error-logs
removed:
description: Whether the module did remove any files during task run
returned: always
type: boolean
sample: True
src:
description: The provided source path
returned: always
type: string
sample: C:\Logs\application-error-logs.gz
'''
| gpl-3.0 |
andela-ooladayo/django | tests/auth_tests/test_context_processors.py | 269 | 6773 | import datetime
from django.contrib.auth import authenticate
from django.contrib.auth.context_processors import PermLookupDict, PermWrapper
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.test import SimpleTestCase, TestCase, override_settings
from .settings import AUTH_MIDDLEWARE_CLASSES, AUTH_TEMPLATES
class MockUser(object):
    """Stand-in user exposing only the two permission hooks that
    PermWrapper/PermLookupDict consult."""

    def has_module_perms(self, perm):
        # Only the fake "mockapp" application is granted.
        return perm == 'mockapp'

    def has_perm(self, perm):
        # Exactly one concrete permission exists on this user.
        return perm == 'mockapp.someperm'
class PermWrapperTests(SimpleTestCase):
    """
    Test some details of the PermWrapper implementation.
    """
    class EQLimiterObject(object):
        """
        This object makes sure __eq__ will not be called endlessly.
        """
        def __init__(self):
            self.eq_calls = 0

        def __eq__(self, other):
            # First comparison returns False, every later one True, so an
            # implementation that keeps comparing would terminate anyway;
            # the call counter exposes how often __eq__ was consulted.
            if self.eq_calls > 0:
                return True
            self.eq_calls += 1
            return False

    def test_permwrapper_in(self):
        """
        Test that 'something' in PermWrapper works as expected.
        """
        perms = PermWrapper(MockUser())
        # Works for modules and full permissions.
        self.assertIn('mockapp', perms)
        self.assertNotIn('nonexisting', perms)
        self.assertIn('mockapp.someperm', perms)
        self.assertNotIn('mockapp.nonexisting', perms)

    def test_permlookupdict_in(self):
        """
        No endless loops if accessed with 'in' - refs #18979.
        """
        pldict = PermLookupDict(MockUser(), 'mockapp')
        with self.assertRaises(TypeError):
            self.EQLimiterObject() in pldict
@override_settings(
    PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
    ROOT_URLCONF='auth_tests.urls',
    TEMPLATES=AUTH_TEMPLATES,
    USE_TZ=False,  # required for loading the fixture
)
class AuthContextProcessorTests(TestCase):
    """
    Tests for the ``django.contrib.auth.context_processors.auth`` processor
    """
    @classmethod
    def setUpTestData(cls):
        # password = "secret"
        cls.u1 = User.objects.create(
            id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
            last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
            first_name='Super', last_name='User', email='super@example.com',
            is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
        )

    @override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES)
    def test_session_not_accessed(self):
        """
        Tests that the session is not accessed simply by including
        the auth context processor
        """
        response = self.client.get('/auth_processor_no_attr_access/')
        self.assertContains(response, "Session not accessed")

    @override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES)
    def test_session_is_accessed(self):
        """
        Tests that the session is accessed if the auth context processor
        is used and relevant attributes accessed.
        """
        response = self.client.get('/auth_processor_attr_access/')
        self.assertContains(response, "Session accessed")

    def test_perms_attrs(self):
        # Grant a single permission and check the {{ perms }} lookups.
        u = User.objects.create_user(username='normal', password='secret')
        u.user_permissions.add(
            Permission.objects.get(
                content_type=ContentType.objects.get_for_model(Permission),
                codename='add_permission'))
        self.client.login(username='normal', password='secret')
        response = self.client.get('/auth_processor_perms/')
        self.assertContains(response, "Has auth permissions")
        self.assertContains(response, "Has auth.add_permission permissions")
        self.assertNotContains(response, "nonexisting")

    def test_perm_in_perms_attrs(self):
        # Same as above but via the "perm in perms" template test.
        u = User.objects.create_user(username='normal', password='secret')
        u.user_permissions.add(
            Permission.objects.get(
                content_type=ContentType.objects.get_for_model(Permission),
                codename='add_permission'))
        self.client.login(username='normal', password='secret')
        response = self.client.get('/auth_processor_perm_in_perms/')
        self.assertContains(response, "Has auth permissions")
        self.assertContains(response, "Has auth.add_permission permissions")
        self.assertNotContains(response, "nonexisting")

    def test_message_attrs(self):
        self.client.login(username='super', password='secret')
        response = self.client.get('/auth_processor_messages/')
        self.assertContains(response, "Message 1")

    def test_user_attrs(self):
        """
        Test that the lazy objects returned behave just like the wrapped objects.
        """
        # These are 'functional' level tests for common use cases.  Direct
        # testing of the implementation (SimpleLazyObject) is in the 'utils'
        # tests.
        self.client.login(username='super', password='secret')
        user = authenticate(username='super', password='secret')
        response = self.client.get('/auth_processor_user/')
        self.assertContains(response, "unicode: super")
        self.assertContains(response, "id: 100")
        self.assertContains(response, "username: super")
        # bug #12037 is tested by the {% url %} in the template:
        self.assertContains(response, "url: /userpage/super/")

        # See if this object can be used for queries where a Q() comparing
        # a user can be used with another Q() (in an AND or OR fashion).
        # This simulates what a template tag might do with the user from the
        # context. Note that we don't need to execute a query, just build it.
        #
        # The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
        # User is a fatal TypeError: "function() takes at least 2 arguments
        # (0 given)" deep inside deepcopy().
        #
        # Python 2.5 and 2.6 succeeded, but logged internally caught exception
        # spew:
        #
        #    Exception RuntimeError: 'maximum recursion depth exceeded while
        #    calling a Python object' in <type 'exceptions.AttributeError'>
        #    ignored"
        Q(user=response.context['user']) & Q(someflag=True)

        # Tests for user equality.  This is hard because User defines
        # equality in a non-duck-typing way
        # See bug #12060
        self.assertEqual(response.context['user'], user)
        self.assertEqual(user, response.context['user'])
| bsd-3-clause |
retomerz/intellij-community | python/lib/Lib/encodings/iso8859_3.py | 593 | 13345 | """ Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless ISO 8859-3 codec backed by the module-level charmap tables.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
# Incremental and stream variants reuse the same charmap tables; the
# [0] index drops the "consumed length" part of the (data, length) tuple.
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registry entry for the iso8859-3 codec."""
    # One shared stateless Codec instance is enough for both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-3',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
    u'\x00' # 0x00 -> NULL
    u'\x01' # 0x01 -> START OF HEADING
    u'\x02' # 0x02 -> START OF TEXT
    u'\x03' # 0x03 -> END OF TEXT
    u'\x04' # 0x04 -> END OF TRANSMISSION
    u'\x05' # 0x05 -> ENQUIRY
    u'\x06' # 0x06 -> ACKNOWLEDGE
    u'\x07' # 0x07 -> BELL
    u'\x08' # 0x08 -> BACKSPACE
    u'\t' # 0x09 -> HORIZONTAL TABULATION
    u'\n' # 0x0A -> LINE FEED
    u'\x0b' # 0x0B -> VERTICAL TABULATION
    u'\x0c' # 0x0C -> FORM FEED
    u'\r' # 0x0D -> CARRIAGE RETURN
    u'\x0e' # 0x0E -> SHIFT OUT
    u'\x0f' # 0x0F -> SHIFT IN
    u'\x10' # 0x10 -> DATA LINK ESCAPE
    u'\x11' # 0x11 -> DEVICE CONTROL ONE
    u'\x12' # 0x12 -> DEVICE CONTROL TWO
    u'\x13' # 0x13 -> DEVICE CONTROL THREE
    u'\x14' # 0x14 -> DEVICE CONTROL FOUR
    u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
    u'\x16' # 0x16 -> SYNCHRONOUS IDLE
    u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
    u'\x18' # 0x18 -> CANCEL
    u'\x19' # 0x19 -> END OF MEDIUM
    u'\x1a' # 0x1A -> SUBSTITUTE
    u'\x1b' # 0x1B -> ESCAPE
    u'\x1c' # 0x1C -> FILE SEPARATOR
    u'\x1d' # 0x1D -> GROUP SEPARATOR
    u'\x1e' # 0x1E -> RECORD SEPARATOR
    u'\x1f' # 0x1F -> UNIT SEPARATOR
    u' ' # 0x20 -> SPACE
    u'!' # 0x21 -> EXCLAMATION MARK
    u'"' # 0x22 -> QUOTATION MARK
    u'#' # 0x23 -> NUMBER SIGN
    u'$' # 0x24 -> DOLLAR SIGN
    u'%' # 0x25 -> PERCENT SIGN
    u'&' # 0x26 -> AMPERSAND
    u"'" # 0x27 -> APOSTROPHE
    u'(' # 0x28 -> LEFT PARENTHESIS
    u')' # 0x29 -> RIGHT PARENTHESIS
    u'*' # 0x2A -> ASTERISK
    u'+' # 0x2B -> PLUS SIGN
    u',' # 0x2C -> COMMA
    u'-' # 0x2D -> HYPHEN-MINUS
    u'.' # 0x2E -> FULL STOP
    u'/' # 0x2F -> SOLIDUS
    u'0' # 0x30 -> DIGIT ZERO
    u'1' # 0x31 -> DIGIT ONE
    u'2' # 0x32 -> DIGIT TWO
    u'3' # 0x33 -> DIGIT THREE
    u'4' # 0x34 -> DIGIT FOUR
    u'5' # 0x35 -> DIGIT FIVE
    u'6' # 0x36 -> DIGIT SIX
    u'7' # 0x37 -> DIGIT SEVEN
    u'8' # 0x38 -> DIGIT EIGHT
    u'9' # 0x39 -> DIGIT NINE
    u':' # 0x3A -> COLON
    u';' # 0x3B -> SEMICOLON
    u'<' # 0x3C -> LESS-THAN SIGN
    u'=' # 0x3D -> EQUALS SIGN
    u'>' # 0x3E -> GREATER-THAN SIGN
    u'?' # 0x3F -> QUESTION MARK
    u'@' # 0x40 -> COMMERCIAL AT
    u'A' # 0x41 -> LATIN CAPITAL LETTER A
    u'B' # 0x42 -> LATIN CAPITAL LETTER B
    u'C' # 0x43 -> LATIN CAPITAL LETTER C
    u'D' # 0x44 -> LATIN CAPITAL LETTER D
    u'E' # 0x45 -> LATIN CAPITAL LETTER E
    u'F' # 0x46 -> LATIN CAPITAL LETTER F
    u'G' # 0x47 -> LATIN CAPITAL LETTER G
    u'H' # 0x48 -> LATIN CAPITAL LETTER H
    u'I' # 0x49 -> LATIN CAPITAL LETTER I
    u'J' # 0x4A -> LATIN CAPITAL LETTER J
    u'K' # 0x4B -> LATIN CAPITAL LETTER K
    u'L' # 0x4C -> LATIN CAPITAL LETTER L
    u'M' # 0x4D -> LATIN CAPITAL LETTER M
    u'N' # 0x4E -> LATIN CAPITAL LETTER N
    u'O' # 0x4F -> LATIN CAPITAL LETTER O
    u'P' # 0x50 -> LATIN CAPITAL LETTER P
    u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
    u'R' # 0x52 -> LATIN CAPITAL LETTER R
    u'S' # 0x53 -> LATIN CAPITAL LETTER S
    u'T' # 0x54 -> LATIN CAPITAL LETTER T
    u'U' # 0x55 -> LATIN CAPITAL LETTER U
    u'V' # 0x56 -> LATIN CAPITAL LETTER V
    u'W' # 0x57 -> LATIN CAPITAL LETTER W
    u'X' # 0x58 -> LATIN CAPITAL LETTER X
    u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
    u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
    u'[' # 0x5B -> LEFT SQUARE BRACKET
    u'\\' # 0x5C -> REVERSE SOLIDUS
    u']' # 0x5D -> RIGHT SQUARE BRACKET
    u'^' # 0x5E -> CIRCUMFLEX ACCENT
    u'_' # 0x5F -> LOW LINE
    u'`' # 0x60 -> GRAVE ACCENT
    u'a' # 0x61 -> LATIN SMALL LETTER A
    u'b' # 0x62 -> LATIN SMALL LETTER B
    u'c' # 0x63 -> LATIN SMALL LETTER C
    u'd' # 0x64 -> LATIN SMALL LETTER D
    u'e' # 0x65 -> LATIN SMALL LETTER E
    u'f' # 0x66 -> LATIN SMALL LETTER F
    u'g' # 0x67 -> LATIN SMALL LETTER G
    u'h' # 0x68 -> LATIN SMALL LETTER H
    u'i' # 0x69 -> LATIN SMALL LETTER I
    u'j' # 0x6A -> LATIN SMALL LETTER J
    u'k' # 0x6B -> LATIN SMALL LETTER K
    u'l' # 0x6C -> LATIN SMALL LETTER L
    u'm' # 0x6D -> LATIN SMALL LETTER M
    u'n' # 0x6E -> LATIN SMALL LETTER N
    u'o' # 0x6F -> LATIN SMALL LETTER O
    u'p' # 0x70 -> LATIN SMALL LETTER P
    u'q' # 0x71 -> LATIN SMALL LETTER Q
    u'r' # 0x72 -> LATIN SMALL LETTER R
    u's' # 0x73 -> LATIN SMALL LETTER S
    u't' # 0x74 -> LATIN SMALL LETTER T
    u'u' # 0x75 -> LATIN SMALL LETTER U
    u'v' # 0x76 -> LATIN SMALL LETTER V
    u'w' # 0x77 -> LATIN SMALL LETTER W
    u'x' # 0x78 -> LATIN SMALL LETTER X
    u'y' # 0x79 -> LATIN SMALL LETTER Y
    u'z' # 0x7A -> LATIN SMALL LETTER Z
    u'{' # 0x7B -> LEFT CURLY BRACKET
    u'|' # 0x7C -> VERTICAL LINE
    u'}' # 0x7D -> RIGHT CURLY BRACKET
    u'~' # 0x7E -> TILDE
    u'\x7f' # 0x7F -> DELETE
    u'\x80' # 0x80 -> <control>
    u'\x81' # 0x81 -> <control>
    u'\x82' # 0x82 -> <control>
    u'\x83' # 0x83 -> <control>
    u'\x84' # 0x84 -> <control>
    u'\x85' # 0x85 -> <control>
    u'\x86' # 0x86 -> <control>
    u'\x87' # 0x87 -> <control>
    u'\x88' # 0x88 -> <control>
    u'\x89' # 0x89 -> <control>
    u'\x8a' # 0x8A -> <control>
    u'\x8b' # 0x8B -> <control>
    u'\x8c' # 0x8C -> <control>
    u'\x8d' # 0x8D -> <control>
    u'\x8e' # 0x8E -> <control>
    u'\x8f' # 0x8F -> <control>
    u'\x90' # 0x90 -> <control>
    u'\x91' # 0x91 -> <control>
    u'\x92' # 0x92 -> <control>
    u'\x93' # 0x93 -> <control>
    u'\x94' # 0x94 -> <control>
    u'\x95' # 0x95 -> <control>
    u'\x96' # 0x96 -> <control>
    u'\x97' # 0x97 -> <control>
    u'\x98' # 0x98 -> <control>
    u'\x99' # 0x99 -> <control>
    u'\x9a' # 0x9A -> <control>
    u'\x9b' # 0x9B -> <control>
    u'\x9c' # 0x9C -> <control>
    u'\x9d' # 0x9D -> <control>
    u'\x9e' # 0x9E -> <control>
    u'\x9f' # 0x9F -> <control>
    u'\xa0' # 0xA0 -> NO-BREAK SPACE
    u'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
    u'\u02d8' # 0xA2 -> BREVE
    u'\xa3' # 0xA3 -> POUND SIGN
    u'\xa4' # 0xA4 -> CURRENCY SIGN
    u'\ufffe' # 0xA5 -> UNDEFINED
    u'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
    u'\xa7' # 0xA7 -> SECTION SIGN
    u'\xa8' # 0xA8 -> DIAERESIS
    u'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
    u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
    u'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
    u'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
    u'\xad' # 0xAD -> SOFT HYPHEN
    u'\ufffe' # 0xAE -> UNDEFINED
    u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
    u'\xb0' # 0xB0 -> DEGREE SIGN
    u'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
    u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
    u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
    u'\xb4' # 0xB4 -> ACUTE ACCENT
    u'\xb5' # 0xB5 -> MICRO SIGN
    u'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
    u'\xb7' # 0xB7 -> MIDDLE DOT
    u'\xb8' # 0xB8 -> CEDILLA
    u'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
    u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
    u'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
    u'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
    u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
    u'\ufffe' # 0xBE -> UNDEFINED
    u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
    u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\ufffe' # 0xC3 -> UNDEFINED
    u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
    u'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
    u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
    u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\ufffe' # 0xD0 -> UNDEFINED
    u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
    u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
    u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
    u'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
    u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
    u'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
    u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
    u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\ufffe' # 0xE3 -> UNDEFINED
    u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
    u'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
    u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
    u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
    u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
    u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\ufffe' # 0xF0 -> UNDEFINED
    u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
    u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
    u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
    u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
    u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf7' # 0xF7 -> DIVISION SIGN
    u'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
    u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
    u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
    u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
    u'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
    u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
# Invert the decoding table into the character -> byte map used by encode().
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
SWRG/ESWC2015-paper-evaluation | tests/yen_algorithms_test.py | 1 | 1681 | import sys,os
sys.path.insert(0,os.path.abspath(__file__+"/../.."))
import yenalgo2,corefunctions
import networkx as nx
import unittest
class KnownValues(unittest.TestCase):
    """Verify both Yen k-shortest-path implementations on a fixed graph."""

    # Weighted undirected test graph.  Edge attributes are passed as keyword
    # arguments (weight=...), which works on networkx 1.x AND 2.x+; the old
    # positional attribute-dict form add_edge(u, v, {'weight': w}) was
    # removed in networkx 2.0 and raises TypeError there.
    g = nx.Graph()
    g.add_edge(1, 2, weight=1)
    g.add_edge(1, 3, weight=2)
    g.add_edge(1, 4, weight=3)
    g.add_edge(2, 5, weight=2)
    g.add_edge(2, 6, weight=1)
    g.add_edge(3, 7, weight=1)
    g.add_edge(3, 8, weight=3)
    g.add_edge(3, 9, weight=4)
    g.add_edge(3, 10, weight=1)
    g.add_edge(4, 10, weight=2)
    g.add_edge(4, 11, weight=2)
    g.add_edge(5, 12, weight=1)
    g.add_edge(6, 13, weight=2)
    g.add_edge(10, 14, weight=2)
    g.add_edge(14, 15, weight=2)
    (s, t) = (3, 15)  # kept for parity with the original fixture

    # ((source, target), expected list of (total cost, node path)) pairs.
    knownValuesYen = (
        ((1, 2), [(1.0, [1, 2])]),
        ((3, 15), [(5.0, [3, 10, 14, 15]), (11.0, [3, 1, 4, 10, 14, 15])]),
        ((1, 15), [(7.0, [1, 3, 10, 14, 15]), (9.0, [1, 4, 10, 14, 15])]),
        ((4, 15), [(6.0, [4, 10, 14, 15]), (10.0, [4, 1, 3, 10, 14, 15])])
    )

    def test_YenKSP_generator_KnownValues(self):
        """YenKSP_generator should give known result with known input"""
        for ((source, target), expected_result) in self.knownValuesYen:
            result = list(corefunctions.YenKSP_generator(self.g, source, target))
            self.assertEqual(expected_result, result)

    def test_yenalgo2_KnownValues(self):
        """yenalgo2 should give known result with known input"""
        for ((source, target), expected_result) in self.knownValuesYen:
            result = list(yenalgo2.k_shortest_paths(self.g, source, target, 4))
            self.assertEqual(expected_result, result)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
cloudify-cosmo/cloudify-nsx-plugin | cloudify_nsx/network/dhcp_bind.py | 1 | 5021 | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
import cloudify_nsx.library.nsx_common as common
from cloudify import exceptions as cfy_exc
import cloudify_nsx.library.nsx_esg_dlr as nsx_dhcp
@operation
def create(**kwargs):
    """Create a DHCP address binding on an NSX edge services gateway.

    The binding is MAC-based when ``mac`` is set, otherwise VM-based when
    both ``vm_id`` and ``vnic_id`` are set.  The id of the created binding
    is stored in the node instance runtime properties under ``resource_id``
    so the matching ``delete`` operation can find it later.
    """
    # Declarative validation schema for the 'bind' properties block:
    # "required" entries must be present, "set_none" entries default to None.
    validation_rules = {
        "esg_id": {
            "required": True
        },
        "vm_id": {
            "set_none": True
        },
        "vnic_id": {
            "set_none": True,
            "type": "string"
        },
        "mac": {
            "set_none": True
        },
        "hostname": {
            "required": True
        },
        "ip": {
            "required": True
        },
        "default_gateway": {
            "set_none": True
        },
        "subnet_mask": {
            "set_none": True
        },
        "domain_name": {
            "set_none": True
        },
        "dns_server_1": {
            "set_none": True
        },
        "dns_server_2": {
            "set_none": True
        },
        "lease_time": {
            "set_none": True
        },
        "auto_dns": {
            "set_none": True
        }
    }

    use_existing, bind_dict = common.get_properties_and_validate(
        'bind', kwargs, validation_rules
    )

    if use_existing:
        # Externally managed binding: nothing to create.
        ctx.logger.info("Used pre existed!")
        return

    resource_id = ctx.instance.runtime_properties.get('resource_id')
    if resource_id:
        # Retry after a partial failure: the binding already exists.
        ctx.logger.info("Reused %s" % resource_id)
        return

    # credentials
    client_session = common.nsx_login(kwargs)

    if bind_dict.get('mac'):  # if NONE skip this part
        resource_id = nsx_dhcp.add_mac_binding(client_session,
                                               bind_dict['esg_id'],
                                               bind_dict['mac'],
                                               bind_dict['hostname'],
                                               bind_dict['ip'],
                                               bind_dict['default_gateway'],
                                               bind_dict['subnet_mask'],
                                               bind_dict['domain_name'],
                                               bind_dict['dns_server_1'],
                                               bind_dict['dns_server_2'],
                                               bind_dict['lease_time'],
                                               bind_dict['auto_dns'])
    # vnic_id is compared against None explicitly: "0" (or 0) is a valid
    # vnic index but would fail a plain truthiness test.
    elif bind_dict.get('vnic_id') is not None and bind_dict.get('vm_id'):
        resource_id = nsx_dhcp.add_vm_binding(client_session,
                                              bind_dict['esg_id'],
                                              bind_dict['vm_id'],
                                              bind_dict['vnic_id'],
                                              bind_dict['hostname'],
                                              bind_dict['ip'],
                                              bind_dict['default_gateway'],
                                              bind_dict['subnet_mask'],
                                              bind_dict['domain_name'],
                                              bind_dict['dns_server_1'],
                                              bind_dict['dns_server_2'],
                                              bind_dict['lease_time'],
                                              bind_dict['auto_dns'])
    else:
        raise cfy_exc.NonRecoverableError(
            "Please fill vm_id/vnic_id or mac"
        )

    # Persist the NSX binding id so delete() can locate it on teardown.
    ctx.instance.runtime_properties['resource_id'] = resource_id
    ctx.logger.info("Binded %s | %s" % (resource_id, bind_dict))
@operation
def delete(**kwargs):
    """Delete the DHCP binding tracked in this node's runtime properties."""
    use_existing, _bind = common.get_properties('bind', kwargs)
    resource_id = ctx.instance.runtime_properties.get('resource_id')

    # Nothing to delete when the binding is externally managed or was
    # never created; just drop our bookkeeping either way.
    if use_existing or not resource_id:
        common.remove_properties('bind')
        msg = "Used pre existed!" if use_existing else "We dont have resource_id"
        ctx.logger.info(msg)
        return

    # credentials
    session = common.nsx_login(kwargs)

    common.attempt_with_rerun(
        nsx_dhcp.delete_dhcp_binding,
        client_session=session,
        resource_id=resource_id
    )

    ctx.logger.info("deleted %s" % resource_id)
    common.remove_properties('bind')
| apache-2.0 |
dbhirko/ansible-modules-extras | system/ufw.py | 96 | 9822 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ahti Kitsik <ak@ahtik.com>
# (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
# (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
# (c) 2013, James Martin <jmartin@basho.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ufw
short_description: Manage firewall with UFW
description:
- Manage firewall with UFW.
version_added: 1.6
author:
- "Aleksey Ovcharenko (@ovcharenko)"
- "Jarno Keskikangas (@pyykkis)"
- "Ahti Kitsik (@ahtik)"
notes:
- See C(man ufw) for more examples.
requirements:
- C(ufw) package
options:
state:
description:
- C(enabled) reloads firewall and enables firewall on boot.
- C(disabled) unloads firewall and disables firewall on boot.
- C(reloaded) reloads firewall.
- C(reset) disables and resets firewall to installation defaults.
required: false
choices: ['enabled', 'disabled', 'reloaded', 'reset']
policy:
description:
- Change the default policy for incoming or outgoing traffic.
required: false
    aliases: ['default']
choices: ['allow', 'deny', 'reject']
direction:
description:
- Select direction for a rule or default policy command.
required: false
choices: ['in', 'out', 'incoming', 'outgoing', 'routed']
logging:
description:
- Toggles logging. Logged packets use the LOG_KERN syslog facility.
choices: ['on', 'off', 'low', 'medium', 'high', 'full']
required: false
insert:
description:
- Insert the corresponding rule as rule number NUM
required: false
rule:
description:
- Add firewall rule
required: false
choices: ['allow', 'deny', 'reject', 'limit']
log:
description:
- Log new connections matched to this rule
required: false
choices: ['yes', 'no']
from_ip:
description:
- Source IP address.
required: false
aliases: ['from', 'src']
default: 'any'
from_port:
description:
- Source port.
required: false
to_ip:
description:
- Destination IP address.
required: false
aliases: ['to', 'dest']
default: 'any'
to_port:
description:
- Destination port.
required: false
aliases: ['port']
proto:
description:
- TCP/IP protocol.
choices: ['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']
required: false
name:
description:
- Use profile located in C(/etc/ufw/applications.d)
required: false
aliases: ['app']
delete:
description:
- Delete rule.
required: false
choices: ['yes', 'no']
interface:
description:
- Specify interface for rule.
required: false
aliases: ['if']
route:
description:
- Apply the rule to routed/forwarded packets.
required: false
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Allow everything and enable UFW
ufw: state=enabled policy=allow
# Set logging
ufw: logging=on
# Sometimes it is desirable to let the sender know when traffic is
# being denied, rather than simply ignoring it. In these cases, use
# reject instead of deny. In addition, log rejected connections:
ufw: rule=reject port=auth log=yes
# ufw supports connection rate limiting, which is useful for protecting
# against brute-force login attacks. ufw will deny connections if an IP
# address has attempted to initiate 6 or more connections in the last
# 30 seconds. See http://www.debian-administration.org/articles/187
# for details. Typical usage is:
ufw: rule=limit port=ssh proto=tcp
# Allow OpenSSH
ufw: rule=allow name=OpenSSH
# Delete OpenSSH rule
ufw: rule=allow name=OpenSSH delete=yes
# Deny all access to port 53:
ufw: rule=deny port=53
# Allow all access to tcp port 80:
ufw: rule=allow port=80 proto=tcp
# Allow all access from RFC1918 networks to this host:
ufw: rule=allow src={{ item }}
with_items:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
# Deny access to udp port 514 from host 1.2.3.4:
ufw: rule=deny proto=udp src=1.2.3.4 port=514
# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469
# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host.
# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
ufw: rule=deny proto=tcp src=2001:db8::/32 port=25
# Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24.
# Can be used to further restrict a global FORWARD policy set to allow
ufw: rule=deny route=yes src=1.2.3.0/24 dest=4.5.6.0/24
'''
from operator import itemgetter
def main():
    """Entry point of the ufw module: apply state/policy/logging/rule commands."""
    module = AnsibleModule(
        argument_spec = dict(
            state     = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default   = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging   = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']),
            direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete    = dict(default=False, type='bool'),
            route     = dict(default=False, type='bool'),
            insert    = dict(default=None),
            rule      = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']),
            interface = dict(default=None, aliases=['if']),
            log       = dict(default=False, type='bool'),
            from_ip   = dict(default='any', aliases=['src', 'from']),
            from_port = dict(default=None),
            to_ip     = dict(default='any', aliases=['dest', 'to']),
            to_port   = dict(default=None, aliases=['port']),
            proto     = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']),
            app       = dict(default=None, aliases=['name'])
        ),
        supports_check_mode = True,
        mutually_exclusive = [['app', 'proto', 'logging']]
    )

    cmds = []

    def execute(cmd):
        # Each fragment is [enabled, ..., text]; keep only enabled fragments'
        # last element and join them into the final command line.
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd)

        if rc != 0:
            module.fail_json(msg=err or out)

    params = module.params

    # Ensure at least one of the command arguments are given
    command_keys = ['state', 'default', 'rule', 'logging']
    commands = dict((key, params[key]) for key in command_keys if params[key])

    if len(commands) < 1:
        # Report the recognized command keys, not the (empty) commands dict.
        module.fail_json(msg="Not any of the command arguments %s given" % command_keys)

    # module.params always contains every key declared in argument_spec, so a
    # membership test ("'direction' not in params") can never fire; the values
    # must be checked instead for this validation to work.
    if params['interface'] is not None and params['direction'] is None:
        module.fail_json(msg="Direction must be specified when creating a rule on an interface")

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)

    # Save the pre state and rules in order to recognize changes
    (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules")

    # Execute commands.  items() (not the Python-2-only iteritems()) keeps the
    # module working on both Python 2 and Python 3.
    for (command, value) in commands.items():
        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            states = { 'enabled': 'enable',  'disabled': 'disable',
                       'reloaded': 'reload', 'reset':    'reset' }
            execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            execute(cmd + [[command], [value]])

        elif command == 'default':
            execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [delete] [insert NUM] [route] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application]
            cmd.append([module.boolean(params['delete']), 'delete'])
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([params['insert'], "insert %s" % params['insert']])
            cmd.append([value])
            cmd.append([module.boolean(params['log']), 'log'])

            for (key, template) in [('direction', "%s" ), ('interface', "on %s" ),
                                    ('from_ip', "from %s" ), ('from_port', "port %s" ),
                                    ('to_ip', "to %s" ), ('to_port', "port %s" ),
                                    ('proto', "proto %s"), ('app', "app '%s'")]:

                value = params[key]
                cmd.append([value, template % (value)])

            execute(cmd)

    # Get the new state
    (_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules")
    changed = (pre_state != post_state) or (pre_rules != post_rules)

    return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
# import module snippets
from ansible.module_utils.basic import *

# Ansible executes the module at import time.
main()
| gpl-3.0 |
cajone/pychess | lib/pychess/widgets/pydock/PyDockTop.py | 1 | 9627 | from __future__ import absolute_import
from __future__ import print_function
import os
from xml.dom import minidom
from collections import defaultdict
from pychess.System.prefix import addDataPrefix
from .PyDockLeaf import PyDockLeaf
from .PyDockComposite import PyDockComposite
from .ArrowButton import ArrowButton
from .HighlightArea import HighlightArea
from .__init__ import TabReceiver
from .__init__ import NORTH, EAST, SOUTH, WEST, CENTER
class PyDockTop(PyDockComposite, TabReceiver):
    """Root docking container of a perspective.

    Hosts at most one child (a PyDockLeaf or a PyDockComposite tree), shows
    the four edge arrow buttons plus a highlight overlay while a tab is being
    dragged, and can persist/restore the whole dock layout as XML.
    """

    def __init__(self, id, perspective):
        TabReceiver.__init__(self, perspective)
        self.id = id
        self.perspective = perspective
        self.set_no_show_all(True)
        self.highlightArea = HighlightArea(self)
        # GTK signal handler ids per arrow button, kept so _del() can
        # disconnect every handler on disposal.
        self.button_cids = defaultdict(list)
        self.buttons = (
            ArrowButton(self, addDataPrefix("glade/dock_top.svg"), NORTH),
            ArrowButton(self, addDataPrefix("glade/dock_right.svg"), EAST),
            ArrowButton(self, addDataPrefix("glade/dock_bottom.svg"), SOUTH),
            ArrowButton(self, addDataPrefix("glade/dock_left.svg"), WEST))
        for button in self.buttons:
            self.button_cids[button] += [
                button.connect("dropped", self.__onDrop),
                button.connect("hovered", self.__onHover),
                button.connect("left", self.__onLeave),
            ]

    def _del(self):
        # Disconnect all signal handlers and break parent back-references so
        # the widgets can be garbage collected.
        self.highlightArea.disconnect(self.highlightArea.cid)
        for button in self.buttons:
            for cid in self.button_cids[button]:
                button.disconnect(cid)
            button.myparent = None
        self.button_cids = {}
        self.highlightArea.myparent = None
        #self.buttons = None
        #self.highlightArea = None
        TabReceiver._del(self)
        PyDockComposite._del(self)

    def getPosition(self):
        """The top-level dock always reports the CENTER position."""
        return CENTER

    def __repr__(self):
        return "top (%s)" % self.id

    # ===========================================================================
    # Component stuff
    # ===========================================================================

    def addComponent(self, widget):
        """Attach *widget* as the single child and make it visible."""
        self.add(widget)
        widget.show()

    def changeComponent(self, old, new):
        """Swap the current child *old* for *new*."""
        self.removeComponent(old)
        self.addComponent(new)

    def removeComponent(self, widget):
        self.remove(widget)

    def getComponents(self):
        """Return the child as a one-element list, or [] when empty."""
        child = self.get_child()
        if isinstance(child, PyDockComposite) or isinstance(child, PyDockLeaf):
            return [child]
        return []

    def dock(self, widget, position, title, id):
        """Dock *widget*: create the first leaf, or delegate to the child."""
        if not self.getComponents():
            leaf = PyDockLeaf(widget, title, id, self.perspective)
            self.addComponent(leaf)
            return leaf
        else:
            return self.get_child().dock(widget, position, title, id)

    def clear(self):
        self.remove(self.get_child())

    # ===========================================================================
    # Signals
    # ===========================================================================

    def showArrows(self):
        """Show the four drop-target arrows (called while a tab is dragged)."""
        for button in self.buttons:
            button._calcSize()
            button.show()

    def hideArrows(self):
        """Hide the drop-target arrows and the highlight overlay."""
        for button in self.buttons:
            button.hide()
        self.highlightArea.hide()

    def __onDrop(self, arrowButton, sender):
        # A notebook page was dropped on an arrow: undock it from its old
        # leaf and re-dock it at the arrow's edge.
        self.highlightArea.hide()
        child = sender.get_nth_page(sender.get_current_page())
        title, id = sender.get_parent().undock(child)
        self.dock(child, arrowButton.myposition, title, id)

    def __onHover(self, arrowButton, widget):
        self.highlightArea.showAt(arrowButton.myposition)
        arrowButton.get_window().raise_()

    def __onLeave(self, arrowButton):
        self.highlightArea.hide()

    # ===========================================================================
    # XML
    # ===========================================================================

    def saveToXML(self, xmlpath):
        """
        <docks>
            <dock id="x">
                <v pos="200">
                    <leaf current="x" dockable="False">
                        <panel id="x" />
                    </leaf>
                    <h pos="200">
                        <leaf current="y" dockable="True">
                            <panel id="y" />
                            <panel id="z" />
                        </leaf>
                        <leaf current="y" dockable="True">
                            <panel id="y" />
                        </leaf>
                    </h>
                </v>
            </dock>
        </docks>
        """
        dockElem = None

        # Reuse an existing <dock id=...> element (emptied) when the file
        # already exists; otherwise create a fresh document.
        if os.path.isfile(xmlpath):
            doc = minidom.parse(xmlpath)
            for elem in doc.getElementsByTagName("dock"):
                if elem.getAttribute("id") == self.id:
                    for node in elem.childNodes:
                        elem.removeChild(node)
                    dockElem = elem
                    break

        if not dockElem:
            doc = minidom.getDOMImplementation().createDocument(None, "docks",
                                                                None)
            dockElem = doc.createElement("dock")
            dockElem.setAttribute("id", self.id)
            doc.documentElement.appendChild(dockElem)

        if self.get_child():
            self.__addToXML(self.get_child(), dockElem, doc)
        f_handle = open(xmlpath, "w")
        doc.writexml(f_handle)
        f_handle.close()
        doc.unlink()

    def __addToXML(self, component, parentElement, document):
        # Recursively serialize one component (composite or leaf) under
        # parentElement.
        if isinstance(component, PyDockComposite):
            pos = component.paned.get_position()
            if component.getPosition() in (NORTH, SOUTH):
                childElement = document.createElement("v")
                size = float(component.get_allocation().height)
            else:
                childElement = document.createElement("h")
                size = float(component.get_allocation().width)
            # if component.getPosition() in (NORTH, SOUTH):
            # print "saving v position as %s out of %s (%s)" % (str(pos), str(size), str(pos/max(size,pos)))
            # The divider is stored as a fraction of the widget size so it can
            # be restored under a different window geometry.
            childElement.setAttribute("pos", str(pos / max(size, pos)))
            self.__addToXML(component.getComponents()[0], childElement,
                            document)
            self.__addToXML(component.getComponents()[1], childElement,
                            document)

        elif isinstance(component, PyDockLeaf):
            childElement = document.createElement("leaf")
            childElement.setAttribute("current", component.getCurrentPanel())
            childElement.setAttribute("dockable", str(component.isDockable()))
            for panel, title, id in component.getPanels():
                element = document.createElement("panel")
                element.setAttribute("id", id)
                childElement.appendChild(element)

        parentElement.appendChild(childElement)

    def loadFromXML(self, xmlpath, idToWidget):
        """ idTowidget is a dictionary {id: (widget,title)}
            asserts that self.id is in the xmlfile """
        doc = minidom.parse(xmlpath)
        for elem in doc.getElementsByTagName("dock"):
            if elem.getAttribute("id") == self.id:
                break
        else:
            raise AttributeError(
                "XML file contains no <dock> elements with id '%s'" % self.id)
        child = [n for n in elem.childNodes if isinstance(n, minidom.Element)]
        if child:
            self.addComponent(self.__createWidgetFromXML(child[0], idToWidget))

    def __createWidgetFromXML(self, parentElement, idToWidget):
        # Rebuild one component (composite <h>/<v> or <leaf>) from its XML.
        children = [n
                    for n in parentElement.childNodes
                    if isinstance(n, minidom.Element)]

        if parentElement.tagName in ("h", "v"):
            child1, child2 = children

            if parentElement.tagName == "h":
                new = PyDockComposite(EAST, self.perspective)
            else:
                new = PyDockComposite(SOUTH, self.perspective)
            new.initChildren(
                self.__createWidgetFromXML(child1, idToWidget),
                self.__createWidgetFromXML(child2, idToWidget),
                preserve_dimensions=True)

            def cb(widget, event, pos):
                # Restore the divider from its stored fraction once the paned
                # widget knows its real allocation, then self-disconnect.
                allocation = widget.get_allocation()
                if parentElement.tagName == "h":
                    widget.set_position(int(allocation.width * pos))
                else:
                    # print "loading v position as %s out of %s (%s)" % \
                    # (int(allocation.height * pos), str(allocation.height), str(pos))
                    widget.set_position(int(allocation.height * pos))
                widget.disconnect(conid)

            conid = new.paned.connect("size-allocate", cb, float(parentElement.getAttribute("pos")))
            return new

        elif parentElement.tagName == "leaf":
            id = children[0].getAttribute("id")
            title, widget = idToWidget[id]
            leaf = PyDockLeaf(widget, title, id, self.perspective)
            for panelElement in children[1:]:
                id = panelElement.getAttribute("id")
                title, widget = idToWidget[id]
                leaf.dock(widget, CENTER, title, id)
            leaf.setCurrentPanel(parentElement.getAttribute("current"))
            if parentElement.getAttribute("dockable").lower() == "false":
                leaf.setDockable(False)
            return leaf
| gpl-3.0 |
Nicop06/ansible | test/units/modules/packaging/os/test_rhn_register.py | 18 | 11684 | import json
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import PropertyMock, patch, mock_open
from ansible.module_utils import basic
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.modules.packaging.os import rhn_register
from .rhn_utils import (set_module_args, AnsibleExitJson, AnsibleFailJson,
exit_json, fail_json, get_method_name, mock_request)
# Minimal XML-RPC <params> payload containing a system_id, mimicking the
# content of /etc/sysconfig/rhn/systemid; used to exercise systemid parsing.
SYSTEMID = """<?xml version="1.0"?>
<params>
<param>
<value><struct>
<member>
<name>system_id</name>
<value><string>ID-123456789</string></value>
</member>
</struct></value>
</param>
</params>
"""
def skipWhenAllModulesMissing(modules):
    """Skip the decorated test unless one of *modules* can be imported."""
    available = False
    for name in modules:
        try:
            __import__(name)
        except ImportError:
            continue
        available = True
        break
    if available:
        # At least one module imports: leave the test untouched.
        return lambda func: func
    return unittest.skip("{0}: none are available".format(', '.join(modules)))
class TestRhnRegister(unittest.TestCase):
    """Unit tests for the ``rhn_register`` module.

    ``main()`` is driven end-to-end with mocked XML-RPC responses (via
    ``mock_request``) and a patched ``AnsibleModule`` whose exit handlers
    raise ``AnsibleExitJson`` / ``AnsibleFailJson`` instead of exiting.
    """

    def setUp(self):
        self.module = rhn_register
        # Pretend the up2date client libraries are importable.
        self.module.HAS_UP2DATE_CLIENT = True
        load_config_return = {
            'serverURL': 'https://xmlrpc.rhn.redhat.com/XMLRPC',
            'systemIdPath': '/etc/sysconfig/rhn/systemid'
        }
        # Avoid reading the real RHN configuration from disk.
        self.mock_load_config = patch.object(rhn_register.Rhn, 'load_config', return_value=load_config_return)
        self.mock_load_config.start()
        self.addCleanup(self.mock_load_config.stop)
        # Route module exits through exceptions so tests can assert on them.
        self.mock_exit_fail = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
        self.mock_exit_fail.start()
        self.addCleanup(self.mock_exit_fail.stop)
        # Rhn.enable has side effects on the host; stub it for every test.
        enable_patcher = patch.object(rhn_register.Rhn, 'enable')
        self.mock_enable = enable_patcher.start()
        self.addCleanup(enable_patcher.stop)

    def tearDown(self):
        pass

    # This one fails, module needs to be fixed.
    # @patch('os.path.isfile')
    # def test_systemid_requirements_missing(self, mock_isfile):
    #     """Check that missing dependencies are detected"""
    #
    #     def mock_import(name, *args):
    #         if name in ['libxml2', 'libxml']:
    #             raise ImportError()
    #         else:
    #             return orig_import(name, *args)
    #
    #     mock_isfile.return_value = True
    #     with patch('ansible.modules.packaging.os.rhn_register.open', mock_open(read_data=SYSTEMID), create=True):
    #         orig_import = __import__
    #         with patch('__builtin__.__import__', side_effect=mock_import):
    #             rhn = self.module.Rhn()
    #             with self.assertRaises(AnsibleFailJson):
    #                 rhn.systemid

    @skipWhenAllModulesMissing(['libxml2', 'libxml'])
    @patch('os.path.isfile')
    def test_systemid_with_requirements(self, mock_isfile):
        """Check systemid property"""
        # Force the libxml2 path to fail so the alternate parser is used.
        def mock_import(name, *args):
            if name in ['libxml2', 'libxml']:
                raise ImportError()
            else:
                return orig_import(name, *args)
        mock_isfile.return_value = True
        with patch('ansible.modules.packaging.os.rhn_register.open', mock_open(read_data=SYSTEMID), create=True):
            orig_import = __import__
            with patch('__builtin__.__import__', side_effect=mock_import):
                rhn = self.module.Rhn()
                self.assertEqual('123456789', to_native(rhn.systemid))

    def test_without_required_parameters(self):
        """Failure must occurs when all parameters are missing"""
        with self.assertRaises(AnsibleFailJson):
            set_module_args({})
            self.module.main()

    def test_register_parameters(self):
        """Registering an unregistered host"""
        set_module_args({
            'activationkey': 'key',
            'username': 'user',
            'password': 'pass',
        })
        # Scripted XML-RPC replies consumed in order; 'X' * 43 stands in for
        # the session key returned by the mocked auth.login call.
        responses = [
            ('auth.login', ['X' * 43]),
            ('channel.software.listSystemChannels',
             [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
            ('channel.software.setSystemChannels', [1]),
            ('auth.logout', [1]),
        ]
        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.return_value = 0, '', ''  # successful execution, no output
            with patch.object(rhn_register.Rhn, 'systemid', PropertyMock(return_value=12345)):
                with mock_request(responses, self.module.__name__):
                    with self.assertRaises(AnsibleExitJson) as result:
                        self.module.main()
                    self.assertTrue(result.exception.args[0]['changed'])
            self.assertFalse(responses)  # all responses should have been consumed
        self.assertEqual(self.mock_enable.call_count, 1)
        self.mock_enable.reset_mock()
        self.assertEqual(run_command.call_count, 1)
        self.assertEqual(run_command.call_args[0][0][0], '/usr/sbin/rhnreg_ks')

    def test_register_add_channel(self):
        """Register an unregistered host and add another channel"""
        set_module_args({
            'activationkey': 'key',
            'username': 'user',
            'password': 'pass',
            'channels': 'rhel-x86_64-server-6-debuginfo'
        })
        responses = [
            ('auth.login', ['X' * 43]),
            ('channel.software.listSystemChannels', [[{
                'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)',
                'channel_label': 'rhel-x86_64-server-6'}]]),
            ('channel.software.setSystemChannels', [1]),
            ('auth.logout', [1]),
        ]
        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.return_value = 0, '', ''  # successful execution, no output
            with patch.object(rhn_register.Rhn, 'systemid', PropertyMock(return_value=12345)):
                with mock_request(responses, self.module.__name__):
                    with self.assertRaises(AnsibleExitJson) as result:
                        self.module.main()
                    self.assertTrue(result.exception.args[0]['changed'])
            self.assertFalse(responses)  # all responses should have been consumed
        self.assertEqual(self.mock_enable.call_count, 1)
        self.mock_enable.reset_mock()
        self.assertEqual(run_command.call_count, 1)
        self.assertEqual(run_command.call_args[0][0][0], '/usr/sbin/rhnreg_ks')

    def test_already_registered(self):
        """Register an host already registered, check that result is
        unchanged"""
        set_module_args({
            'activationkey': 'key',
            'username': 'user',
            'password': 'pass',
        })
        # No XML-RPC traffic is expected for an already-registered host.
        responses = []
        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            with patch.object(rhn_register.Rhn, 'is_registered', PropertyMock(return_value=True)) as mock_systemid:
                with mock_request(responses, self.module.__name__) as req:
                    with self.assertRaises(AnsibleExitJson) as result:
                        self.module.main()
                    self.assertFalse(result.exception.args[0]['changed'])
                    self.assertFalse(req.called)
            self.assertEqual(mock_systemid.call_count, 1)
        self.assertEqual(self.mock_enable.call_count, 0)
        self.assertFalse(run_command.called)

    @patch('os.unlink')
    def test_unregister(self, mock_unlink):
        """Unregister an host, check that result is changed"""
        mock_unlink.return_value = True
        set_module_args({
            'activationkey': 'key',
            'username': 'user',
            'password': 'pass',
            'state': 'absent',
        })
        responses = [
            ('auth.login', ['X' * 43]),
            ('system.deleteSystems', [1]),
            ('auth.logout', [1]),
        ]
        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.return_value = 0, '', ''  # successful execution, no output
            mock_is_registered = PropertyMock(return_value=True)
            mock_systemid = PropertyMock(return_value=12345)
            with patch.multiple(rhn_register.Rhn, systemid=mock_systemid, is_registered=mock_is_registered):
                with mock_request(responses, self.module.__name__):
                    with self.assertRaises(AnsibleExitJson) as result:
                        self.module.main()
                    self.assertTrue(result.exception.args[0]['changed'])
            self.assertFalse(responses)  # all responses should have been consumed
            self.assertEqual(mock_systemid.call_count, 1)
            self.assertEqual(mock_is_registered.call_count, 1)
        self.assertFalse(run_command.called)
        # Unregistering removes the local systemid file.
        self.assertEqual(mock_unlink.call_count, 1)

    @patch('os.unlink')
    def test_unregister_not_registered(self, mock_unlink):
        """Unregister a unregistered host (systemid missing)
        locally, check that result is unchanged"""
        mock_unlink.return_value = True
        set_module_args({
            'activationkey': 'key',
            'username': 'user',
            'password': 'pass',
            'state': 'absent',
        })
        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            with patch.object(rhn_register.Rhn, 'is_registered', PropertyMock(return_value=False)) as mock_is_registered:
                with patch('ansible.modules.packaging.os.rhn_register.xmlrpc_client.Transport.request') as req:
                    with self.assertRaises(AnsibleExitJson) as result:
                        self.module.main()
                    self.assertFalse(result.exception.args[0]['changed'])
                    self.assertFalse(req.called)
            self.assertEqual(mock_is_registered.call_count, 1)
        self.assertFalse(run_command.called)
        self.assertFalse(mock_unlink.called)

    @patch('os.unlink')
    def test_unregister_unknown_host(self, mock_unlink):
        """Unregister an unknown host (an host with a systemid available
        locally, check that result contains failed"""
        set_module_args({
            'activationkey': 'key',
            'username': 'user',
            'password': 'pass',
            'state': 'absent',
        })
        # The server rejects the delete with an XML-RPC fault; the module
        # must surface that as a failure, leaving the local systemid alone.
        responses = [
            ('auth.login', ['X' * 43]),
            ('system.deleteSystems', xmlrpc_client.Fault(1003, 'The following systems were NOT deleted: 123456789')),
            ('auth.logout', [1]),
        ]
        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.return_value = 0, '', ''  # successful execution, no output
            mock_is_registered = PropertyMock(return_value=True)
            mock_systemid = PropertyMock(return_value=12345)
            with patch.multiple(rhn_register.Rhn, systemid=mock_systemid, is_registered=mock_is_registered):
                with mock_request(responses, self.module.__name__):
                    with self.assertRaises(AnsibleFailJson) as result:
                        self.module.main()
                    self.assertTrue(result.exception.args[0]['failed'])
            self.assertFalse(responses)  # all responses should have been consumed
            self.assertEqual(mock_systemid.call_count, 1)
            self.assertEqual(mock_is_registered.call_count, 1)
        self.assertFalse(run_command.called)
        self.assertFalse(mock_unlink.called)
| gpl-3.0 |
svn2github/django | django/contrib/comments/views/utils.py | 192 | 1947 | """
A few bits of helper functions for comment views.
"""
import urllib
import textwrap
from django.http import HttpResponseRedirect
from django.core import urlresolvers
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import comments
def next_redirect(data, default, default_view, **get_kwargs):
    """
    Handle the "where should I go next?" part of comment views.

    The next value could be a kwarg to the function (``default``), or a
    ``?next=...`` GET arg, or the URL of a given view (``default_view``). See
    the view modules for examples.

    Returns an ``HttpResponseRedirect``.
    """
    target = data.get("next", default)
    if target is None:
        target = urlresolvers.reverse(default_view)
    if get_kwargs:
        # Preserve any fragment so the query string is inserted before it.
        anchor = ''
        if '#' in target:
            target, _sep, fragment = target.rpartition('#')
            anchor = '#' + fragment
        joiner = '&' if '?' in target else '?'
        target = target + joiner + urllib.urlencode(get_kwargs) + anchor
    return HttpResponseRedirect(target)
def confirmation_view(template, doc="Display a confirmation view."):
    """
    Confirmation view generator for the "comment was
    posted/flagged/deleted/approved" views.
    """
    def confirmed(request):
        # Look up the comment referenced by ?c=<pk>, tolerating missing or
        # malformed values by simply rendering without a comment.
        comment = None
        comment_pk = request.GET.get('c')
        if comment_pk is not None:
            try:
                comment = comments.get_model().objects.get(pk=comment_pk)
            except (ObjectDoesNotExist, ValueError):
                pass
        return render_to_response(template,
            {'comment': comment},
            context_instance=RequestContext(request)
        )

    confirmed.__doc__ = textwrap.dedent("""\
        %s

        Templates: `%s``
        Context:
            comment
                The posted comment
        """ % (doc, template)
    )
    return confirmed
| bsd-3-clause |
hhru/ansible | v2/ansible/plugins/callback/__init__.py | 12 | 2941 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#from ansible.utils.display import Display
__all__ = ["CallbackBase"]
class CallbackBase:
    '''
    This is a base ansible callback class that does nothing. New callbacks should
    use this class as a base and override any callback methods they wish to execute
    custom actions.
    '''
    # FIXME: the list of functions here needs to be updated once we have
    # finalized the list of callback methods used in the default callback
    def __init__(self, display):
        # Display object used by subclasses to write output.
        self._display = display
    def set_connection_info(self, conn_info):
        # FIXME: this is a temporary hack, as the connection info object
        # should be created early and passed down through objects
        self._display._verbosity = conn_info.verbosity
    # --- Generic hook: fires for every event, before the specific hook. ---
    def on_any(self, *args, **kwargs):
        pass
    # --- Per-host runner hooks (task results). All no-ops by default. ---
    def runner_on_failed(self, host, res, ignore_errors=False):
        pass
    def runner_on_ok(self, host, res):
        pass
    def runner_on_skipped(self, host, item=None):
        pass
    def runner_on_unreachable(self, host, res):
        pass
    def runner_on_no_hosts(self):
        pass
    # --- Async task lifecycle hooks. ---
    def runner_on_async_poll(self, host, res, jid, clock):
        pass
    def runner_on_async_ok(self, host, res, jid):
        pass
    def runner_on_async_failed(self, host, res, jid):
        pass
    # --- Playbook lifecycle hooks. ---
    def playbook_on_start(self):
        pass
    def playbook_on_notify(self, host, handler):
        pass
    def playbook_on_no_hosts_matched(self):
        pass
    def playbook_on_no_hosts_remaining(self):
        pass
    def playbook_on_task_start(self, name, is_conditional):
        pass
    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass
    def playbook_on_setup(self):
        pass
    def playbook_on_import_for_host(self, host, imported_file):
        pass
    def playbook_on_not_import_for_host(self, host, missing_file):
        pass
    def playbook_on_play_start(self, name):
        pass
    def playbook_on_stats(self, stats):
        pass
Juniper/tempest | tempest/api/compute/admin/test_security_group_default_rules.py | 2 | 5832 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class SecurityGroupDefaultRulesTest(base.BaseV2ComputeAdminTest):
    """Admin API tests for nova-network security group *default* rules:
    create/delete, default CIDR handling, list and show."""

    @classmethod
    # TODO(GMann): Once Bug# 1311500 is fixed, these test can run
    # for Neutron also.
    @testtools.skipIf(CONF.service_available.neutron,
                      "Skip as this functionality is not yet "
                      "implemented in Neutron. Related Bug#1311500")
    def setup_credentials(cls):
        # A network and a subnet will be created for these tests
        cls.set_network_resources(network=True, subnet=True)
        super(SecurityGroupDefaultRulesTest, cls).setup_credentials()
    @classmethod
    def setup_clients(cls):
        super(SecurityGroupDefaultRulesTest, cls).setup_clients()
        # Admin client: default rules are an admin-only API.
        cls.adm_client = cls.os_admin.security_group_default_rules_client
    def _create_security_group_default_rules(self, ip_protocol='tcp',
                                             from_port=22, to_port=22,
                                             cidr='10.10.0.0/24'):
        # Create Security Group default rule and verify the server echoed
        # back exactly what was requested.
        rule = self.adm_client.create_security_default_group_rule(
            ip_protocol=ip_protocol,
            from_port=from_port,
            to_port=to_port,
            cidr=cidr)['security_group_default_rule']
        self.assertEqual(ip_protocol, rule['ip_protocol'])
        self.assertEqual(from_port, rule['from_port'])
        self.assertEqual(to_port, rule['to_port'])
        self.assertEqual(cidr, rule['ip_range']['cidr'])
        return rule
    @decorators.idempotent_id('6d880615-eec3-4d29-97c5-7a074dde239d')
    def test_create_delete_security_group_default_rules(self):
        # Create and delete Security Group default rule
        ip_protocols = ['tcp', 'udp', 'icmp']
        for ip_protocol in ip_protocols:
            rule = self._create_security_group_default_rules(ip_protocol)
            # Delete Security Group default rule
            self.adm_client.delete_security_group_default_rule(rule['id'])
            # A deleted rule must no longer be retrievable.
            self.assertRaises(lib_exc.NotFound,
                              self.adm_client.show_security_group_default_rule,
                              rule['id'])
    @decorators.idempotent_id('4d752e0a-33a1-4c3a-b498-ff8667ca22e5')
    def test_create_security_group_default_rule_without_cidr(self):
        # Omitting the CIDR must default to allowing all addresses.
        ip_protocol = 'udp'
        from_port = 80
        to_port = 80
        rule = self.adm_client.create_security_default_group_rule(
            ip_protocol=ip_protocol,
            from_port=from_port,
            to_port=to_port)['security_group_default_rule']
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        self.assertNotEqual(0, rule['id'])
        self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
    @decorators.idempotent_id('29f2d218-69b0-4a95-8f3d-6bd0ef732b3a')
    def test_create_security_group_default_rule_with_blank_cidr(self):
        # An empty-string CIDR must also fall back to 0.0.0.0/0.
        ip_protocol = 'icmp'
        from_port = 10
        to_port = 10
        cidr = ''
        rule = self.adm_client.create_security_default_group_rule(
            ip_protocol=ip_protocol,
            from_port=from_port,
            to_port=to_port,
            cidr=cidr)['security_group_default_rule']
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        self.assertNotEqual(0, rule['id'])
        self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
    @decorators.idempotent_id('6e6de55e-9146-4ae0-89f2-3569586e0b9b')
    def test_security_group_default_rules_list(self):
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        cidr = '10.10.0.0/24'
        rule = self._create_security_group_default_rules(ip_protocol,
                                                         from_port,
                                                         to_port,
                                                         cidr)
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        rules = (self.adm_client.list_security_group_default_rules()
                 ['security_group_default_rules'])
        self.assertNotEmpty(rules)
        self.assertIn(rule, rules)
    @decorators.idempotent_id('15cbb349-86b4-4f71-a048-04b7ef3f150b')
    def test_default_security_group_default_rule_show(self):
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        cidr = '10.10.0.0/24'
        rule = self._create_security_group_default_rules(ip_protocol,
                                                         from_port,
                                                         to_port,
                                                         cidr)
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        fetched_rule = self.adm_client.show_security_group_default_rule(
            rule['id'])['security_group_default_rule']
        self.assertEqual(rule, fetched_rule)
| apache-2.0 |
rickerc/horizon_audit | openstack_dashboard/dashboards/router/nexus1000v/panel.py | 11 | 1146 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Abishek Subramanian, Cisco Systems, Inc.
# @author: Sergey Sudakovich, Cisco Systems, Inc.
from django.utils.translation import ugettext_lazy as _ # noqa
import horizon
from openstack_dashboard.api import neutron as neutron
from openstack_dashboard.dashboards.router import dashboard
class Nexus1000v(horizon.Panel):
    """Horizon panel for managing Cisco Nexus 1000v network profiles."""
    # Display name shown in the dashboard navigation.
    name = _("Cisco Nexus 1000v")
    # URL slug for the panel.
    slug = 'nexus1000v'
    # Visible only to users holding the network-service permission.
    permissions = ('openstack.services.network',)
# Register the panel on the Router dashboard only when the Neutron backend
# reports port-profile support (i.e. the Nexus 1000v plugin is active).
if neutron.is_port_profiles_supported():
    dashboard.Router.register(Nexus1000v)
| apache-2.0 |
apehua/pilas | pilasengine/interfaz/ingreso_de_texto.py | 6 | 3144 | # -*- encoding: utf-8 -*-
# pilas engine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
import re
from pilasengine.interfaz import elemento
class IngresoDeTexto(elemento.Elemento):
def __init__(self, pilas=None, texto_inicial='', x=0, y=0, ancho=300, limite_de_caracteres=20, icono=None):
super(IngresoDeTexto, self).__init__(pilas, x=x, y=y)
self.texto = texto_inicial
self.cursor = ""
self._cargar_lienzo(ancho)
if icono:
self.icono = self.pilas.imagenes.cargar(icono)
else:
self.icono = None
self.imagen_caja = self.pilas.imagenes.cargar("interfaz/caja.png")
self.centro = ("centro", "centro")
self._actualizar_imagen()
self.limite_de_caracteres = limite_de_caracteres
self.cualquier_caracter()
self.pilas.escena.suelta_tecla.conectar(self.cuando_pulsa_una_tecla)
self.pilas.escena_actual().tareas.siempre(0.40, self._actualizar_cursor)
self.fijo = True
def _actualizar_cursor(self):
if (self.tiene_el_foco):
if self.cursor == "":
self.cursor = "_"
else:
self.cursor = ""
else:
self.cursor = ""
self._actualizar_imagen()
return True
def cualquier_caracter(self):
self.caracteres_permitidos = re.compile(".*")
def solo_numeros(self):
self.caracteres_permitidos = re.compile("\d+")
def solo_letras(self):
self.caracteres_permitidos = re.compile("[a-z]+")
def cuando_pulsa_una_tecla(self, evento):
if self.tiene_el_foco and self.activo:
if evento.codigo == '\x08' or evento.texto == '\x08':
# Indica que se quiere borrar un caracter
self.texto = self.texto[:-1]
else:
if len(self.texto) < self.limite_de_caracteres:
nuevo_texto = self.texto + evento.texto
if (self.caracteres_permitidos.match(evento.texto)):
self.texto = self.texto + evento.texto
else:
print "Rechazando el ingreso del caracter:", evento.texto
else:
print "Rechazando caracter por llegar al limite."
self._actualizar_imagen()
def _cargar_lienzo(self, ancho):
self.imagen = self.pilas.imagenes.cargar_superficie(ancho, 30)
def _actualizar_imagen(self):
ancho = self.imagen_caja.ancho()
alto = self.imagen_caja.alto()
self.imagen.pintar_parte_de_imagen(self.imagen_caja, 0, 0, 40, ancho, 0, 0)
if self.icono:
dx = 20
self.imagen.pintar_parte_de_imagen(self.icono, 0, 0, 40, ancho, 7, 7)
else:
dx = 0
for x in range(40, self.imagen.ancho() - 40):
self.imagen.pintar_parte_de_imagen(self.imagen_caja, ancho - 40, 0, 40, alto, x, 0)
self.imagen.texto(self.texto + self.cursor, 15 + dx, 10) | lgpl-3.0 |
eternalthinker/flask-server-rq-example | venv/lib/python2.7/site-packages/requests/packages/urllib3/response.py | 196 | 12240 | import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
from .exceptions import ProtocolError, DecodeError, ReadTimeoutError
from .packages.six import string_types as basestring, binary_type, PY3
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed
class DeflateDecoder(object):
    """Incremental decoder for 'deflate' content encoding.

    Some servers send raw deflate streams instead of the zlib-wrapped form.
    The first decompression attempt assumes a zlib wrapper; if that fails,
    the buffered input is replayed through a headerless (raw) decompressor.
    """

    def __init__(self):
        self._first_try = True
        self._data = b''
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        # Delegate anything not defined here (flush, unused_data, ...) to
        # the underlying zlib decompress object.
        return getattr(self._obj, name)

    def decompress(self, data):
        if not data:
            return data
        if not self._first_try:
            return self._obj.decompress(data)
        # Keep a copy of everything seen so far so it can be replayed if the
        # zlib-wrapped attempt turns out to be wrong.
        self._data += data
        try:
            decompressed = self._obj.decompress(data)
        except zlib.error:
            # Retry as a raw deflate stream (negative wbits = no header).
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
        else:
            return decompressed
class GzipDecoder(object):
    """Incremental decoder for 'gzip' content encoding."""

    def __init__(self):
        # wbits = 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def __getattr__(self, name):
        # Delegate everything else to the underlying zlib decompress object.
        return getattr(self._obj, name)

    def decompress(self, data):
        return self._obj.decompress(data) if data else data
def _get_decoder(mode):
    """Return the content decoder matching *mode* ('gzip' gets the gzip
    wrapper; everything else falls back to the deflate decoder)."""
    return GzipDecoder() if mode == 'gzip' else DeflateDecoder()
class HTTPResponse(io.IOBase):
    """
    HTTP Response container.

    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed. This
    class is also compatible with the Python standard library's :mod:`io`
    module, and can hence be treated as a readable object in the context of that
    framework.

    Extra parameters for behaviour not present in httplib.HTTPResponse:

    :param preload_content:
        If True, the response's body will be preloaded during construction.

    :param decode_content:
        If True, attempts to decode specific content-encoding's based on headers
        (like 'gzip' and 'deflate') will be skipped and raw data will be used
        instead.

    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """
    # Content-encodings this class knows how to decode transparently.
    CONTENT_DECODERS = ['gzip', 'deflate']
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
        self._decoder = None
        self._body = None
        self._fp = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        # A string/bytes body is stored directly; a file-like body is kept
        # in ``_fp`` and read on demand.
        if body and isinstance(body, (basestring, binary_type)):
            self._body = body
        # Pool/connection are kept so the connection can be released back
        # once the original response is fully consumed.
        self._pool = pool
        self._connection = connection
        if hasattr(body, 'read'):
            self._fp = body
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
    def get_redirect_location(self):
        """
        Should we redirect and where to?

        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get('location')
        return False
    def release_conn(self):
        # Return the underlying connection to its pool, if both are known.
        if not self._pool or not self._connection:
            return
        self._pool._put_conn(self._connection)
        self._connection = None
    @property
    def data(self):
        # For backwords-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body
        if self._fp:
            return self.read(cache_content=True)
    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read
    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None:
            if content_encoding in self.CONTENT_DECODERS:
                self._decoder = _get_decoder(content_encoding)
        if decode_content is None:
            decode_content = self.decode_content
        if self._fp is None:
            return
        flush_decoder = False
        try:
            try:
                if amt is None:
                    # cStringIO doesn't like amt=None
                    data = self._fp.read()
                    flush_decoder = True
                else:
                    cache_content = False
                    data = self._fp.read(amt)
                    if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                        # Close the connection when no data is returned
                        #
                        # This is redundant to what httplib/http.client _should_
                        # already do. However, versions of python released before
                        # December 15, 2012 (http://bugs.python.org/issue16298) do
                        # not properly close the connection in all cases. There is
                        # no harm in redundantly calling close.
                        self._fp.close()
                        flush_decoder = True
            except SocketTimeout:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if 'read operation timed out' not in str(e):  # Defensive:
                    # This shouldn't happen but just in case we're missing an edge
                    # case, let's avoid swallowing SSL errors.
                    raise
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except HTTPException as e:
                # This includes IncompleteRead.
                raise ProtocolError('Connection broken: %r' % e, e)
            # Track raw (wire) bytes, before any decoding.
            self._fp_bytes_read += len(data)
            try:
                if decode_content and self._decoder:
                    data = self._decoder.decompress(data)
            except (IOError, zlib.error) as e:
                raise DecodeError(
                    "Received response with content-encoding: %s, but "
                    "failed to decode it." % content_encoding, e)
            # At end of stream, flush the decoder to emit any buffered tail.
            if flush_decoder and decode_content and self._decoder:
                buf = self._decoder.decompress(binary_type())
                data += buf + self._decoder.flush()
            if cache_content:
                self._body = data
            return data
        finally:
            # Once the original response reports itself fully consumed, the
            # connection can safely go back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
    def stream(self, amt=2**16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.

        :param amt:
            How much of the content to read. The generator will return up to
            much data per iteration, but may return less. This is particularly
            likely when using compressed data. However, the empty string will
            never be returned.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        while not is_fp_closed(self._fp):
            data = self.read(amt=amt, decode_content=decode_content)
            if data:
                yield data
    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = r.msg
        if not isinstance(headers, HTTPHeaderDict):
            if PY3:  # Python 3
                headers = HTTPHeaderDict(headers.items())
            else:  # Python 2
                headers = HTTPHeaderDict.from_httplib(headers)
        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        resp = ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)
        return resp
    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers
    def getheader(self, name, default=None):
        return self.headers.get(name, default)
    # Overrides from io.IOBase
    def close(self):
        if not self.closed:
            self._fp.close()
    @property
    def closed(self):
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        elif hasattr(self._fp, 'isclosed'):  # Python 2
            return self._fp.isclosed()
        else:
            return True
    def fileno(self):
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")
    def flush(self):
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()
    def readable(self):
        # This method is required for `io` module compatibility.
        return True
    def readinto(self, b):
        # This method is required for `io` module compatibility.
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[:len(temp)] = temp
            return len(temp)
| apache-2.0 |
mdanielwork/intellij-community | python/lib/Lib/site-packages/django/contrib/auth/tests/basic.py | 137 | 3575 | from django.test import TestCase
from django.contrib.auth.models import User, AnonymousUser
from django.core.management import call_command
from StringIO import StringIO
class BasicTestCase(TestCase):
    """Sanity checks for user creation, password handling, authentication
    flags and the ``createsuperuser`` management command."""

    def test_user(self):
        "Check that users can be created and can set their password"
        u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
        self.assertTrue(u.has_usable_password())
        self.assertFalse(u.check_password('bad'))
        self.assertTrue(u.check_password('testpw'))
        # Check we can manually set an unusable password
        u.set_unusable_password()
        u.save()
        self.assertFalse(u.check_password('testpw'))
        self.assertFalse(u.has_usable_password())
        u.set_password('testpw')
        self.assertTrue(u.check_password('testpw'))
        # set_password(None) marks the password unusable as well.
        u.set_password(None)
        self.assertFalse(u.has_usable_password())
        # Check authentication/permissions
        self.assertTrue(u.is_authenticated())
        self.assertFalse(u.is_staff)
        self.assertTrue(u.is_active)
        self.assertFalse(u.is_superuser)
        # Check API-based user creation with no password.
        # Bug fix: the original asserted on ``u`` here, so ``u2`` (the user
        # just created without a password) was never actually checked.
        u2 = User.objects.create_user('testuser2', 'test2@example.com')
        self.assertFalse(u2.has_usable_password())

    def test_anonymous_user(self):
        "Check the properties of the anonymous user"
        a = AnonymousUser()
        self.assertFalse(a.is_authenticated())
        self.assertFalse(a.is_staff)
        self.assertFalse(a.is_active)
        self.assertFalse(a.is_superuser)
        self.assertEqual(a.groups.all().count(), 0)
        self.assertEqual(a.user_permissions.all().count(), 0)

    def test_superuser(self):
        "Check the creation and properties of a superuser"
        super = User.objects.create_superuser('super', 'super@example.com', 'super')
        self.assertTrue(super.is_superuser)
        self.assertTrue(super.is_active)
        self.assertTrue(super.is_staff)

    def test_createsuperuser_management_command(self):
        "Check the operation of the createsuperuser management command"
        # We can use the management command to create a superuser
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe",
            email="joe@somewhere.org",
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = User.objects.get(username="joe")
        self.assertEqual(u.email, 'joe@somewhere.org')
        # Non-interactive creation leaves the password blank.
        self.assertTrue(u.check_password(''))
        # We can suppress output on the management command
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe2",
            email="joe2@somewhere.org",
            verbosity=0,
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '')
        u = User.objects.get(username="joe2")
        self.assertEqual(u.email, 'joe2@somewhere.org')
        self.assertTrue(u.check_password(''))
        # Usernames containing '@' (email-style) are accepted too.
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe+admin@somewhere.org",
            email="joe@somewhere.org",
            stdout=new_io
        )
        u = User.objects.get(username="joe+admin@somewhere.org")
        self.assertEqual(u.email, 'joe@somewhere.org')
        self.assertTrue(u.check_password(''))
| apache-2.0 |
MarcosCommunity/odoo | addons/hr_payroll/wizard/__init__.py | 442 | 1159 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
import hr_payroll_contribution_register_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sunny-wyb/xen-4.1.2 | tools/python/build/lib.linux-x86_64-2.7/xen/xend/XendPIFMetrics.py | 48 | 2046 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (c) 2006-2007 Xensource Inc.
#============================================================================
from XendBase import XendBase
class XendPIFMetrics(XendBase):
    """PIF Metrics."""

    @classmethod
    def getClass(cls):
        return "PIF_metrics"

    @classmethod
    def getAttrRO(cls):
        readonly_attrs = ['io_read_kbs',
                          'io_write_kbs',
                          'last_updated',
                          'pif']
        return XendBase.getAttrRO() + readonly_attrs

    def __init__(self, uuid, pif_uuid):
        XendBase.__init__(self, uuid, {})
        self.pif_uuid = pif_uuid

    def get_pif(self):
        return self.pif_uuid

    def get_io_read_kbs(self):
        return self._get_stat(0)

    def get_io_write_kbs(self):
        return self._get_stat(1)

    def _get_stat(self, n):
        # The import is kept for its side effect of loading XendNode;
        # actual PIF utilisation lookup is currently disabled, so the
        # method always reports zero throughput.
        from xen.xend.XendNode import instance as xennode
        return 0.0

    def get_last_updated(self):
        import xen.xend.XendAPI as XendAPI
        return XendAPI.now()
| gpl-2.0 |
xju2/hzzws | scripts/low_mass.py | 1 | 2906 | #!/usr/bin/env python
import common
import glob
name = "Low"
binning = "60, 110, 140"
branch = "m4l_constrained, "+binning
###in workspace
obs_binning = binning
# key: category name
# value: TCut on mini-tree
categories = {
"ggF_4mu_13TeV" : "(event_type==0)",
"ggF_2mu2e_13TeV" : "(event_type==2)",
"ggF_2e2mu_13TeV" : "(event_type==3)",
"ggF_4e_13TeV" : "(event_type==1)",
}
#categories = {"all" : "(1==1)"}
sig_samples = ["ggH", "VBFH", "ZH", "WH", "ttH"]
bkg_samples = ["qqZZ", "Zjets",
"ggZZ"
]
samples = sig_samples + bkg_samples
samples_para = samples
samples_lowmass_sig125 = {
"ggH":common.minitree_dir+"mc15_13TeV.341505.PowhegPythia8EvtGen_CT10_AZNLOCTEQ6L1_ggH125_ZZ4lep_noTau.root",
"VBFH":common.minitree_dir+"mc15_13TeV.341518.PowhegPythia8EvtGen_CT10_AZNLOCTEQ6L1_VBFH125_ZZ4lep_noTau.root",
"WH":common.minitree_dir+"mc15_13TeV.341964.Pythia8EvtGen_A14NNPDF23LO_WH125_ZZ4l.root",
"ZH":common.minitree_dir+"mc15_13TeV.341947.Pythia8EvtGen_A14NNPDF23LO_ZH125_ZZ4l.root",
"ttH":common.minitree_dir+"mc15_13TeV.342561.aMcAtNloHerwigppEvtGen_UEEE5_CTEQ6L1_CT10ME_ttH125_4l.root",
}
#masses = [124, 125, 126]
masses = [125]
mass_points = len(masses)
def get_mass(im):
    """Return the im-th mass hypothesis from the module-level ``masses`` list."""
    return masses[im]
def get_sample_dict(mass):
    """Map each signal sample name to its minitree file for the given mass.

    Samples whose glob pattern matches nothing (or more than two files) are
    omitted; when exactly two files match, the "noTau" variant is preferred.
    """
    result = {}
    for name in sig_samples:
        pattern = common.minitree_dir + "*" + name + str(mass) + "_*4l*.root"
        matches = glob.glob(pattern)
        if len(matches) == 1:
            result[name] = matches[0]
        elif len(matches) == 2:
            for candidate in matches:
                if "noTau" in candidate:
                    result[name] = candidate
    return result
def get_signal_dict():
    """Build {mass (as str): {sample name: minitree file}} for all mass points."""
    all_masses = [get_mass(i) for i in range(mass_points)]
    return dict((str(m), get_sample_dict(m)) for m in all_masses)
samples_sig = get_signal_dict()
samples_bkg = {
#"qqZZ":common.minitree_dir+"mc15_13TeV.342556.PowhegPy8EG_CT10nloME_AZNLOCTEQ6L1_ZZllll_mll4_m4l_100_150.root",
"qqZZ":"/afs/cern.ch/atlas/groups/HSG2/H4l/run2/2015/MiniTrees/Prod_v03/mc_15b/Nominal/mc15_13TeV.342556.PowhegPy8EG_CT10nloME_AZNLOCTEQ6L1_ZZllll_mll4_m4l_100_150.root",
#"Zjets":common.minitree_dir+"combined/mc15_redBkg_filtered.root"
"Zjets":"/afs/cern.ch/atlas/groups/HSG2/H4l/run2/2015/MiniTrees/Prod_v01/mc/Nominal/combined/mc15_redBkg_filtered.root",
"ggZZ":common.minitree_dir+"mc15_gg2ZZ_low.root",
}
def print_samples():
    """Debug helper: print each sample name and its minitree path for the
    backgrounds and the 125 GeV signal set (Python 2 print statements)."""
    for sample,add in samples_bkg.iteritems():
        print sample,add
    for sample,add in samples_sig["125"].iteritems():
        print sample,add
#print_samples()
samples_sig_scale = 1.0
samples_bkg_scale = 1.0
data = common.minitree_dir+"../../data15_grl_v73.root"
if __name__ == "__main__":
    # Allow running this configuration module directly to dump the mapping.
    print_samples()
| mit |
zooba/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/unittest/test/test_suite.py | 108 | 15184 | import unittest
import gc
import sys
import weakref
from unittest.test.support import LoggingResult, TestEquality
### Support code for Test_TestSuite
################################################################
class Test(object):
    # Namespace holder: ``Foo`` provides trivial test methods that the
    # suite-construction tests below use as fixture TestCase instances.
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def test_3(self): pass
        def runTest(self): pass
def _mk_TestSuite(*names):
    """Return a TestSuite with one Test.Foo instance per test-method name."""
    cases = [Test.Foo(name) for name in names]
    return unittest.TestSuite(cases)
################################################################
class Test_TestSuite(unittest.TestCase, TestEquality):
    """Behavioural tests for unittest.TestSuite and BaseTestSuite.

    The quoted comments throughout are taken from the documented TestSuite
    contract; each test pins one clause of that contract.
    """

    ### Set up attributes needed by inherited tests
    ################################################################

    # Used by TestEquality.test_eq
    eq_pairs = [(unittest.TestSuite(), unittest.TestSuite())
               ,(unittest.TestSuite(), unittest.TestSuite([]))
               ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]

    # Used by TestEquality.test_ne
    ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1'))
               ,(unittest.TestSuite([]), _mk_TestSuite('test_1'))
               ,(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3'))
               ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]

    ################################################################
    ### /Set up attributes needed by inherited tests

    ### Tests for TestSuite.__init__
    ################################################################

    # "class TestSuite([tests])"
    #
    # The tests iterable should be optional
    def test_init__tests_optional(self):
        suite = unittest.TestSuite()

        self.assertEqual(suite.countTestCases(), 0)
        # countTestCases() still works after tests are run
        suite.run(unittest.TestResult())
        self.assertEqual(suite.countTestCases(), 0)

    # "class TestSuite([tests])"
    # ...
    # "If tests is given, it must be an iterable of individual test cases
    # or other test suites that will be used to build the suite initially"
    #
    # TestSuite should deal with empty tests iterables by allowing the
    # creation of an empty suite
    def test_init__empty_tests(self):
        suite = unittest.TestSuite([])

        self.assertEqual(suite.countTestCases(), 0)
        # countTestCases() still works after tests are run
        suite.run(unittest.TestResult())
        self.assertEqual(suite.countTestCases(), 0)

    # "class TestSuite([tests])"
    # ...
    # "If tests is given, it must be an iterable of individual test cases
    # or other test suites that will be used to build the suite initially"
    #
    # TestSuite should allow any iterable to provide tests
    def test_init__tests_from_any_iterable(self):
        def tests():
            yield unittest.FunctionTestCase(lambda: None)
            yield unittest.FunctionTestCase(lambda: None)

        suite_1 = unittest.TestSuite(tests())
        self.assertEqual(suite_1.countTestCases(), 2)

        suite_2 = unittest.TestSuite(suite_1)
        self.assertEqual(suite_2.countTestCases(), 2)

        suite_3 = unittest.TestSuite(set(suite_1))
        self.assertEqual(suite_3.countTestCases(), 2)

        # countTestCases() still works after tests are run
        suite_1.run(unittest.TestResult())
        self.assertEqual(suite_1.countTestCases(), 2)
        suite_2.run(unittest.TestResult())
        self.assertEqual(suite_2.countTestCases(), 2)
        suite_3.run(unittest.TestResult())
        self.assertEqual(suite_3.countTestCases(), 2)

    # "class TestSuite([tests])"
    # ...
    # "If tests is given, it must be an iterable of individual test cases
    # or other test suites that will be used to build the suite initially"
    #
    # Does TestSuite() also allow other TestSuite() instances to be present
    # in the tests iterable?
    def test_init__TestSuite_instances_in_tests(self):
        def tests():
            ftc = unittest.FunctionTestCase(lambda: None)
            yield unittest.TestSuite([ftc])
            yield unittest.FunctionTestCase(lambda: None)

        suite = unittest.TestSuite(tests())
        self.assertEqual(suite.countTestCases(), 2)
        # countTestCases() still works after tests are run
        suite.run(unittest.TestResult())
        self.assertEqual(suite.countTestCases(), 2)

    ################################################################
    ### /Tests for TestSuite.__init__

    # Container types should support the iter protocol
    def test_iter(self):
        test1 = unittest.FunctionTestCase(lambda: None)
        test2 = unittest.FunctionTestCase(lambda: None)
        suite = unittest.TestSuite((test1, test2))

        self.assertEqual(list(suite), [test1, test2])

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    #
    # Presumably an empty TestSuite returns 0?
    def test_countTestCases_zero_simple(self):
        suite = unittest.TestSuite()

        self.assertEqual(suite.countTestCases(), 0)

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    #
    # Presumably an empty TestSuite (even if it contains other empty
    # TestSuite instances) returns 0?
    def test_countTestCases_zero_nested(self):
        class Test1(unittest.TestCase):
            def test(self):
                pass

        suite = unittest.TestSuite([unittest.TestSuite()])

        self.assertEqual(suite.countTestCases(), 0)

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    def test_countTestCases_simple(self):
        test1 = unittest.FunctionTestCase(lambda: None)
        test2 = unittest.FunctionTestCase(lambda: None)
        suite = unittest.TestSuite((test1, test2))

        self.assertEqual(suite.countTestCases(), 2)
        # countTestCases() still works after tests are run
        suite.run(unittest.TestResult())
        self.assertEqual(suite.countTestCases(), 2)

    # "Return the number of tests represented by the this test object.
    # ...this method is also implemented by the TestSuite class, which can
    # return larger [greater than 1] values"
    #
    # Make sure this holds for nested TestSuite instances, too
    def test_countTestCases_nested(self):
        class Test1(unittest.TestCase):
            def test1(self): pass
            def test2(self): pass

        test2 = unittest.FunctionTestCase(lambda: None)
        test3 = unittest.FunctionTestCase(lambda: None)
        child = unittest.TestSuite((Test1('test2'), test2))
        parent = unittest.TestSuite((test3, child, Test1('test1')))

        self.assertEqual(parent.countTestCases(), 4)
        # countTestCases() still works after tests are run
        parent.run(unittest.TestResult())
        self.assertEqual(parent.countTestCases(), 4)
        self.assertEqual(child.countTestCases(), 2)

    # "Run the tests associated with this suite, collecting the result into
    # the test result object passed as result."
    #
    # And if there are no tests? What then?
    def test_run__empty_suite(self):
        events = []
        result = LoggingResult(events)

        suite = unittest.TestSuite()

        suite.run(result)

        self.assertEqual(events, [])

    # "Note that unlike TestCase.run(), TestSuite.run() requires the
    # "result object to be passed in."
    def test_run__requires_result(self):
        suite = unittest.TestSuite()

        try:
            suite.run()
        except TypeError:
            pass
        else:
            self.fail("Failed to raise TypeError")

    # "Run the tests associated with this suite, collecting the result into
    # the test result object passed as result."
    def test_run(self):
        events = []
        result = LoggingResult(events)

        class LoggingCase(unittest.TestCase):
            def run(self, result):
                events.append('run %s' % self._testMethodName)

            def test1(self): pass
            def test2(self): pass

        tests = [LoggingCase('test1'), LoggingCase('test2')]

        unittest.TestSuite(tests).run(result)

        self.assertEqual(events, ['run test1', 'run test2'])

    # "Add a TestCase ... to the suite"
    def test_addTest__TestCase(self):
        class Foo(unittest.TestCase):
            def test(self): pass

        test = Foo('test')
        suite = unittest.TestSuite()

        suite.addTest(test)

        self.assertEqual(suite.countTestCases(), 1)
        self.assertEqual(list(suite), [test])
        # countTestCases() still works after tests are run
        suite.run(unittest.TestResult())
        self.assertEqual(suite.countTestCases(), 1)

    # "Add a ... TestSuite to the suite"
    def test_addTest__TestSuite(self):
        class Foo(unittest.TestCase):
            def test(self): pass

        suite_2 = unittest.TestSuite([Foo('test')])

        suite = unittest.TestSuite()
        suite.addTest(suite_2)

        self.assertEqual(suite.countTestCases(), 1)
        self.assertEqual(list(suite), [suite_2])
        # countTestCases() still works after tests are run
        suite.run(unittest.TestResult())
        self.assertEqual(suite.countTestCases(), 1)

    # "Add all the tests from an iterable of TestCase and TestSuite
    # instances to this test suite."
    #
    # "This is equivalent to iterating over tests, calling addTest() for
    # each element"
    def test_addTests(self):
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass

        test_1 = Foo('test_1')
        test_2 = Foo('test_2')
        inner_suite = unittest.TestSuite([test_2])

        def gen():
            yield test_1
            yield test_2
            yield inner_suite

        suite_1 = unittest.TestSuite()
        suite_1.addTests(gen())

        self.assertEqual(list(suite_1), list(gen()))

        # "This is equivalent to iterating over tests, calling addTest() for
        # each element"
        suite_2 = unittest.TestSuite()
        for t in gen():
            suite_2.addTest(t)

        self.assertEqual(suite_1, suite_2)

    # "Add all the tests from an iterable of TestCase and TestSuite
    # instances to this test suite."
    #
    # What happens if it doesn't get an iterable?
    def test_addTest__noniterable(self):
        suite = unittest.TestSuite()

        try:
            suite.addTests(5)
        except TypeError:
            pass
        else:
            self.fail("Failed to raise TypeError")

    def test_addTest__noncallable(self):
        suite = unittest.TestSuite()
        self.assertRaises(TypeError, suite.addTest, 5)

    def test_addTest__casesuiteclass(self):
        # Classes (as opposed to instances) must be rejected.
        suite = unittest.TestSuite()
        self.assertRaises(TypeError, suite.addTest, Test_TestSuite)
        self.assertRaises(TypeError, suite.addTest, unittest.TestSuite)

    def test_addTests__string(self):
        # A string is iterable but is not a valid collection of tests.
        suite = unittest.TestSuite()
        self.assertRaises(TypeError, suite.addTests, "foo")

    def test_function_in_suite(self):
        def f(_):
            pass
        suite = unittest.TestSuite()
        suite.addTest(f)

        # when the bug is fixed this line will not crash
        suite.run(unittest.TestResult())

    def test_remove_test_at_index(self):
        if not unittest.BaseTestSuite._cleanup:
            raise unittest.SkipTest("Suite cleanup is disabled")

        suite = unittest.TestSuite()

        suite._tests = [1, 2, 3]
        suite._removeTestAtIndex(1)

        # The slot is None-ed out rather than removed, preserving indices.
        self.assertEqual([1, None, 3], suite._tests)

    def test_remove_test_at_index_not_indexable(self):
        if not unittest.BaseTestSuite._cleanup:
            raise unittest.SkipTest("Suite cleanup is disabled")

        suite = unittest.TestSuite()
        suite._tests = None

        # if _removeAtIndex raises for noniterables this next line will break
        suite._removeTestAtIndex(2)

    def assert_garbage_collect_test_after_run(self, TestSuiteClass):
        if not unittest.BaseTestSuite._cleanup:
            raise unittest.SkipTest("Suite cleanup is disabled")

        class Foo(unittest.TestCase):
            def test_nothing(self):
                pass

        test = Foo('test_nothing')
        wref = weakref.ref(test)

        suite = TestSuiteClass([wref()])
        suite.run(unittest.TestResult())

        del test

        # for the benefit of non-reference counting implementations
        gc.collect()

        self.assertEqual(suite._tests, [None])
        self.assertIsNone(wref())

    def test_garbage_collect_test_after_run_BaseTestSuite(self):
        self.assert_garbage_collect_test_after_run(unittest.BaseTestSuite)

    def test_garbage_collect_test_after_run_TestSuite(self):
        self.assert_garbage_collect_test_after_run(unittest.TestSuite)

    def test_basetestsuite(self):
        class Test(unittest.TestCase):
            wasSetUp = False
            wasTornDown = False
            @classmethod
            def setUpClass(cls):
                cls.wasSetUp = True
            @classmethod
            def tearDownClass(cls):
                cls.wasTornDown = True
            def testPass(self):
                pass
            def testFail(self):
                # deliberate NameError: recorded as an error by the result
                fail
        class Module(object):
            wasSetUp = False
            wasTornDown = False
            @staticmethod
            def setUpModule():
                Module.wasSetUp = True
            @staticmethod
            def tearDownModule():
                Module.wasTornDown = True

        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')

        suite = unittest.BaseTestSuite()
        suite.addTests([Test('testPass'), Test('testFail')])
        self.assertEqual(suite.countTestCases(), 2)

        result = unittest.TestResult()
        suite.run(result)
        # BaseTestSuite must NOT invoke class/module fixtures.
        self.assertFalse(Module.wasSetUp)
        self.assertFalse(Module.wasTornDown)
        self.assertFalse(Test.wasSetUp)
        self.assertFalse(Test.wasTornDown)
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(suite.countTestCases(), 2)

    def test_overriding_call(self):
        class MySuite(unittest.TestSuite):
            called = False
            def __call__(self, *args, **kw):
                self.called = True
                unittest.TestSuite.__call__(self, *args, **kw)

        suite = MySuite()
        result = unittest.TestResult()
        wrapper = unittest.TestSuite()
        wrapper.addTest(suite)
        wrapper(result)
        self.assertTrue(suite.called)

        # reusing results should be permitted even if abominable
        self.assertFalse(result._testRunEntered)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| apache-2.0 |
Lothiraldan/OneTask | onetask/tests.py | 1 | 4351 | # -*- coding: utf-8 -*-
import os
import json
import tempfile
import unittest
from .collection import TaskCollection
from subprocess import check_output, CalledProcessError
class TaskCollectionTest(unittest.TestCase):
    """Unit and CLI-level tests for TaskCollection."""

    def _create_db(self, **kwargs):
        # Persist the given mapping as JSON in a throwaway file and
        # return the (still open) file object.
        temp = tempfile.NamedTemporaryFile(prefix='onetasktest', suffix='.json',
                                           mode='w+t', delete=False)
        temp.write(json.dumps(dict(**kwargs)))
        # NOTE(review): this read() appears intended to force the buffered
        # write out before TaskCollection.load reopens the file — confirm.
        temp.read()
        return temp

    def _load(self, **kwargs):
        # Build a DB file from kwargs and load it as a TaskCollection.
        temp = self._create_db(**kwargs)
        return TaskCollection.load(temp.name)

    def assertCommandOK(self, command):
        try:
            check_output(command)
        except CalledProcessError as err:
            # BUG FIX: the original message was "'Command is not ok: ' % err"
            # with no placeholder, which raised TypeError instead of the
            # intended AssertionError.
            raise AssertionError('Command is not ok: %s' % err)

    def assertCommandKO(self, command):
        assert isinstance(command, (list, tuple,))
        self.assertRaises(CalledProcessError, check_output, command)

    def test_load(self):
        tasks = self._load(tasks=[{"title": "task1"}, {"title": "task2"}])
        self.assertEqual(len(tasks.data['tasks']), 2)
        self.assertEqual(tasks.data['tasks'][0]['title'], 'task1')
        self.assertEqual(tasks.data['tasks'][1]['title'], 'task2')

    def test_add(self):
        tasks = self._load(tasks=[])
        tasks.add('task1')
        self.assertEqual(len(tasks.data['tasks']), 1)
        self.assertEqual(tasks.data['tasks'][0]['title'], 'task1')
        tasks.add('task2')
        self.assertEqual(len(tasks.data['tasks']), 2)
        self.assertEqual(tasks.data['tasks'][0]['title'], 'task1')
        tasks.add('task3')
        self.assertEqual(len(tasks.data['tasks']), 3)
        self.assertEqual(tasks.data['tasks'][0]['title'], 'task1')

    def test_get(self):
        tasks = self._load(tasks=[{"title": "task1", "created": 1000}],
                           current=None, archive=[])
        self.assertEqual(tasks.get(), 'task1')
        for x in range(2, 100):
            tasks.add('task%d' % x)
            self.assertEqual(len(tasks.data['tasks']), x - 1)
            # The current task stays sticky until it is marked done.
            self.assertEqual(tasks.get(), 'task1')
        tasks.done(closed=3000)
        self.assertEqual(len(tasks.data['tasks']), x - 1)
        self.assertNotEqual(tasks.get(), 'task1')
        self.assertEqual(tasks.data['archive'][0]['title'], 'task1')
        # duration = closed (3000) - created (1000)
        self.assertEqual(tasks.data['archive'][0]['duration'], 2000)

    def test_done(self):
        tasks = self._load(tasks=[], current=None, archive=[])
        tasks.add('task1')
        self.assertEqual(tasks.get(), 'task1')
        self.assertEqual(len(tasks.data['tasks']), 0)
        tasks.add('task2')
        self.assertEqual(tasks.get(), 'task1')
        self.assertEqual(len(tasks.data['tasks']), 1)
        self.assertEqual(len(tasks.data['archive']), 0)
        tasks.done()
        self.assertEqual(len(tasks.data['tasks']), 1)
        self.assertEqual(tasks.data['tasks'][0]['title'], 'task2')
        self.assertEqual(len(tasks.data['archive']), 1)
        self.assertEqual(tasks.data['archive'][0]['title'], 'task1')
        tasks.get()
        tasks.done()
        self.assertEqual(len(tasks.data['tasks']), 0)
        self.assertEqual(len(tasks.data['archive']), 2)
        self.assertEqual(tasks.data['archive'][0]['title'], 'task1')
        self.assertEqual(tasks.data['archive'][1]['title'], 'task2')

    def test_skip(self):
        tasks = self._load(tasks=[{"title": "task1"},
                                  {"title": "task2"},
                                  {"title": "task3"}],
                           current=None)
        current = tasks.get()
        for i in range(4):
            tasks.skip()
            new = tasks.get()
            self.assertNotEqual(current, new)
            current = new

    def test_cli(self):
        # End-to-end check of the command-line interface via subprocess.
        tmp_path = self._create_db(current=None, tasks=[], archive=[]).name
        os.environ['ONETASK_DB'] = tmp_path
        executable = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                  '..', 'bin', 'onetask'))
        self.assertCommandOK([executable])
        self.assertCommandOK([executable, 'add', 'plop'])
        self.assertEqual(check_output([executable, 'get']), b'plop\n')
        self.assertCommandOK([executable, 'done'])
        self.assertCommandKO([executable, 'get'])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| mit |
PremiumGraphics/DirectView | ThirdParty/wxWidgets-3.0.2/docs/doxygen/scripts/sip_tools.py | 23 | 3152 | import os
from common import *
class SIPBuilder:
    """Generate SIP binding files (one per wx class) from doxygen XML.

    Python 2 code: uses print statements and xrange.
    """

    def __init__(self, doxyparse, outputdir):
        # doxyparse: parsed doxygen model exposing .classes
        # outputdir: root directory under which a "sip" subdir is created
        self.doxyparser = doxyparse
        self.output_dir = outputdir

    def make_bindings(self):
        """Write a _<classname>.sip file for every non-excluded class."""
        output_dir = os.path.abspath(os.path.join(self.output_dir, "sip"))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        for aclass in self.doxyparser.classes:
            if aclass.name in excluded_classes:
                print "Skipping %s" % aclass.name
                continue

            # Strip the "wx" prefix for the output file name.
            header_name = aclass.name[2:].lower()
            filename = os.path.join(output_dir, "_" + header_name + ".sip")
            enums_text = make_enums(aclass)
            method_text = self.make_sip_methods(aclass)
            base_class = get_first_value(aclass.bases)
            if base_class != "":
                base_class = ": %s" % base_class
            # %% in the template escapes a literal % for the string formatting.
            text = """
%s

class %s %s
{
%%TypeHeaderCode
#include <%s>
%%End

public:
%s
};
""" % (enums_text, aclass.name, base_class, get_first_value(aclass.includes), method_text)

            afile = open(filename, "wb")
            afile.write(text)
            afile.close()

    def make_sip_methods(self, aclass):
        """Return the SIP declarations for all constructors and methods."""
        retval = ""

        for amethod in aclass.constructors + aclass.methods:
            transfer = ""
            # FIXME: we need to come up with a way of filtering the methods out by various criteria
            # including parameters and method name, and how to deal with overloads
            if aclass.name in ignored_methods:
                should_ignore = False
                for method in ignored_methods[aclass.name]:
                    print "method = %s" % method
                    if method == amethod.name:
                        # Only ignore when every listed parameter type matches.
                        params = ignored_methods[aclass.name][method]
                        should_ignore = True
                        for i in xrange(len(params)):
                            if i >= len(amethod.params):
                                should_ignore = False
                                break
                            elif amethod.params[i]["type"] != params[i]:
                                print "param type = %s, amethod.param type = %s" % (params[i], amethod.params[i]["type"])
                                should_ignore = False
                                break

                if should_ignore:
                    continue

            # We need to let SIP know when wx is responsible for deleting the object.
            # We do this if the class is derived from wxWindow, since wxTLW manages child windows
            # and wxApp deletes all wxTLWs on shutdown
            if amethod in aclass.constructors and self.doxyparser.is_derived_from_base(aclass, "wxWindow"):
                transfer = "/Transfer/"

            if amethod.name.startswith("operator"):
                continue

            retval += "    %s %s%s%s;\n\n" % (amethod.return_type.replace("virtual ", ""), amethod.name, amethod.argsstring, transfer)

        return retval
| lgpl-3.0 |
conejoninja/xbmc-seriesly | library_service.py | 1 | 4954 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# seriesly - XBMC Plugin
# http://blog.tvalacarta.info/plugin-xbmc/seriesly/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
import sys
import xbmc,time
from core import scrapertools
from core import config
from core import logger
from core.item import Item
from servers import servertools
logger.info("[library_service.py] Actualizando series...")
from platformcode.xbmc import library
from platformcode.xbmc import launcher
import xbmcgui

# Ensure the SERIES library folder exists (the delete-before-update step
# below is intentionally disabled).
directorio = os.path.join(config.get_library_path(),"SERIES")
logger.info ("directorio="+directorio)
import shutil
#if os.path.exists(directorio):
#    shutil.rmtree(directorio)
if not os.path.exists(directorio):
    os.mkdir(directorio)

# series.xml holds one line per tracked show: "<name>,<url>,<channel>"
nombre_fichero_config_canal = os.path.join( config.get_data_path() , "series.xml" )

try:
    if config.get_setting("updatelibrary")=="true":
        config_canal = open( nombre_fichero_config_canal , "r" )
        for serie in config_canal.readlines():
            logger.info("[library_service.py] serie="+serie)
            serie = serie.split(",")
            ruta = os.path.join( config.get_library_path() , "SERIES" , serie[0] )
            logger.info("[library_service.py] ruta =#"+ruta+"#")
            if os.path.exists( ruta ):
                logger.info("[library_service.py] Actualizando "+serie[0])
                item = Item(url=serie[1], show=serie[0])
                try:
                    # Dispatch to the channel module named in the third field
                    # to fetch the episode list for this show.
                    itemlist = []
                    if serie[2].strip()=='veranime':
                        from seriesly.channels import veranime
                        itemlist = veranime.episodios(item)
                    if serie[2].strip()=='tumejortv':
                        from seriesly.channels import tumejortv
                        itemlist = tumejortv.findepisodios(item)
                    if serie[2].strip()=='shurweb':
                        from seriesly.channels import shurweb
                        itemlist = shurweb.episodios(item)
                    if serie[2].strip()=='seriespepito':
                        from seriesly.channels import seriespepito
                        itemlist = seriespepito.episodios(item)
                    if serie[2].strip()=='seriesyonkis':
                        from seriesly.channels import seriesyonkis
                        itemlist = seriesyonkis.episodios(item)
                    if serie[2].strip()=='seriesly':
                        from seriesly.channels import seriesly
                        itemlist = seriesly.episodios(item)
                    if serie[2].strip()=='cuevana':
                        from seriesly.channels import cuevana
                        itemlist = cuevana.episodios(item)
                    if serie[2].strip()=='animeflv':
                        from seriesly.channels import animeflv
                        itemlist = animeflv.episodios(item)
                    if serie[2].strip()=='animeid':
                        from seriesly.channels import animeid
                        itemlist = animeid.episodios(item)
                    if serie[2].strip()=='moviezet':
                        from seriesly.channels import moviezet
                        itemlist = moviezet.serie(item)
                except:
                    # Log the full traceback line by line, then carry on
                    # with an empty episode list for this show.
                    import traceback
                    from pprint import pprint
                    exc_type, exc_value, exc_tb = sys.exc_info()
                    lines = traceback.format_exception(exc_type, exc_value, exc_tb)
                    for line in lines:
                        line_splits = line.split("\n")
                        for line_split in line_splits:
                            logger.error(line_split)
                    itemlist = []
            else:
                logger.info("[library_service.py] No actualiza "+serie[0]+" (no existe el directorio)")
                itemlist=[]
            # Save every fetched episode into the XBMC library.
            for item in itemlist:
                #logger.info("item="+item.tostring())
                try:
                    item.show=serie[0].strip()
                    library.savelibrary( titulo=item.title , url=item.url , thumbnail=item.thumbnail , server=item.server , plot=item.plot , canal=item.channel , category="Series" , Serie=item.show , verbose=False, accion="play_from_library", pedirnombre=False, subtitle=item.subtitle )
                except:
                    logger.info("[library_service.py] Capitulo no valido")
        # Trigger a video-library refresh once the shows are processed.
        import xbmc
        xbmc.executebuiltin('UpdateLibrary(video)')
    else:
        logger.info("No actualiza la biblioteca, está desactivado en la configuración de seriesly")
except:
    logger.info("[library_service.py] No hay series para actualizar")
| gpl-3.0 |
joansmith/openmicroscopy | components/tools/OmeroWeb/test/integration/test_chgrp.py | 7 | 12437 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Tests chgrp functionality of views.py
"""
from omero.model import ProjectI, DatasetI, TagAnnotationI
from omero.rtypes import rstring
from omero.gateway import BlitzGateway
import pytest
import time
from weblibrary import IWebTest, _get_response, _csrf_post_response
from django.core.urlresolvers import reverse
import json
# OMERO group permission strings used when creating test groups.
PRIVATE = 'rw----'
READONLY = 'rwr---'
READANNOTATE = 'rwra--'
COLLAB = 'rwrw--'
class TestChgrp(IWebTest):
"""
Tests chgrp
"""
@classmethod
def setup_class(cls):
    """Returns a logged in Django test client."""
    super(TestChgrp, cls).setup_class()
    # Add user to secondary group, so a chgrp target exists.
    cls.group2 = cls.new_group(
        experimenters=[cls.ctx.userName], perms=PRIVATE)
    # Refresh client so the new group membership is visible.
    cls.ctx = cls.sf.getAdminService().getEventContext()
    cls.django_client = cls.new_django_client(
        cls.ctx.userName, cls.ctx.userName)
def get_django_client(self, credentials):
    """Return the regular-user client for 'user', the root client otherwise."""
    return self.django_client if credentials == 'user' else self.django_root_client
@pytest.fixture
def dataset(self):
    """Returns a new OMERO Dataset with required fields set."""
    dataset = DatasetI()
    dataset.name = rstring(self.uuid())
    return self.update.saveAndReturnObject(dataset)
@pytest.fixture
def project(self):
    """Returns a new OMERO Project with required fields set."""
    proj = ProjectI()
    proj.name = rstring(self.uuid())
    return self.update.saveAndReturnObject(proj)
@pytest.fixture
def projects_dataset_image_tag(self):
    """
    Returns 2 new OMERO Projects, linked Dataset and linked Image populated
    by an L{test.integration.library.ITest} instance with required fields
    set. Also a Tag linked to both Projects.
    """
    project1 = ProjectI()
    project1.name = rstring(self.uuid())
    project2 = ProjectI()
    project2.name = rstring(self.uuid())
    dataset = DatasetI()
    dataset.name = rstring(self.uuid())
    image = self.new_image(name=self.uuid())
    dataset.linkImage(image)
    # The same dataset (and tag) hang off BOTH projects, so chgrp has to
    # cope with multiply-linked children.
    project1.linkDataset(dataset)
    project2.linkDataset(dataset)
    tag = TagAnnotationI()
    tag.textValue = rstring("ChgrpTag")
    project1.linkAnnotation(tag)
    project2.linkAnnotation(tag)
    return self.update.saveAndReturnArray([project1, project2])
@pytest.mark.parametrize("credentials", ['user', 'admin'])
def test_load_chgrp_groups(self, project, credentials):
"""
A user in 2 groups should have options to move object
from one group to another.
"""
django_client = self.get_django_client(credentials)
request_url = reverse('load_chgrp_groups')
data = {
"Project": project.id.val
}
data = _get_response_json(django_client, request_url, data)
assert 'groups' in data
assert len(data['groups']) == 1
assert data['groups'][0]['id'] == self.group2.id.val
@pytest.mark.parametrize("credentials", ['user']) # TODO - add 'admin'
def test_chgrp_dry_run(self, projects_dataset_image_tag, credentials):
"""
Performs a chgrp POST, polls the activities json till done,
then checks that Dataset has moved to new group and has new
Project as parent.
"""
def doDryRun(data):
request_url = reverse('chgrpDryRun')
rsp = _csrf_post_response(django_client, request_url, data)
jobId = rsp.content
# Keep polling activities until dry-run job completed
activities_url = reverse('activities_json')
data = {'jobId': jobId}
rsp = _get_response_json(django_client, activities_url, data)
while rsp['finished'] is not True:
time.sleep(0.5)
rsp = _get_response_json(django_client, activities_url, data)
return rsp
django_client = self.get_django_client(credentials)
pdit = projects_dataset_image_tag
projectId = pdit[0].id.val
projectId2 = pdit[1].id.val
dataset, = pdit[0].linkedDatasetList()
image, = dataset.linkedImageList()
tag, = pdit[0].linkedAnnotationList()
# If we try to move single Project, Dataset, Tag remain
data = {
"group_id": self.group2.id.val,
"Project": projectId
}
rsp = doDryRun(data)
unlinked = {'Files': [],
'Tags': [{'id': tag.id.val,
'name': 'ChgrpTag'}],
'Datasets': [{'id': dataset.id.val,
'name': dataset.name.val}],
'Comments': 0, 'Others': 0}
assert 'includedObjects' in rsp
assert rsp['includedObjects'] == {'Projects': [projectId]}
assert 'unlinkedDetails' in rsp
assert rsp['unlinkedDetails'] == unlinked
# If we try to move both Projects all data moves
data = {
"group_id": self.group2.id.val,
"Project": "%s,%s" % (projectId, projectId2)
}
rsp = doDryRun(data)
pids = [projectId, projectId2]
pids.sort()
assert rsp['includedObjects'] == {'Projects': pids,
'Datasets': [dataset.id.val],
'Images': [image.id.val]}
assert rsp['unlinkedDetails'] == {'Files': [], 'Tags': [],
'Comments': 0, 'Others': 0}
@pytest.mark.parametrize("credentials", ['user', 'admin'])
def test_chgrp_new_container(self, dataset, credentials):
"""
Performs a chgrp POST, polls the activities json till done,
then checks that Dataset has moved to new group and has new
Project as parent.
"""
django_client = self.get_django_client(credentials)
request_url = reverse('chgrp')
projectName = "chgrp-project%s" % (self.uuid())
data = {
"group_id": self.group2.id.val,
"Dataset": dataset.id.val,
"new_container_name": projectName,
"new_container_type": "project",
}
data = _csrf_post_response_json(django_client, request_url, data)
expected = {"update": {"childless": {"project": [],
"orphaned": False,
"dataset": []},
"remove": {"project": [],
"plate": [],
"screen": [],
"image": [],
"dataset": [dataset.id.val]}}}
assert data == expected
activities_url = reverse('activities_json')
data = _get_response_json(django_client, activities_url, {})
# Keep polling activities until no jobs in progress
while data['inprogress'] > 0:
time.sleep(0.5)
data = _get_response_json(django_client, activities_url, {})
# individual activities/jobs are returned as dicts within json data
for k, o in data.items():
if hasattr(o, 'values'): # a dict
if 'report' in o:
print o['report']
assert o['status'] == 'finished'
assert o['job_name'] == 'Change group'
assert o['to_group_id'] == self.group2.id.val
# Dataset should now be in new group, contained in new Project
conn = BlitzGateway(client_obj=self.client)
userId = conn.getUserId()
conn.SERVICE_OPTS.setOmeroGroup('-1')
d = conn.getObject("Dataset", dataset.id.val)
assert d is not None
assert d.getDetails().group.id.val == self.group2.id.val
p = d.getParent()
assert p is not None
assert p.getName() == projectName
# Project owner should be current user
assert p.getDetails().owner.id.val == userId
@pytest.mark.parametrize("credentials", ['user', 'admin'])
def test_chgrp_old_container(self, dataset, credentials):
"""
Tests Admin moving user's Dataset to their Private group and
linking it to an existing Project there.
Bug from https://github.com/openmicroscopy/openmicroscopy/pull/3420
"""
django_client = self.get_django_client(credentials)
# user creates project in their target group
project = ProjectI()
projectName = "chgrp-target-%s" % self.client.getSessionId()
project.name = rstring(projectName)
ctx = {"omero.group": str(self.group2.id.val)}
project = self.sf.getUpdateService().saveAndReturnObject(project, ctx)
request_url = reverse('chgrp')
data = {
"group_id": self.group2.id.val,
"Dataset": dataset.id.val,
"target_id": "project-%s" % project.id.val,
}
data = _csrf_post_response_json(django_client, request_url, data)
expected = {"update": {"childless": {"project": [],
"orphaned": False,
"dataset": []},
"remove": {"project": [],
"plate": [],
"screen": [],
"image": [],
"dataset": [dataset.id.val]}}}
assert data == expected
activities_url = reverse('activities_json')
data = _get_response_json(django_client, activities_url, {})
# Keep polling activities until no jobs in progress
while data['inprogress'] > 0:
time.sleep(0.5)
data = _get_response_json(django_client, activities_url, {})
# individual activities/jobs are returned as dicts within json data
for k, o in data.items():
if hasattr(o, 'values'): # a dict
if 'report' in o:
print o['report']
assert o['status'] == 'finished'
assert o['job_name'] == 'Change group'
assert o['to_group_id'] == self.group2.id.val
# Dataset should now be in new group, contained in Project
conn = BlitzGateway(client_obj=self.client)
userId = conn.getUserId()
conn.SERVICE_OPTS.setOmeroGroup('-1')
d = conn.getObject("Dataset", dataset.id.val)
assert d is not None
assert d.getDetails().group.id.val == self.group2.id.val
p = d.getParent()
assert p is not None
assert p.getName() == projectName
# Project owner should be current user
assert p.getDetails().owner.id.val == userId
assert p.getId() == project.id.val
# Helpers
def _get_response_json(django_client, request_url,
                       query_string, status_code=200):
    """GET *request_url* via the test client and decode the JSON body.

    Asserts the response declares a JSON content type, then returns the
    parsed payload.
    """
    response = _get_response(django_client, request_url,
                             query_string, status_code)
    content_type = response.get('Content-Type')
    assert content_type == 'application/json'
    return json.loads(response.content)
def _csrf_post_response_json(django_client, request_url,
                             query_string, status_code=200):
    """POST via the CSRF-aware helper and decode the JSON response body.

    Asserts the response declares a JSON content type, then returns the
    parsed payload.
    """
    response = _csrf_post_response(django_client, request_url,
                                   query_string, status_code)
    content_type = response.get('Content-Type')
    assert content_type == 'application/json'
    return json.loads(response.content)
| gpl-2.0 |
MFoster/breeze | django/contrib/localflavor/in_/in_states.py | 197 | 2932 | """
A mapping of state misspellings/abbreviations to normalized abbreviations, and
an alphabetical list of states for use as `choices` in a formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
# (code, name) pairs for Indian states and union territories, in the
# order used as `choices` in a formfield.
STATE_CHOICES = (
    ('KA', 'Karnataka'),
    ('AP', 'Andhra Pradesh'),
    ('KL', 'Kerala'),
    ('TN', 'Tamil Nadu'),
    ('MH', 'Maharashtra'),
    ('UP', 'Uttar Pradesh'),
    ('GA', 'Goa'),
    ('GJ', 'Gujarat'),
    ('RJ', 'Rajasthan'),
    ('HP', 'Himachal Pradesh'),
    ('JK', 'Jammu and Kashmir'),
    ('AR', 'Arunachal Pradesh'),
    ('AS', 'Assam'),
    ('BR', 'Bihar'),
    ('CG', 'Chattisgarh'),
    ('HR', 'Haryana'),
    ('JH', 'Jharkhand'),
    ('MP', 'Madhya Pradesh'),
    ('MN', 'Manipur'),
    ('ML', 'Meghalaya'),
    ('MZ', 'Mizoram'),
    ('NL', 'Nagaland'),
    ('OR', 'Orissa'),
    ('PB', 'Punjab'),
    ('SK', 'Sikkim'),
    ('TR', 'Tripura'),
    ('UA', 'Uttarakhand'),
    ('WB', 'West Bengal'),
    # Union Territories
    ('AN', 'Andaman and Nicobar'),
    ('CH', 'Chandigarh'),
    ('DN', 'Dadra and Nagar Haveli'),
    ('DD', 'Daman and Diu'),
    ('DL', 'Delhi'),
    ('LD', 'Lakshadweep'),
    ('PY', 'Pondicherry'),
)
# Maps lowercase misspellings/abbreviations/full names to the canonical
# two-letter codes used in STATE_CHOICES above.
STATES_NORMALIZED = {
    'an': 'AN',
    'andaman and nicobar': 'AN',
    'andra pradesh': 'AP',
    'andrapradesh': 'AP',
    'andhrapradesh': 'AP',
    'ap': 'AP',
    'andhra pradesh': 'AP',
    'ar': 'AR',
    'arunachal pradesh': 'AR',
    'assam': 'AS',
    'as': 'AS',
    'bihar': 'BR',
    'br': 'BR',
    'cg': 'CG',
    'chattisgarh': 'CG',
    'ch': 'CH',
    'chandigarh': 'CH',
    'daman and diu': 'DD',
    'dd': 'DD',
    'dl': 'DL',
    'delhi': 'DL',
    'dn': 'DN',
    'dadra and nagar haveli': 'DN',
    'ga': 'GA',
    'goa': 'GA',
    'gj': 'GJ',
    'gujarat': 'GJ',
    'himachal pradesh': 'HP',
    'hp': 'HP',
    'hr': 'HR',
    'haryana': 'HR',
    'jharkhand': 'JH',
    'jh': 'JH',
    'jammu and kashmir': 'JK',
    'jk': 'JK',
    'karnataka': 'KA',
    'karnatka': 'KA',
    'ka': 'KA',
    'kerala': 'KL',
    'kl': 'KL',
    'ld': 'LD',
    'lakshadweep': 'LD',
    'maharastra': 'MH',
    'mh': 'MH',
    'maharashtra': 'MH',
    'meghalaya': 'ML',
    'ml': 'ML',
    'mn': 'MN',
    'manipur': 'MN',
    'madhya pradesh': 'MP',
    'mp': 'MP',
    'mizoram': 'MZ',
    'mizo': 'MZ',
    'mz': 'MZ',
    'nl': 'NL',
    'nagaland': 'NL',
    'orissa': 'OR',
    'odisa': 'OR',
    'orisa': 'OR',
    'or': 'OR',
    'pb': 'PB',
    'punjab': 'PB',
    'py': 'PY',
    'pondicherry': 'PY',
    'rajasthan': 'RJ',
    'rajastan': 'RJ',
    'rj': 'RJ',
    'sikkim': 'SK',
    'sk': 'SK',
    'tamil nadu': 'TN',
    'tn': 'TN',
    'tamilnadu': 'TN',
    'tamilnad': 'TN',
    'tr': 'TR',
    'tripura': 'TR',
    'ua': 'UA',
    'uttarakhand': 'UA',
    'up': 'UP',
    'uttar pradesh': 'UP',
    'westbengal': 'WB',
    'bengal': 'WB',
    'wb': 'WB',
    'west bengal': 'WB'
}
| bsd-3-clause |
xiangel/hue | desktop/core/ext-py/Mako-0.8.1/mako/ext/pygmentplugin.py | 38 | 4540 | # ext/pygmentplugin.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from pygments.lexers.web import \
HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer, Python3Lexer
from pygments.lexer import DelegatingLexer, RegexLexer, bygroups, \
include, using
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Other
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
from mako import compat
class MakoLexer(RegexLexer):
    """Pygments lexer for Mako template markup.

    Embedded Python (``%`` control lines, ``<% ... %>`` blocks and
    ``${...}`` expressions) is delegated to Pygments' PythonLexer.
    """
    name = 'Mako'
    aliases = ['mako']
    filenames = ['*.mao']

    tokens = {
        'root': [
            # '% endif' style terminators for control lines
            (r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text, Comment.Preproc, Keyword, Other)),
            # '% if ...:' control lines ('%%' is an escaped literal '%')
            (r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
            # '##' single-line comments
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, Other)),
            # <%doc> ... </%doc> multiline comments
            (r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc),
            # opening template tags, e.g. <%def name="...">
            (r'(<%)([\w\.\:]+)',
             bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            # closing template tags, e.g. </%def>
            (r'(</%)([\w\.\:]+)(>)',
             bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
            # <% ... %> and <%! ... %> python blocks
            (r'(<%(?:!?))(.*?)(%>)(?s)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # ${...} substitutions
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # Plain template text: consume up to (but not including)
            # the next Mako construct.
            (r'''(?sx)
                (.+?)                      # anything, followed by:
                (?:
                 (?<=\n)(?=%(?!%)|\#\#) |  # an eval or comment line
                 (?=\#\*) |                # multiline comment
                 (?=</?%) |                # a python block
                                           # call start or end
                 (?=\$\{) |                # a substitution
                 (?<=\n)(?=\s*%) |
                                           # - don't consume
                 (\\\n) |                  # an escaped newline
                 \Z                        # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            # attribute="value" pairs inside a tag
            (r'((?:\w+)\s*=)\s*(".*?")',
             bygroups(Name.Attribute, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
class MakoHtmlLexer(DelegatingLexer):
    """Lexer for Mako templates embedded in HTML documents."""

    name = 'HTML+Mako'
    aliases = ['html+mako']

    def __init__(self, **options):
        super(MakoHtmlLexer, self).__init__(
            HtmlLexer, MakoLexer, **options)
class MakoXmlLexer(DelegatingLexer):
    """Lexer for Mako templates embedded in XML documents."""

    name = 'XML+Mako'
    aliases = ['xml+mako']

    def __init__(self, **options):
        super(MakoXmlLexer, self).__init__(
            XmlLexer, MakoLexer, **options)
class MakoJavascriptLexer(DelegatingLexer):
    """Lexer for Mako templates embedded in JavaScript source."""

    name = 'JavaScript+Mako'
    aliases = ['js+mako', 'javascript+mako']

    def __init__(self, **options):
        super(MakoJavascriptLexer, self).__init__(
            JavascriptLexer, MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
    """Lexer for Mako templates embedded in CSS stylesheets."""

    name = 'CSS+Mako'
    aliases = ['css+mako']

    def __init__(self, **options):
        super(MakoCssLexer, self).__init__(
            CssLexer, MakoLexer, **options)
# Shared formatter: numbered lines wrapped in a 'syntax-highlighted' div.
pygments_html_formatter = HtmlFormatter(cssclass='syntax-highlighted',
                                        linenos=True)


def syntax_highlight(filename='', language=None):
    """Return a callable rendering source text as highlighted HTML.

    Mako markup is assumed for in-memory templates (filenames starting
    with ``'memory:'``) or when *language* is ``'mako'``; otherwise the
    text is lexed as Python source (2 or 3 depending on the runtime).
    """
    if filename.startswith('memory:') or language == 'mako':
        lexer = MakoLexer()
    elif compat.py3k:
        lexer = Python3Lexer()
    else:
        lexer = PythonLexer()
    return lambda string: highlight(string, lexer,
                                    pygments_html_formatter)
| apache-2.0 |
sparkslabs/kamaelia_ | Sketches/MPS/WebCam.py | 3 | 10193 | #!/usr/bin/python
#
# This import line is required to pull in pygame.camera support
#
import sys ;
#sys.path.insert(0, "/home/zathras/Incoming/X/pygame/pygame-nrp/build/lib.linux-i686-2.5")
#sys.path.insert(0, "/home/zathras/code.google/pygame-seul/trunk/build/lib.linux-i686-2.5")
# sys.path.insert(0, "/home/zathras/Documents/pygame-1.9.0rc1/build/lib.linux-i686-2.5")
import time
import pygame
import pygame.camera
import Axon
pygame.init()
pygame.camera.init()
def mprint(*args):
    # Debug print helper: prefixes each message with the component name.
    print "VideoCapturePlayer", " ".join([str(x) for x in args])
class VideoCapturePlayer(object):
    """Standalone (non-Axon) webcam viewer: grabs frames from
    /dev/video1 and blits them to a pygame window in a busy loop.

    Class attributes act as defaults and may be overridden via
    keyword arguments to __init__.
    """
    displaysize = (1024, 768)    # pygame window size in pixels
    capturesize = ( 640, 480 )   # camera capture resolution
    mirror = True                # kept for parity with the Axon version below
    delay = 0                    # seconds to sleep between frames

    def __init__(self, **argd):
        self.__dict__.update(**argd)
        super(VideoCapturePlayer, self).__init__(**argd)
        self.display = pygame.display.set_mode( self.displaysize )
        # NOTE(review): 'X=' creates a stray module-level-style alias;
        # only self.camera is actually used.
        self.camera = X=pygame.camera.Camera("/dev/video1", self.capturesize)
        self.camera.start()
#        self.camera2 = X=pygame.camera.Camera("/dev/video1", self.capturesize)
#        self.camera2.start()

    def get_and_flip(self):
        # Grab one frame, scale it down, blit and flip the display.
        snapshot = self.camera.get_image()
#        snapshot2 = self.camera2.get_image()
#        print snapshot, snapshot2
        snapshot = pygame.transform.scale(snapshot,(352,288))
#        snapshot2 = pygame.transform.scale(snapshot2,(352,288))
#        snapshot = pygame.transform.scale(snapshot,(512,384))
#        snapshot = pygame.transform.scale(snapshot,(1024,768))
        if 0:    # disabled mirrored-quadrant experiment
#        if self.mirror:
            flippedx = pygame.transform.flip(snapshot,1,0)
            flippedy = pygame.transform.flip(snapshot,0,1)
            flippedxy = pygame.transform.flip(snapshot,1,1)
#            self.display.blit(flippedy, (0,384))
#            self.display.blit(flippedxy, (512,384))
        self.display.blit(snapshot, (0,0))
#        self.display.blit(snapshot2, (0,384))
#        self.display.blit(flippedx, (512,0))
        pygame.display.flip()

    def main(self):
        # Runs forever; frame rate limited only by `delay` and the camera.
        while 1:
            time.sleep(self.delay)
            self.get_and_flip()

if 1:
    # Script entry: run the standalone viewer (loops forever, so the
    # definitions below are only reached when this is changed to `if 0:`).
    VideoCapturePlayer().main()
class VideoCapturePlayer(Axon.ThreadedComponent.threadedcomponent):
    """Axon threaded-component version of the webcam viewer: shows the
    frame plus three flipped copies in the four display quadrants.
    """
    displaysize = (1024, 768)    # pygame window size in pixels
    capturesize = ( 320, 240 )   # camera capture resolution
    mirror = True                # when True, compute the flipped copies
    delay = 0                    # seconds to sleep between frames

    def __init__(self, **argd):
        super(VideoCapturePlayer, self).__init__(**argd)
        self.display = pygame.display.set_mode( self.displaysize )
        self.camera = X=pygame.camera.Camera("/dev/video0", self.capturesize)
        self.camera.start()

    def pygame_display_flip(self):
        pygame.display.flip()

    def get_and_flip(self):
        snapshot = self.camera.get_image()
        snapshot = pygame.transform.scale(snapshot,(512,384))
        if self.mirror:
            flippedx = pygame.transform.flip(snapshot,1,0)
            flippedy = pygame.transform.flip(snapshot,0,1)
            flippedxy = pygame.transform.flip(snapshot,1,1)
            self.display.blit(flippedy, (0,384))
            self.display.blit(flippedxy, (512,384))
        self.display.blit(snapshot, (0,0))
        # NOTE(review): flippedx is only defined when self.mirror is
        # True; with mirror False this line would raise NameError.
        self.display.blit(flippedx, (512,0))
        self.pygame_display_flip()

    def main(self):
        # Threaded component main loop; runs forever.
        while 1:
            time.sleep(self.delay)
            self.get_and_flip()
class VideoCaptureSource(Axon.ThreadedComponent.threadedcomponent):
    """Axon source component: captures frames from /dev/video0 and
    sends each pygame Surface to its "outbox".
    """
    capturesize = ( 640, 480 )   # camera capture resolution
    mirror = True                # horizontally flip each frame
    delay = 0                    # seconds between frames
    fps = -1                     # frames per second; -1 means "use delay"

    def __init__(self, **argd):
        super(VideoCaptureSource, self).__init__(**argd)
        self.camera = X=pygame.camera.Camera("/dev/video0", self.capturesize)
        self.camera.start()
        if self.fps != -1:
            self.delay = 1.0/self.fps # fps overrides delay

    def capture_one(self):
        # Grab a single frame, mirrored horizontally if requested.
        snapshot = self.camera.get_image()
        if self.mirror:
            snapshot = pygame.transform.flip(snapshot,1,0)
        return snapshot

    def main(self):
        while 1:
            self.send(self.capture_one(), "outbox")
            time.sleep(self.delay) # This would be far better to be a synchronous link
# This doesn't play nicely yet.
# It will
class SurfaceDisplayer(Axon.Component.component):
    """Axon sink component: opens its own pygame window and blits each
    Surface received on "inbox" at a fixed offset.

    Creates the display directly rather than via PygameDisplay, so it
    does not cooperate with other pygame-based components (see
    ProperSurfaceDisplayer below).
    """
    displaysize = (800, 600)    # pygame window size in pixels

    def __init__(self, **argd):
        super(SurfaceDisplayer, self).__init__(**argd)
        self.display = pygame.display.set_mode( self.displaysize )

    def pygame_display_flip(self):
        pygame.display.flip()

    def main(self):
        # Generator main loop: drain the inbox, then pause until
        # more data arrives.
        while 1:
            while self.dataReady("inbox"):
                snapshot = self.recv("inbox")
                self.display.blit(snapshot, (80,60))
                self.pygame_display_flip()
            while not self.anyReady():
                self.pause()
                yield 1
            yield 1
#
# This plays nicely with pygame display
#
from Kamaelia.UI.GraphicDisplay import PygameDisplay
class ProperSurfaceDisplayer(Axon.Component.component):
    """Axon sink component that requests its drawing surface from the
    shared PygameDisplay service instead of owning the window, so it
    plays nicely with other pygame components.
    """
    Inboxes = ["inbox", "control", "callback"]
    Outboxes= ["outbox", "signal", "display_signal"]
    displaysize = (640, 480)    # requested surface size in pixels

    def __init__(self, **argd):
        super(ProperSurfaceDisplayer, self).__init__(**argd)
        # Display request sent to the PygameDisplay service; the surface
        # comes back on our "callback" inbox.
        self.disprequest = { "DISPLAYREQUEST" : True,
                             "callback" : (self,"callback"),
                             "size": self.displaysize}

    def pygame_display_flip(self):
        # Ask the display service to redraw our surface.
        self.send({"REDRAW":True, "surface":self.display}, "display_signal")

    def getDisplay(self):
        # Generator: request a surface and wait until it is delivered.
        displayservice = PygameDisplay.getDisplayService()
        self.link((self,"display_signal"), displayservice)
        self.send(self.disprequest, "display_signal")
        while not self.dataReady("callback"):
            self.pause()
            yield 1
        self.display = self.recv("callback")

    def main(self):
        yield Axon.Ipc.WaitComplete(self.getDisplay())
        time.sleep(1)
        if 1:
            while 1:
                while self.dataReady("inbox"):
                    snapshot = self.recv("inbox")
                    self.display.blit(snapshot, (0,0))
                    self.pygame_display_flip()
                while not self.anyReady():
                    self.pause()
                    yield 1
                yield 1
class VideoCaptureSource(Axon.ThreadedComponent.threadedcomponent):
    """Second (overriding) version of the capture source: timestamps
    each frame, reports achieved FPS once per second, and uses a
    back-pressure-aware safeSend instead of sleeping.
    """
    capturesize = ( 640, 480 )   # camera capture resolution
    mirror = True                # horizontally flip each frame
    delay = 0                    # seconds between frames (unused here)
    fps = -1                     # frames per second; -1 means "use delay"

    def __init__(self, **argd):
        super(VideoCaptureSource, self).__init__(**argd)
        self.camera = X=pygame.camera.Camera("/dev/video0", self.capturesize)
        self.camera.start()
        if self.fps != -1:
            self.delay = 1.0/self.fps # fps overrides delay

    def capture_one(self):
        # Grab a single frame, mirrored horizontally if requested.
        snapshot = self.camera.get_image()
        if self.mirror:
            snapshot = pygame.transform.flip(snapshot,1,0)
        return snapshot

    def main(self):
        # Send (timestamp, frame) tuples as fast as the receiver allows,
        # printing the achieved frame rate roughly once per second.
        t = time.time()
        c = 0
        while 1:
            tn =time.time()
            self.safeSend((tn, self.capture_one()), "outbox")
            c +=1
            if tn - t > 1:
                td = tn -t
                t = tn
                print "FPS", c
                c =0
#            time.sleep(self.delay) # This would be far better to be a synchronous link

    def safeSend(self, data, box):
        # Retry until the outbox has room, pausing the thread when full.
        while 1:
            try:
                self.send(data,box)
#                print "SENT", data
                return
            except Axon.AxonExceptions.noSpaceInBox:
                pass
#                print "MISS", data
            self.pause()
class ProperSurfaceDisplayer(Axon.Component.component):
    """Second (overriding) version of the displayer: expects
    (timestamp, Surface) tuples on "inbox" and converts each surface
    to the display pixel format before blitting.
    """
    Inboxes = ["inbox", "control", "callback"]
    Outboxes= ["outbox", "signal", "display_signal"]
    displaysize = (640, 480)    # requested surface size in pixels

    def __init__(self, **argd):
        super(ProperSurfaceDisplayer, self).__init__(**argd)
        self.disprequest = { "DISPLAYREQUEST" : True,
                             "callback" : (self,"callback"),
                             "size": self.displaysize}

    def pygame_display_flip(self):
        # Ask the display service to redraw our surface.
        self.send({"REDRAW":True, "surface":self.display}, "display_signal")

    def getDisplay(self):
        # Generator: request a surface from the PygameDisplay service
        # and wait until it is delivered on "callback".
        displayservice = PygameDisplay.getDisplayService()
        self.link((self,"display_signal"), displayservice)
        self.send(self.disprequest, "display_signal")
        while not self.dataReady("callback"):
            self.pause()
            yield 1
        self.display = self.recv("callback")

    def main(self):
#        self.inboxes["inbox"].setSize(1)
        yield Axon.Ipc.WaitComplete(self.getDisplay())
        time.sleep(1)
        if 1:
            while 1:
                while self.dataReady("inbox"):
                    # Timestamp is received but not used for display.
                    t,snapshot = self.recv("inbox")
                    snapshot=snapshot.convert()
                    self.display.blit(snapshot, (0,0))
                    self.pygame_display_flip()
                while not self.anyReady():
                    self.pause()
                    yield 1
                yield 1
if 0:
    # Disabled: run the Axon viewer on its own.
    VideoCapturePlayer().run() # Runs at full speed - basic test case

if 1:
    # Main demo: capture -> display pipeline via the shared PygameDisplay.
    print "Hello World"
    sys.path.append("/home/zathras/kamaelia/trunk/Sketches/MH/pixformatConversion")
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
    from VideoSurface import VideoSurface

    class Framer(Axon.Component.component):
        """Experimental adaptor wrapping (timestamp, Surface) tuples into
        the frame-dict format expected by VideoOverlay (currently unused
        in the pipeline below).
        """
        def main(self):
            while 1:
                while self.dataReady("inbox"):
                    D = self.recv("inbox")
                    print D, D[1].get_width(),D[1].get_height(),
                    # NOTE(review): "size" uses get_width() twice —
                    # presumably the second should be get_height().
                    X = {
                        "yuv" : (D[1], D[1], D[1]), # a tuple of strings
                        "size" : (D[1].get_width(), D[1].get_width()), # in pixels
                        "pixformat" : "YUV420_planar" # format of raw video data
                    }
                    self.send(X, "outbox")
                yield 1

    # Register a shared display service before building the pipeline.
    X = PygameDisplay(width=640,height=480).activate()
    PygameDisplay.setDisplayService(X)
    Pipeline(
        VideoCaptureSource(fps=32),
#        Framer(),
#        VideoOverlay(),
        ProperSurfaceDisplayer(),
    ).run()

if 1:
    # Grab a single still frame and save it to disk.
    X = VideoCaptureSource(fps=32)
    snap = X.capture_one()
    pygame.image.save(snap, "photo.bmp")
#    pygame.image.save(snap, "photo.png")
| apache-2.0 |
looooo/pivy | scons/scons-local-1.2.0.d20090919/SCons/Tool/ifort.py | 1 | 3365 | """SCons.Tool.ifort
Tool-specific initialization for newer versions of the Intel Fortran Compiler
for Linux/Windows (and possibly Mac OS X).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ifort.py 4369 2009/09/19 15:58:29 scons"
import string
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
from FortranCommon import add_all_to_env
def generate(env):
    """Add Builders and construction variables for ifort to an Environment."""
    # ifort supports Fortran 90 and Fortran 95
    # Additionally, ifort recognizes more file extensions.
    fscan = FortranScan("FORTRANPATH")
    SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
    SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)

    # Register ifort's extra preprocessed-source suffixes, preserving
    # any suffixes another tool has already configured.
    if 'FORTRANFILESUFFIXES' not in env:
        env['FORTRANFILESUFFIXES'] = ['.i']
    else:
        env['FORTRANFILESUFFIXES'].append('.i')

    if 'F90FILESUFFIXES' not in env:
        env['F90FILESUFFIXES'] = ['.i90']
    else:
        env['F90FILESUFFIXES'].append('.i90')

    add_all_to_env(env)

    # ifort is used for every Fortran dialect, shared or static.
    fc = 'ifort'
    for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
        env['%s' % dialect] = fc
        env['SH%s' % dialect] = '$%s' % dialect
        if env['PLATFORM'] == 'posix':
            env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)

    if env['PLATFORM'] == 'win32':
        # On Windows, the ifort compiler specifies the object on the
        # command line with -object:, not -o. Massage the necessary
        # command-line construction variables.
        for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
            for var in ['%sCOM' % dialect, '%sPPCOM' % dialect,
                        'SH%sCOM' % dialect, 'SH%sPPCOM' % dialect]:
                env[var] = string.replace(env[var], '-o $TARGET', '-object:$TARGET')
        env['FORTRANMODDIRPREFIX'] = "/module:"
    else:
        env['FORTRANMODDIRPREFIX'] = "-module "
def exists(env):
    """Report whether the Intel Fortran compiler can be located.

    Delegates to the construction environment's executable search and
    returns whatever ``env.Detect`` yields for ``'ifort'``.
    """
    compiler_name = 'ifort'
    return env.Detect(compiler_name)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| isc |
Codefans-fan/odoo | addons/analytic/analytic.py | 110 | 18041 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
_name = 'account.analytic.account'
_inherit = ['mail.thread']
_description = 'Analytic Account'
_track = {
'state': {
'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj.state == 'pending',
'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj.state == 'close',
'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj.state == 'open',
},
}
def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
currency_obj = self.pool.get('res.currency')
recres = {}
def recursive_computation(account):
result2 = res[account.id].copy()
for son in account.child_ids:
result = recursive_computation(son)
for field in field_names:
if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
result2[field] += result[field]
return result2
for account in self.browse(cr, uid, ids, context=context):
if account.id not in child_ids:
continue
recres[account.id] = recursive_computation(account)
return recres
def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
res = {}
if context is None:
context = {}
child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
for i in child_ids:
res[i] = {}
for n in fields:
res[i][n] = 0.0
if not child_ids:
return res
where_date = ''
where_clause_args = [tuple(child_ids)]
if context.get('from_date', False):
where_date += " AND l.date >= %s"
where_clause_args += [context['from_date']]
if context.get('to_date', False):
where_date += " AND l.date <= %s"
where_clause_args += [context['to_date']]
cr.execute("""
SELECT a.id,
sum(
CASE WHEN l.amount > 0
THEN l.amount
ELSE 0.0
END
) as debit,
sum(
CASE WHEN l.amount < 0
THEN -l.amount
ELSE 0.0
END
) as credit,
COALESCE(SUM(l.amount),0) AS balance,
COALESCE(SUM(l.unit_amount),0) AS quantity
FROM account_analytic_account a
LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
WHERE a.id IN %s
""" + where_date + """
GROUP BY a.id""", where_clause_args)
for row in cr.dictfetchall():
res[row['id']] = {}
for field in fields:
res[row['id']][field] = row[field]
return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)
def name_get(self, cr, uid, ids, context=None):
res = []
if not ids:
return res
if isinstance(ids, (int, long)):
ids = [ids]
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context == None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
if elmt.parent_id and not elmt.type == 'template':
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + " / "
else:
parent_path = ''
return parent_path + elmt.name
def _child_compute(self, cr, uid, ids, name, arg, context=None):
result = {}
if context is None:
context = {}
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = map(lambda x: x.id, [child for child in account.child_ids if child.state != 'template'])
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
company_obj = self.pool.get('res.company')
analytic_obj = self.pool.get('account.analytic.account')
accounts = []
for company in company_obj.browse(cr, uid, ids, context=context):
accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
return accounts
def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
if isinstance(ids, (int, long)):
ids=[ids]
for account in self.browse(cr, uid, ids, context=context):
if account.company_id:
if account.company_id.currency_id.id != value:
raise osv.except_osv(_('Error!'), _("If you set a company, the currency selected has to be the same as it's currency. \nYou can remove the company belonging, and thus change the currency, only on analytic account of type 'view'. This can be really useful for consolidation purposes of several companies charts with different currencies, for example."))
if value:
cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id))
self.invalidate_cache(cr, uid, ['currency_id'], [account.id], context=context)
def _currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.company_id:
result[rec.id] = rec.company_id.currency_id.id
else:
result[rec.id] = rec.currency_id.id
return result
# Column definitions (old OpenERP 7/8 ORM API). Function fields delegate to
# the computation helpers above; ``_debit_credit_bal_qtty`` is defined
# earlier in this file, outside this excerpt.
_columns = {
    'name': fields.char('Account/Contract Name', required=True, track_visibility='onchange'),
    'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
    'code': fields.char('Reference', select=True, track_visibility='onchange', copy=False),
    # 'view' accounts are pure grouping nodes — no entries may target them.
    'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
        help="If you select the View Type, it means you won\'t allow to create journal entries using that account.\n"\
        "The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
        "If you select Contract or Project, it offers you the possibility to manage the validity and the invoicing options for this account.\n"\
        "The special type 'Template of Contract' allows you to define a template with default data that you can reuse easily."),
    'template_id': fields.many2one('account.analytic.account', 'Template of Contract'),
    'description': fields.text('Description'),
    'parent_id': fields.many2one('account.analytic.account', 'Parent Analytic Account', select=2),
    'child_ids': fields.one2many('account.analytic.account', 'parent_id', 'Child Accounts'),
    'child_complete_ids': fields.function(_child_compute, relation='account.analytic.account', string="Account Hierarchy", type='many2many'),
    'line_ids': fields.one2many('account.analytic.line', 'account_id', 'Analytic Entries'),
    # Aggregated amounts all computed by one multi-field function.
    'balance': fields.function(_debit_credit_bal_qtty, type='float', string='Balance', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
    'debit': fields.function(_debit_credit_bal_qtty, type='float', string='Debit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
    'credit': fields.function(_debit_credit_bal_qtty, type='float', string='Credit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
    'quantity': fields.function(_debit_credit_bal_qtty, type='float', string='Quantity', multi='debit_credit_bal_qtty'),
    'quantity_max': fields.float('Prepaid Service Units', help='Sets the higher limit of time to work on the contract, based on the timesheet. (for instance, number of hours in a limited support contract.)'),
    'partner_id': fields.many2one('res.partner', 'Customer'),
    'user_id': fields.many2one('res.users', 'Project Manager', track_visibility='onchange'),
    'manager_id': fields.many2one('res.users', 'Account Manager', track_visibility='onchange'),
    'date_start': fields.date('Start Date'),
    'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
    'company_id': fields.many2one('res.company', 'Company', required=False), #not required because we want to allow different companies to use the same chart of account, except for leaf accounts.
    'state': fields.selection([('template', 'Template'),
                               ('draft','New'),
                               ('open','In Progress'),
                               ('pending','To Renew'),
                               ('close','Closed'),
                               ('cancelled', 'Cancelled')],
                              'Status', required=True,
                              track_visibility='onchange', copy=False),
    # the currency_id field is readonly except if it's a view account and
    # if there is no company; the store trigger recomputes it when a
    # company's currency changes.
    'currency_id': fields.function(_currency, fnct_inv=_set_company_currency,
        store = {
            'res.company': (_get_analytic_account, ['currency_id'], 10),
        }, string='Currency', type='many2one', relation='res.currency'),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
    """Onchange: prefill a new contract from the selected template.

    Copies quantity_max / parent / description, and derives the expiration
    date by applying the template's own duration starting from today.
    """
    if not template_id:
        return {}
    template = self.browse(cr, uid, template_id, context=context)
    values = {}
    if template.date_start and template.date:
        # Duration of the template, re-anchored at "now".
        begin = datetime.strptime(template.date_start, tools.DEFAULT_SERVER_DATE_FORMAT)
        end = datetime.strptime(template.date, tools.DEFAULT_SERVER_DATE_FORMAT)
        duration = end - begin
        values['date'] = datetime.strftime(datetime.now() + duration, tools.DEFAULT_SERVER_DATE_FORMAT)
    if not date_start:
        values['date_start'] = fields.date.today()
    values['quantity_max'] = template.quantity_max
    values['parent_id'] = template.parent_id.id if template.parent_id else False
    values['description'] = template.description
    return {'value': values}
def on_change_partner_id(self, cr, uid, ids, partner_id, name, context=None):
    """Onchange: default the manager and contract name from the customer."""
    if not partner_id:
        return {'value': {}}
    values = {}
    partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
    if partner.user_id:
        values['manager_id'] = partner.user_id.id
    if not name:
        values['name'] = _('Contract: ') + partner.name
    return {'value': values}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
def _get_default_currency(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.currency_id.id
# Field defaults (old API): plain values, or callables invoked at create
# time with (self/obj, cr, uid, context).
_defaults = {
    'type': 'normal',
    'company_id': _default_company,
    # Next number from the dedicated ir.sequence.
    'code' : lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.analytic.account'),
    'state': 'open',
    # The creating user becomes the project manager by default.
    'user_id': lambda self, cr, uid, ctx: uid,
    'partner_id': lambda self, cr, uid, ctx: ctx.get('partner_id', False),
    'manager_id': lambda self, cr, uid, ctx: ctx.get('manager_id', False),
    'date_start': lambda *a: time.strftime('%Y-%m-%d'),
    'currency_id': _get_default_currency,
}
def check_recursion(self, cr, uid, ids, context=None, parent=None):
    """Constraint helper: delegate parent_id cycle detection to the ORM."""
    return super(account_analytic_account, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
# Default list ordering: reference code first, then name.
_order = 'code, name asc'
_constraints = [
    # Reject cycles in the parent_id hierarchy (see check_recursion above).
    (check_recursion, 'Error! You cannot create recursive analytic accounts.', ['parent_id']),
]
def name_create(self, cr, uid, name, context=None):
    """Disable quick-create: an analytic account needs more than a name."""
    raise osv.except_osv(_('Warning'), _("Quick account creation disallowed."))
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate an analytic account, suffixing its name with "(copy)"."""
    default = default or {}
    analytic = self.browse(cr, uid, id, context=context)
    default['name'] = _("%s (copy)") % analytic['name']
    return super(account_analytic_account, self).copy(cr, uid, id, default, context=context)
def on_change_company(self, cr, uid, id, company_id):
    """Onchange: align the currency with the newly selected company."""
    if not company_id:
        return {}
    company_data = self.pool.get('res.company').read(cr, uid, [company_id], ['currency_id'])
    return {'value': {'currency_id': company_data[0]['currency_id']}}
def on_change_parent(self, cr, uid, id, parent_id):
    """Onchange: inherit the customer from the selected parent account."""
    if not parent_id:
        return {}
    parent = self.read(cr, uid, [parent_id], ['partner_id','code'])[0]
    values = {}
    if parent['partner_id']:
        # read() renders a many2one as (id, display_name); keep the id only.
        values['partner_id'] = parent['partner_id'][0]
    return {'value': values}
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
    """Search by exact reference code first, then by slash-separated path.

    A query like ``"Parent / Child"`` is resolved segment by segment: each
    segment narrows the candidate set to children of the previous matches.
    """
    if not args:
        args=[]
    if context is None:
        context={}
    if name:
        # An exact 'code' match takes priority over fuzzy name matching.
        account_ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context)
        if not account_ids:
            dom = []
            # Walk the "A / B / C" path left to right, constraining each
            # step to descendants of the previous step's matches.
            for name2 in name.split('/'):
                name = name2.strip()
                account_ids = self.search(cr, uid, dom + [('name', operator, name)] + args, limit=limit, context=context)
                if not account_ids: break
                dom = [('parent_id','in',account_ids)]
    else:
        account_ids = self.search(cr, uid, args, limit=limit, context=context)
    return self.name_get(cr, uid, account_ids, context=context)
class account_analytic_line(osv.osv):
    """A single analytic entry (cost/revenue line) on an analytic account."""
    _name = 'account.analytic.line'
    _description = 'Analytic Line'
    _columns = {
        'name': fields.char('Description', required=True),
        'date': fields.date('Date', required=True, select=True),
        'amount': fields.float('Amount', required=True, help='Calculated by multiplying the quantity and the price given in the Product\'s cost price. Always expressed in the company main currency.', digits_compute=dp.get_precision('Account')),
        'unit_amount': fields.float('Quantity', help='Specifies the amount of quantity to count.'),
        # 'view' accounts may not receive lines; enforced again by the
        # _check_no_view constraint below.
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', select=True, domain=[('type','<>','view')]),
        'user_id': fields.many2one('res.users', 'User'),
        'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }

    def _get_default_date(self, cr, uid, context=None):
        # Overridable hook: today's date in the user's timezone context.
        return fields.date.context_today(self, cr, uid, context=context)

    def __get_default_date(self, cr, uid, context=None):
        # Private trampoline stored in _defaults so that subclass overrides
        # of _get_default_date are still honoured at default time.
        return self._get_default_date(cr, uid, context=context)

    _defaults = {
        'date': __get_default_date,
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
        'amount': 0.00
    }

    _order = 'date desc'

    def _check_no_view(self, cr, uid, ids, context=None):
        """Constraint body: lines may only target non-'view' accounts."""
        analytic_lines = self.browse(cr, uid, ids, context=context)
        for line in analytic_lines:
            if line.account_id.type == 'view':
                return False
        return True

    _constraints = [
        (_check_no_view, 'You cannot create analytic line on view account.', ['account_id']),
    ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bitglue/shinysdr | shinysdr/plugins/vor/__init__.py | 1 | 9491 | # Copyright 2013, 2014, 2015, 2016, 2017 Kevin Reid <kpreid@switchb.org>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
# TODO: fully clean up this GRC-generated file
from __future__ import absolute_import, division
import math
import os.path
from twisted.web import static
from zope.interface import implementer
from gnuradio import analog
from gnuradio import blocks
from gnuradio import fft
from gnuradio import gr
from gnuradio import filter as grfilter # don't shadow builtin
from gnuradio.filter import firdes
from shinysdr.filters import make_resampler
from shinysdr.interfaces import ClientResourceDef, ModeDef, IDemodulator, IModulator
from shinysdr.plugins.basic_demod import SimpleAudioDemodulator, design_lofi_audio_filter
from shinysdr.signals import SignalType
from shinysdr.types import QuantityT, RangeT
from shinysdr import units
from shinysdr.values import ExportedState, exported_value, setter
# VOR signal constants: the 30 Hz variable tone AM-modulates the carrier at
# this index; the reference tone rides a 9960 Hz FM subcarrier with 480 Hz
# deviation (= 30 Hz x modulation index 16).
audio_modulation_index = 0.07
fm_subcarrier = 9960
fm_deviation = 480
@implementer(IDemodulator)
class VOR(SimpleAudioDemodulator):
    """VOR navigation-signal demodulator.

    Recovers the bearing by extracting the 30 Hz tone from two paths — the
    AM envelope and the 9960 Hz FM subcarrier — and comparing their phases;
    the resulting angle (plus a calibration offset) is exported via a
    signal probe.
    """

    def __init__(self, mode='VOR', zero_point=59, **kwargs):
        # zero_point: calibration offset in degrees added to the measured phase.
        self.channel_rate = channel_rate = 40000
        internal_audio_rate = 20000  # TODO over spec'd
        self.zero_point = zero_point
        transition = 5000
        SimpleAudioDemodulator.__init__(self,
            mode=mode,
            audio_rate=internal_audio_rate,
            demod_rate=channel_rate,
            # Wide enough to include the FM subcarrier and its deviation.
            band_filter=fm_subcarrier * 1.25 + fm_deviation + transition / 2,
            band_filter_transition=transition,
            **kwargs)
        # Bearing (direction-finding) update rate in Hz.
        self.dir_rate = dir_rate = 10
        if internal_audio_rate % dir_rate != 0:
            raise ValueError('Audio rate %s is not a multiple of direction-finding rate %s' % (internal_audio_rate, dir_rate))
        self.dir_scale = dir_scale = internal_audio_rate // dir_rate
        self.audio_scale = audio_scale = channel_rate // internal_audio_rate
        # Adds the calibration offset (converted to radians) to the angle.
        self.zeroer = blocks.add_const_vff((zero_point * (math.pi / 180), ))

        # Smooths the complex phase-comparison vector before the angle is taken.
        self.dir_vector_filter = grfilter.fir_filter_ccf(1, firdes.low_pass(
            1, dir_rate, 1, 2, firdes.WIN_HAMMING, 6.76))
        # Selects the voice/ident + 30 Hz AM portion of the channel.
        self.am_channel_filter_block = grfilter.fir_filter_ccf(1, firdes.low_pass(
            1, channel_rate, 5000, 5000, firdes.WIN_HAMMING, 6.76))
        # Single-bin DFTs picking out the 30 Hz tone on each path.
        self.goertzel_fm = fft.goertzel_fc(channel_rate, dir_scale * audio_scale, 30)
        self.goertzel_am = fft.goertzel_fc(internal_audio_rate, dir_scale, 30)
        # Translates the 9960 Hz FM subcarrier down to baseband.
        self.fm_channel_filter_block = grfilter.freq_xlating_fir_filter_ccc(1, (firdes.low_pass(1.0, channel_rate, fm_subcarrier / 2, fm_subcarrier / 2, firdes.WIN_HAMMING)), fm_subcarrier, channel_rate)
        # conj-multiply yields a vector whose argument is the AM-vs-FM phase
        # difference, i.e. the bearing.
        self.multiply_conjugate_block = blocks.multiply_conjugate_cc(1)
        self.complex_to_arg_block = blocks.complex_to_arg(1)
        self.am_agc_block = analog.feedforward_agc_cc(1024, 1.0)
        self.am_demod_block = analog.am_demod_cf(
            channel_rate=channel_rate,
            audio_decim=audio_scale,
            audio_pass=5000,
            audio_stop=5500,
        )
        self.fm_demod_block = analog.quadrature_demod_cf(1)
        # Normalize both 30 Hz tones to comparable amplitude before comparison.
        self.phase_agc_fm = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
        self.phase_agc_am = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
        # Exposes the latest bearing value to get_angle().
        self.probe = blocks.probe_signal_f()
        self.audio_filter_block = grfilter.fir_filter_fff(1, design_lofi_audio_filter(internal_audio_rate, False))

        ##################################################
        # Connections
        ##################################################
        # Input
        self.connect(
            self,
            self.band_filter_block)
        # AM chain
        self.connect(
            self.band_filter_block,
            self.am_channel_filter_block,
            self.am_agc_block,
            self.am_demod_block)
        # AM audio
        self.connect(
            self.am_demod_block,
            blocks.multiply_const_ff(1.0 / audio_modulation_index * 0.5),
            self.audio_filter_block)
        self.connect_audio_output(self.audio_filter_block)
        # AM phase
        self.connect(
            self.am_demod_block,
            self.goertzel_am,
            self.phase_agc_am,
            (self.multiply_conjugate_block, 0))
        # FM phase
        self.connect(
            self.band_filter_block,
            self.fm_channel_filter_block,
            self.fm_demod_block,
            self.goertzel_fm,
            self.phase_agc_fm,
            (self.multiply_conjugate_block, 1))
        # Phase comparison and output
        self.connect(
            self.multiply_conjugate_block,
            self.dir_vector_filter,
            self.complex_to_arg_block,
            blocks.multiply_const_ff(-1),  # opposite angle conventions
            self.zeroer,
            self.probe)

    @exported_value(type=QuantityT(units.degree), changes='this_setter', label='Zero')
    def get_zero_point(self):
        """Calibration offset, in degrees."""
        return self.zero_point

    @setter
    def set_zero_point(self, zero_point):
        # Update both the stored value and the live flowgraph constant.
        self.zero_point = zero_point
        self.zeroer.set_k((self.zero_point * (math.pi / 180), ))

    # TODO: Have a dedicated angle type which can be specified as referenced to true/magnetic north
    @exported_value(type=QuantityT(units.degree), changes='continuous', label='Bearing')
    def get_angle(self):
        """Most recently measured bearing (radians from the probe)."""
        return self.probe.level()
@implementer(IModulator)
class VORModulator(gr.hier_block2, ExportedState):
    """Simulated VOR transmitter: mono audio in, IQ VOR signal out.

    Sums three components: an unmodulated carrier, the AM audio path
    (30 Hz variable tone + input audio), and the 9960 Hz FM reference
    subcarrier. The bearing is simulated by delaying the AM path.
    """

    __vor_sig_freq = 30
    __audio_rate = 10000
    __rf_rate = 30000  # needs to be above fm_subcarrier * 2

    def __init__(self, context, mode, angle=0.0):
        gr.hier_block2.__init__(
            self, 'SimulatedDevice VOR modulator',
            gr.io_signature(1, 1, gr.sizeof_float * 1),
            gr.io_signature(1, 1, gr.sizeof_gr_complex * 1),
        )
        self.__angle = 0.0  # dummy statically visible value will be overwritten

        # TODO: My signal level parameters are probably wrong because this signal doesn't look like a real VOR signal

        vor_30 = analog.sig_source_f(self.__audio_rate, analog.GR_COS_WAVE, self.__vor_sig_freq, 1, 0)
        vor_add = blocks.add_cc(1)
        vor_audio = blocks.add_ff(1)
        # Audio/AM signal
        self.connect(
            vor_30,
            blocks.multiply_const_ff(0.3),  # M_n
            (vor_audio, 0))
        self.connect(
            self,
            blocks.multiply_const_ff(audio_modulation_index),  # M_i
            (vor_audio, 1))
        # Carrier component
        self.connect(
            analog.sig_source_c(0, analog.GR_CONST_WAVE, 0, 0, 1),
            (vor_add, 0))
        # AM component; the delay simulates the bearing-dependent phase.
        self.__delay = blocks.delay(gr.sizeof_gr_complex, 0)  # configured by set_angle
        self.connect(
            vor_audio,
            make_resampler(self.__audio_rate, self.__rf_rate),  # TODO make a complex version and do this last
            blocks.float_to_complex(1),
            self.__delay,
            (vor_add, 1))
        # FM component
        vor_fm_mult = blocks.multiply_cc(1)
        self.connect(  # carrier generation
            analog.sig_source_f(self.__rf_rate, analog.GR_COS_WAVE, fm_subcarrier, 1, 0),
            blocks.float_to_complex(1),
            (vor_fm_mult, 1))
        self.connect(  # modulation
            vor_30,
            make_resampler(self.__audio_rate, self.__rf_rate),
            analog.frequency_modulator_fc(2 * math.pi * fm_deviation / self.__rf_rate),
            blocks.multiply_const_cc(0.3),  # M_d
            vor_fm_mult,
            (vor_add, 2))
        self.connect(
            vor_add,
            self)
        # calculate and initialize delay
        self.set_angle(angle)

    @exported_value(type=RangeT([(0, 2 * math.pi)], unit=units.degree, strict=False), changes='this_setter', label='Bearing')
    def get_angle(self):
        """Simulated bearing, in radians (post-compensation)."""
        return self.__angle

    @setter
    def set_angle(self, value):
        """Set the simulated bearing and reconfigure the AM-path delay."""
        value = float(value)
        compensation = math.pi / 180 * -6.5  # empirical, calibrated against VOR receiver (and therefore probably wrong)
        value = value + compensation
        value = value % (2 * math.pi)
        # Convert the angle fraction of one 30 Hz cycle into RF samples.
        phase_shift = int(self.__rf_rate / self.__vor_sig_freq * (value / (2 * math.pi)))
        self.__delay.set_dly(phase_shift)
        self.__angle = value

    def get_input_type(self):
        return SignalType(kind='MONO', sample_rate=self.__audio_rate)

    def get_output_type(self):
        return SignalType(kind='IQ', sample_rate=self.__rf_rate)
# Twisted plugin exports: register the 'VOR' mode (demodulator + simulated
# modulator) and serve the plugin's client-side JS/resources.
pluginMode = ModeDef(mode='VOR',
    info='VOR',
    demod_class=VOR,
    mod_class=VORModulator)
pluginClient = ClientResourceDef(
    key=__name__,
    resource=static.File(os.path.join(os.path.split(__file__)[0], 'client')),
    load_js_path='vor.js')
| gpl-3.0 |
zhexiao/ezhost | docs_en/conf.py | 1 | 9557 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ezhost documentation build configuration file, created by
# sphinx-quickstart on Wed May 25 11:10:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',   # pull API docs from docstrings
    'sphinx.ext.viewcode',  # link documented objects to highlighted source
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'ezhost'
copyright = '2016, Zhe Xiao'
author = 'Zhe Xiao'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# NOTE: keep these in sync with the package's release version.
#
# The short X.Y version.
version = '1.2.5'
# The full version, including alpha/beta/rc tags.
release = '1.2.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'ezhost v1.2.5'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ezhostdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. One tuple per output file:
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'ezhost.tex', 'ezhost Documentation',
     'Zhe Xiao', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'ezhost', 'ezhost Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. One tuple per output file:
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'ezhost', 'ezhost Documentation',
     author, 'ezhost', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Project repository URL — presumably consumed by the theme/templates for
# "edit on GitHub" style links; confirm against the theme in use.
github_url = 'https://github.com/zhexiao/ezhost'
| mit |
sertac/django | django/core/cache/backends/memcached.py | 320 | 6970 | "Memcached cache backend"
import pickle
import time
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils import six
from django.utils.encoding import force_str
from django.utils.functional import cached_property
class BaseMemcachedCache(BaseCache):
    """Shared behaviour for memcached-backed cache backends.

    Subclasses supply the concrete client ``library`` and the exception
    that library raises for a missing key, so incr/decr can normalise all
    libraries to raising ValueError.
    """

    def __init__(self, server, params, library, value_not_found_exception):
        super(BaseMemcachedCache, self).__init__(params)
        # SERVERS may be given as a single "host:port;host:port" string or
        # as an already-split sequence.
        if isinstance(server, six.string_types):
            self._servers = server.split(';')
        else:
            self._servers = server

        # The exception type to catch from the underlying library for a key
        # that was not found. This is a ValueError for python-memcache,
        # pylibmc.NotFound for pylibmc, and cmemcache will return None without
        # raising an exception.
        self.LibraryValueNotFoundException = value_not_found_exception

        self._lib = library
        self._options = params.get('OPTIONS')

    @property
    def _cache(self):
        """
        Implements transparent thread-safe access to a memcached client.
        """
        # Lazily create one client per backend instance.
        if getattr(self, '_client', None) is None:
            self._client = self._lib.Client(self._servers)

        return self._client

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """
        Memcached deals with long (> 30 days) timeouts in a special
        way. Call this function to obtain a safe value for your timeout.
        """
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout

        if timeout is None:
            # Using 0 in memcache sets a non-expiring timeout.
            return 0
        elif int(timeout) == 0:
            # Other cache backends treat 0 as set-and-expire. To achieve this
            # in memcache backends, a negative timeout must be passed.
            timeout = -1

        if timeout > 2592000:  # 60*60*24*30, 30 days
            # See http://code.google.com/p/memcached/wiki/NewProgramming#Expiration
            # "Expiration times can be set from 0, meaning "never expire", to
            # 30 days. Any time higher than 30 days is interpreted as a Unix
            # timestamp date. If you want to expire an object on January 1st of
            # next year, this is how you do that."
            #
            # This means that we have to switch to absolute timestamps.
            timeout += int(time.time())
        return int(timeout)

    def make_key(self, key, version=None):
        # Python 2 memcache requires the key to be a byte string.
        return force_str(super(BaseMemcachedCache, self).make_key(key, version))

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        # Returns True only if the key did not already exist.
        key = self.make_key(key, version=version)
        return self._cache.add(key, value, self.get_backend_timeout(timeout))

    def get(self, key, default=None, version=None):
        key = self.make_key(key, version=version)
        val = self._cache.get(key)
        if val is None:
            return default
        return val

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_key(key, version=version)
        if not self._cache.set(key, value, self.get_backend_timeout(timeout)):
            # make sure the key doesn't keep its old value in case of failure to set (memcached's 1MB limit)
            self._cache.delete(key)

    def delete(self, key, version=None):
        key = self.make_key(key, version=version)
        self._cache.delete(key)

    def get_many(self, keys, version=None):
        # get_multi returns values keyed by the *versioned* key; translate
        # back to the caller's original keys.
        new_keys = [self.make_key(x, version=version) for x in keys]
        ret = self._cache.get_multi(new_keys)
        if ret:
            _ = {}
            m = dict(zip(new_keys, keys))
            for k, v in ret.items():
                _[m[k]] = v
            ret = _
        return ret

    def close(self, **kwargs):
        self._cache.disconnect_all()

    def incr(self, key, delta=1, version=None):
        key = self.make_key(key, version=version)
        # memcached doesn't support a negative delta
        if delta < 0:
            return self._cache.decr(key, -delta)
        try:
            val = self._cache.incr(key, delta)

        # python-memcache responds to incr on non-existent keys by
        # raising a ValueError, pylibmc by raising a pylibmc.NotFound
        # and Cmemcache returns None. In all cases,
        # we should raise a ValueError though.
        except self.LibraryValueNotFoundException:
            val = None
        if val is None:
            raise ValueError("Key '%s' not found" % key)
        return val

    def decr(self, key, delta=1, version=None):
        key = self.make_key(key, version=version)
        # memcached doesn't support a negative delta
        if delta < 0:
            return self._cache.incr(key, -delta)
        try:
            val = self._cache.decr(key, delta)

        # python-memcache responds to decr on non-existent keys by
        # raising a ValueError, pylibmc by raising a pylibmc.NotFound
        # and Cmemcache returns None. In all cases,
        # we should raise a ValueError though.
        except self.LibraryValueNotFoundException:
            val = None
        if val is None:
            raise ValueError("Key '%s' not found" % key)
        return val

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        # Version every key, then issue a single multi-set round trip.
        safe_data = {}
        for key, value in data.items():
            key = self.make_key(key, version=version)
            safe_data[key] = value
        self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))

    def delete_many(self, keys, version=None):
        l = lambda x: self.make_key(x, version=version)
        self._cache.delete_multi(map(l, keys))

    def clear(self):
        self._cache.flush_all()
class MemcachedCache(BaseMemcachedCache):
    "An implementation of a cache binding using python-memcached"
    def __init__(self, server, params):
        import memcache
        super(MemcachedCache, self).__init__(server, params,
                                             library=memcache,
                                             value_not_found_exception=ValueError)

    @property
    def _cache(self):
        # Overridden to force the highest pickle protocol, which
        # python-memcached does not use by default.
        if getattr(self, '_client', None) is None:
            self._client = self._lib.Client(self._servers, pickleProtocol=pickle.HIGHEST_PROTOCOL)
        return self._client
class PyLibMCCache(BaseMemcachedCache):
    "An implementation of a cache binding using pylibmc"
    def __init__(self, server, params):
        import pylibmc
        super(PyLibMCCache, self).__init__(server, params,
                                           library=pylibmc,
                                           value_not_found_exception=pylibmc.NotFound)

    @cached_property
    def _cache(self):
        client = self._lib.Client(self._servers)
        # OPTIONS from settings map directly onto pylibmc behaviors
        # (e.g. 'tcp_nodelay', 'ketama').
        if self._options:
            client.behaviors = self._options
        return client
| bsd-3-clause |
shanil-puri/mase | python101/code/pie.py | 14 | 1636 | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
try:
# see if Swampy is installed as a package
from swampy.TurtleWorld import *
except ImportError:
# otherwise see if the modules are on the PYTHONPATH
from TurtleWorld import *
def draw_pie(t, n, r):
    """Draws a pie, then moves into position to the right.

    t: Turtle
    n: number of segments
    r: length of the radial spokes
    """
    polypie(t, n, r)
    # Lift the pen, hop past the pie's diameter plus a 10-unit gap,
    # then lower the pen so the next pie starts to the right.
    pu(t)
    fd(t, r*2 + 10)
    pd(t)
def polypie(t, n, r):
    """Draws a pie divided into radial segments.

    t: Turtle
    n: number of segments (must be >= 1; n == 0 raises ZeroDivisionError)
    r: length of the radial spokes
    """
    angle = 360.0 / n
    # The loop index was never used; name it `_` per convention.
    for _ in range(n):
        # Each wedge is an isosceles triangle with peak half-angle,
        # then the turtle rotates to the next wedge.
        isosceles(t, r, angle/2)
        lt(t, angle)
def isosceles(t, r, angle):
    """Draws an isosceles triangle.

    The turtle starts and ends at the peak, facing the middle of the base.

    t: Turtle
    r: length of the equal legs
    angle: peak angle in degrees
    """
    # Half the base length: the side opposite the half peak-angle.
    # math.radians() replaces the manual degrees->radians conversion.
    y = r * math.sin(math.radians(angle))

    rt(t, angle)
    fd(t, r)
    lt(t, 90+angle)
    fd(t, 2*y)
    lt(t, 90+angle)
    fd(t, r)
    lt(t, 180-angle)
# create the world and bob
world = TurtleWorld()
bob = Turtle()
bob.delay = 0

# Back up from the centre so the row of pies comes out roughly centred.
pu(bob)
bk(bob, 130)
pd(bob)

# draw polypies with various number of sides
size = 40
draw_pie(bob, 5, size)
draw_pie(bob, 6, size)
draw_pie(bob, 7, size)
draw_pie(bob, 8, size)

die(bob)

# dump the contents of the canvas to the file canvas.eps
world.canvas.dump()
wait_for_user()
| unlicense |
Darkmer/masterchief | CourseBuilderenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (decoded, as returned by SSLSocket.getpeercert())
    matches *hostname* following RFC 2818 / RFC 6125 rules. IP addresses
    are not accepted for *hostname*.

    Raises CertificateError on failure; returns None on success.
    """
    if not cert:
        raise ValueError("empty or no certificate")

    dnsnames = []
    # subjectAltName dNSName entries take precedence over the subject CN.
    for attr_type, attr_value in cert.get('subjectAltName', ()):
        if attr_type != 'DNS':
            continue
        if _dnsname_match(attr_value, hostname):
            return
        dnsnames.append(attr_value)

    if not dnsnames:
        # The subject is only consulted when no dNSName entry exists
        # in subjectAltName.
        for rdn in cert.get('subject', ()):
            for attr_type, attr_value in rdn:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if attr_type != 'commonName':
                    continue
                if _dnsname_match(attr_value, hostname):
                    return
                dnsnames.append(attr_value)

    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")
| mit |
AdaLovelance/server | djangoTodoEnUno.py | 2 | 2602 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Este script es posible gracias a la siguiente fuente:
http://michal.karzynski.pl/blog/2013/06/09/django-nginx-gunicorn-virtualenv-supervisor/
Creado por kaotika ada_lovelance@hackingcodeschool.net
Visitanos en hackingcodeschool.net
Created on Wed Dec 25 04:00:46 2013
@author: kaotika
"""
import sys
import subprocess
DEBUG = 1
LOGFILE = "/tmp/server.log"

# Base `su` command run as the postgres user; the database user and
# password are appended to this list later, just before execution.
ORDEN_PSQL = [ "su" , "-" , "postgres" , "-c",
               "orden que quieras ejecutar"]

if not DEBUG:
    def log(cadena):
        """Null logger used when DEBUG is disabled."""
        pass
else:
    def log(cadena):
        """Append *cadena* (plus a newline) to the basic log file."""
        volcado = open(LOGFILE, "a")
        volcado.write(str(cadena) + '\n')
        volcado.close()
# Intro banner and system update (Python 2 script: print statements,
# raw_input). User-facing strings are intentionally left in Spanish.
print "Este script configurará Django con Nginx, Gunicorn, virtualenv,\n\
supervisor y PostgreSQL, este script requiere permisos de superusuario\n\n"

print "Lo primero que se va a ejecutar es aptitude update && upgrade,\n\
es recomendable para las instalaciones posteriores\n\n"

respUdt = raw_input('Permites actualizar el sistema (s/n)')
if respUdt == 's':
    subprocess.call(['aptitude', 'update'])
    subprocess.call(['aptitude', 'upgrade'])
else:
    # The script refuses to continue without an up-to-date system.
    print "Tu sistema no se actualizará"
    exit("\nGracias por tu colaboración.\n")

print "\nAhora te preguntaremos algunos detalles más.\n"
# NOTE(review): `dominio` is collected but never used in this chunk.
dominio = raw_input('Introduce tu dominio ej hackingcodeschool.net : ')
respdb = \
    raw_input("Se recomienda postgres como base de datos, \n\
¿deseas instalarlo?, (s/n): ")
if respdb == 's':
    subprocess.call(['aptitude', 'install',
                     'postgresql postgresql-contrib'])
    nombredb = \
        raw_input('Introduce un nombre para tu nueva base de datos')
    usuariodb = raw_input('Introduce un usuari@ para tu base de datos: '
                          )
    passdb1 = \
        raw_input("Introduce una contraseña para tu base de datos:")
    passdb2 = raw_input("Repite tu contraseña")
    # Re-prompt until both password entries match.
    while passdb1 != passdb2:
        print "Las contraseñas introducidas no coinciden"
        passdb1 = \
            raw_input("Introduce una contraseña para tu base de datos:")
        passdb2 = raw_input("Repite tu contraseña")
    else:
        # while/else: runs once the loop condition becomes false,
        # i.e. when the passwords finally match.
        print "La contraseña ha sido almacenada correctamente"
    # See the ORDEN_PSQL list at the top of the script.
    ORDEN_PSQL.append(usuariodb)
    ORDEN_PSQL.append(passdb1)
    # Now run the command.
    subprocess.call(ORDEN_PSQL)
else:
    sys.exit("No se ha instalado nada, pero tu sistema está actualizado.")
| gpl-2.0 |
oscarolar/odoo | addons/sale_crm/wizard/__init__.py | 443 | 1077 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_make_sale
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
imply/chuu | chrome/common/extensions/docs/server2/host_file_system_creator.py | 23 | 2437 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from caching_file_system import CachingFileSystem
from local_file_system import LocalFileSystem
from offline_file_system import OfflineFileSystem
from subversion_file_system import SubversionFileSystem
class HostFileSystemCreator(object):
  '''Creates host file systems with configuration information. By default, SVN
  file systems are created, although a constructor method can be passed in to
  override this behavior (likely for testing purposes).
  '''
  def __init__(self,
               object_store_creator,
               offline=False,
               constructor_for_test=None):
    self._object_store_creator = object_store_creator
    # Determines whether or not created file systems will be wrapped in an
    # OfflineFileSystem.
    self._offline = offline
    # Provides custom create behavior, useful in tests.
    self._constructor_for_test = constructor_for_test

  def Create(self, branch='trunk', revision=None, offline=None):
    '''Creates either SVN file systems or specialized file systems from the
    constructor passed into this instance. Wraps the resulting file system in
    an Offline file system if the offline flag is set, and finally wraps it in a
    Caching file system.
    '''
    if self._constructor_for_test is not None:
      file_system = self._constructor_for_test(branch=branch, revision=revision)
    else:
      file_system = SubversionFileSystem.Create(branch=branch,
                                                revision=revision)
    # A per-call |offline| of None defers to the instance-wide default.
    if offline or (offline is None and self._offline):
      file_system = OfflineFileSystem(file_system)
    return CachingFileSystem(file_system, self._object_store_creator)

  @staticmethod
  def ForLocal(object_store_creator):
    '''Used in creating a server instance on localhost.
    '''
    return HostFileSystemCreator(
        object_store_creator,
        constructor_for_test=lambda **_: LocalFileSystem.Create())

  @staticmethod
  def ForTest(file_system, object_store_creator):
    '''Used in creating a test server instance. The HostFileSystemCreator
    returned here will always return |file_system| when its Create() method is
    called.
    '''
    return HostFileSystemCreator(
        object_store_creator,
        constructor_for_test=lambda **_: file_system)
| bsd-3-clause |
hvwaldow/inktex | inktex/ui.py | 2 | 8707 | import os
import pygtk
pygtk.require('2.0')
import gtk
from gtkcodebuffer import CodeBuffer, SyntaxLoader
class Ui(object):
    """
    The user interface. This dialog is the LaTeX input window and includes
    widgets to display compilation logs and a preview. It uses GTK2 which
    must be installed and importable.
    """

    app_name = 'InkTeX'

    # Pango markup shown in the "Help" tab.
    help_text = r"""You can set a preamble file and scale factor in the <b>settings</b> tab. The preamble should not include <b>\documentclass</b> and <b>\begin{document}</b>.
The LaTeX code you write is only the stuff between <b>\begin{document}</b> and <b>\end{document}</b>. Compilation errors are reported in the <b>log</b> tab.
The preamble file and scale factor are stored on a per-drawing basis, so in a new document, these information must be set again."""

    about_text = r"""Written by <a href="mailto:janoliver@oelerich.org">Jan Oliver Oelerich <janoliver@oelerich.org></a>"""

    def __init__(self, render_callback, src, settings):
        """Takes the following parameters:
          * render_callback: callback function to execute with "apply" button
          * src: source code that should be pre-inserted into the LaTeX input
          * settings: dict of per-drawing settings ('preamble', 'scale')"""
        self.render_callback = render_callback
        self.src = src if src else ""
        self.settings = settings

        # init the syntax highlighting buffer
        lang = SyntaxLoader("latex")
        self.syntax_buffer = CodeBuffer(lang=lang)

        self.setup_ui()

    def render(self, widget, data=None):
        """Extracts the input LaTeX code and calls the render callback. If that
        returns true, we quit and are happy."""
        buf = self.text.get_buffer()
        tex = buf.get_text(buf.get_start_iter(), buf.get_end_iter())

        settings = dict()
        if self.preamble.get_filename():
            settings['preamble'] = self.preamble.get_filename()
        settings['scale'] = self.scale.get_value()

        if self.render_callback(tex, settings):
            gtk.main_quit()
            return False

    def cancel(self, widget, data=None):
        """Close button pressed: Exit"""
        raise SystemExit(1)

    def destroy(self, widget, event, data=None):
        """Destroy hook for the GTK window. Quit and return False."""
        gtk.main_quit()
        return False

    def setup_ui(self):
        """Creates the actual UI: a dialog-style window holding a notebook
        (LaTeX input / log / settings / help tabs) above an Apply/Close
        button bar."""

        # create a floating toplevel window and set some title and border
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
        self.window.set_title(self.app_name)
        self.window.set_border_width(8)

        # connect delete and destroy events
        self.window.connect("destroy", self.destroy)
        self.window.connect("delete-event", self.destroy)

        # This is our main container, vertically ordered.
        self.box_container = gtk.VBox(False, 5)
        self.box_container.show()

        self.notebook = gtk.Notebook()

        self.page_latex = gtk.HBox(False, 5)
        self.page_latex.set_border_width(8)
        self.page_latex.show()

        self.page_log = gtk.HBox(False, 5)
        self.page_log.set_border_width(8)
        self.page_log.show()

        self.page_settings = gtk.HBox(False, 5)
        self.page_settings.set_border_width(8)
        self.page_settings.show()

        self.page_help = gtk.VBox(False, 5)
        self.page_help.set_border_width(8)
        self.page_help.show()

        self.notebook.append_page(self.page_latex, gtk.Label("LaTeX"))
        self.notebook.append_page(self.page_log, gtk.Label("Log"))
        self.notebook.append_page(self.page_settings, gtk.Label("Settings"))
        self.notebook.append_page(self.page_help, gtk.Label("Help"))
        self.notebook.show()

        # First component: The input text view for the LaTeX code.
        # It lives in a ScrolledWindow so we can get some scrollbars when the
        # text is too long.
        self.text = gtk.TextView(self.syntax_buffer)
        self.text.get_buffer().set_text(self.src)
        self.text.show()
        self.text_container = gtk.ScrolledWindow()
        self.text_container.set_policy(gtk.POLICY_AUTOMATIC,
                                       gtk.POLICY_AUTOMATIC)
        self.text_container.set_shadow_type(gtk.SHADOW_IN)
        self.text_container.add(self.text)
        self.text_container.set_size_request(400, 200)
        self.text_container.show()
        self.page_latex.pack_start(self.text_container)

        # Second component: The log view
        self.log_view = gtk.TextView()
        self.log_view.show()
        self.log_container = gtk.ScrolledWindow()
        self.log_container.set_policy(gtk.POLICY_AUTOMATIC,
                                      gtk.POLICY_AUTOMATIC)
        self.log_container.set_shadow_type(gtk.SHADOW_IN)
        self.log_container.add(self.log_view)
        self.log_container.set_size_request(400, 200)
        self.log_container.show()
        self.page_log.pack_start(self.log_container)

        # third component: settings (a 2x2 table: preamble file and scale)
        self.settings_container = gtk.Table(2,2)
        self.settings_container.set_row_spacings(8)
        self.settings_container.show()

        self.label_preamble = gtk.Label("Preamble")
        self.label_preamble.set_alignment(0, 0.5)
        self.label_preamble.show()
        self.preamble = gtk.FileChooserButton("...")
        # Pre-select the stored preamble only if the file still exists.
        if 'preamble' in self.settings and os.path.exists(self.settings['preamble']):
            self.preamble.set_filename(self.settings['preamble'])
        self.preamble.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)
        self.preamble.show()
        self.settings_container.attach(self.label_preamble, yoptions=gtk.SHRINK,
            left_attach=0, right_attach=1, top_attach=0, bottom_attach=1)
        self.settings_container.attach(self.preamble, yoptions=gtk.SHRINK,
            left_attach=1, right_attach=2, top_attach=0, bottom_attach=1)

        self.label_scale = gtk.Label("Scale")
        self.label_scale.set_alignment(0, 0.5)
        self.label_scale.show()
        self.scale_adjustment = gtk.Adjustment(value=1.0, lower=0, upper=100,
                                               step_incr=0.1)
        self.scale = gtk.SpinButton(adjustment=self.scale_adjustment, digits=1)
        if 'scale' in self.settings:
            self.scale.set_value(float(self.settings['scale']))
        self.scale.show()
        self.settings_container.attach(self.label_scale, yoptions=gtk.SHRINK,
            left_attach=0, right_attach=1, top_attach=1, bottom_attach=2)
        self.settings_container.attach(self.scale, yoptions=gtk.SHRINK,
            left_attach=1, right_attach=2, top_attach=1, bottom_attach=2)

        self.page_settings.pack_start(self.settings_container)

        # help tab
        self.help_label = gtk.Label()
        self.help_label.set_markup(Ui.help_text)
        self.help_label.set_line_wrap(True)
        self.help_label.show()
        self.about_label = gtk.Label()
        self.about_label.set_markup(Ui.about_text)
        self.about_label.set_line_wrap(True)
        self.about_label.show()
        self.separator_help = gtk.HSeparator()
        self.separator_help.show()
        self.page_help.pack_start(self.help_label)
        self.page_help.pack_start(self.separator_help)
        self.page_help.pack_start(self.about_label)

        self.box_container.pack_start(self.notebook, True, True)

        # separator between buttonbar and notebook
        self.separator_buttons = gtk.HSeparator()
        self.separator_buttons.show()
        self.box_container.pack_start(self.separator_buttons, False, False)

        # the button bar
        self.box_buttons = gtk.HButtonBox()
        self.box_buttons.set_layout(gtk.BUTTONBOX_END)
        self.box_buttons.show()

        self.button_render = gtk.Button(stock=gtk.STOCK_APPLY)
        self.button_cancel = gtk.Button(stock=gtk.STOCK_CLOSE)
        self.button_render.set_flags(gtk.CAN_DEFAULT)
        self.button_render.connect("clicked", self.render, None)
        self.button_cancel.connect("clicked", self.cancel, None)
        self.button_render.show()
        self.button_cancel.show()
        self.box_buttons.pack_end(self.button_cancel)
        self.box_buttons.pack_end(self.button_render)

        self.box_container.pack_start(self.box_buttons, False, False)

        self.window.add(self.box_container)
        # Apply is the default button, triggered by Enter.
        self.window.set_default(self.button_render)
        self.window.show()

    def log(self, msg):
        """Display *msg* in the log view and switch to the Log tab."""
        buffer = self.log_view.get_buffer()
        buffer.set_text(msg)
        self.notebook.set_current_page(1)

    def main(self):
        """Enter the GTK main loop (blocks until the dialog closes)."""
        gtk.main()
| mit |
ctgriffiths/twister | installer/installer_client.py | 1 | 8553 |
# version: 3.005
# File: installer.py ; This file is part of Twister.
# Copyright (C) 2012-2013 , Luxoft
# Authors:
# Andrei Costachi <acostachi@luxoft.com>
# Cristi Constantin <crconstantin@luxoft.com>
# Mihai Dobre <mihdobre@luxoft.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Twister Installer
=================
Requires Python 2.7 and a Linux machine. The installer doesn't run on Windows!
When installing Twister for the first time, you must run install_dependencies first.
Twister Client will be installed in the home of your user, in the folder `twister`.
'''
import os, sys
import binascii
import shutil
import subprocess
from string import Template
from distutils import file_util
from distutils import dir_util
__dir__ = os.path.split(__file__)[0]
# Run relative to the installer's own directory so relative paths resolve.
if __dir__: os.chdir(__dir__)
# --------------------------------------------------------------------------------------------------
# Install Client ?
# --------------------------------------------------------------------------------------------------
def userHome(user):
    """Return the home directory of *user* (current user when empty).

    Previously this shelled out via ``subprocess.check_output('echo ~' +
    user, shell=True)`` which (a) is a shell-injection risk because the
    user name is interpolated into a shell command, and (b) returns
    ``bytes`` under Python 3, breaking the later string concatenations.
    os.path.expanduser resolves ``~user`` through the password database
    (and $HOME for the current user) with no subprocess at all, and
    always returns ``str``.
    """
    return os.path.expanduser('~' + user)
# Determine the real (non-root) user the installer is acting for.
try:
    user_name = os.getenv('USER')
    # When invoked through sudo, $USER is "root"; fall back to the
    # invoking account recorded in $SUDO_USER.
    if user_name=='root':
        user_name = os.getenv('SUDO_USER')
    if not user_name:
        print('Cannot guess the Username! Exiting!\n')
        exit(1)
except:
    print('Cannot guess the Username! Exiting!\n')
    exit(1)
# --------------------------------------------------------------------------------------------------
# Previous installations of Twister
# --------------------------------------------------------------------------------------------------
# Twister client path
INSTALL_PATH = userHome(user_name) + os.sep + 'twister/'
cfg_path = INSTALL_PATH + 'config/'
# Holds the temporary location of the preserved config folder ('' = none).
tmp_config = ''

print('Hello `{}` !\n'.format(user_name))

# If a previous installation exists, offer to wipe it while preserving
# only the user's `config` folder.
if os.path.exists(INSTALL_PATH):
    print('WARNING! Another version of Twister is installed at `%s`!' % INSTALL_PATH)
    print('If you continue, all files from that folder will be PERMANENTLY DELETED,')
    print('Only the `config` folder will be saved!')
    selected = raw_input('Are you sure you want to continue? (yes/no): ')
    if selected.strip().lower() in ['y', 'yes']:
        # Backup CONFIG folder for client
        if os.path.isdir(cfg_path):
            if os.getuid() != 0: # Normal user
                tmp_config = userHome(user_name) + '/.twister/'
            else: # ROOT user
                tmp_config = '/tmp/twister_client_config/'
            # Remove old tmp config
            if os.path.isdir(tmp_config):
                shutil.rmtree(tmp_config)
            print('\nBack-up config folder (from `{}` to `{}`)...'.format(cfg_path, tmp_config))
            try:
                shutil.move(cfg_path, tmp_config)
            except Exception as e:
                print('\nInsuficient rights to move the config folder `{}`!\n'
                      'The installation cannot continue if you don\'t have permissions to move that folder!\n'.format(cfg_path))
                exit(1)
        # Deleting previous versions of Twister
        try: dir_util.remove_tree(INSTALL_PATH)
        except:
            print('Error! Cannot delete Twister dir `{}` !'.format(INSTALL_PATH))
        try: os.mkdir(INSTALL_PATH)
        except:
            print('Error! Cannot create Twister dir `{}` !'.format(INSTALL_PATH))
            print('You probably don\'t have enough privileges to read and write in `{}` !\n'.format(INSTALL_PATH))
            exit(1)
    else:
        print('\nPlease backup your data, then restart the installer.')
        print('Exiting.\n')
        exit(0)
# --------------------------------------------------------------------------------------------------
# Start copying files
# --------------------------------------------------------------------------------------------------
# Files to move in Client folder
to_copy = [
    'bin/cli.py',
    'bin/start_client',
    'bin/start_client.py',
    'bin/start_packet_sniffer.py',
    'doc/',
    'demo/',
    'config/',
    'client/',
    'services/PacketSniffer/',
    'services/__init__.py',
    'common/__init__.py',
    'common/constants.py',
    'common/suitesmanager.py',
    'common/configobj.py',
    'common/jython/',
]

# The installer lives one level below the repository root.
ROOT_FOLDER = os.sep.join( os.getcwd().split(os.sep)[:-1] )
cwd_path = os.getcwd() + os.sep
pkg_path = cwd_path + 'packages/'

print('')

# Copy each listed file/directory from the repository into INSTALL_PATH,
# recreating the destination directory structure as needed.
for fname in to_copy:
    fpath = ROOT_FOLDER + os.sep + fname
    dpath = os.path.dirname(fname)

    if dpath and ( not os.path.exists(INSTALL_PATH+dpath) ):
        try:
            dir_util.mkpath(INSTALL_PATH + dpath)
            print('Created folder structure `%s`.' % (INSTALL_PATH+dpath))
        except:
            print('Cannot create folder `%s`!' % (INSTALL_PATH+dpath))

    if os.path.isdir(fpath):
        try:
            dir_util.copy_tree(fpath, INSTALL_PATH + dpath)
            print('Copied dir `%s` to `%s`.' % (fpath, INSTALL_PATH+dpath))
        except:
            print('Cannot copy dir `%s` to `%s`!' % (fpath, INSTALL_PATH+dpath))
    elif os.path.isfile(fpath):
        try:
            file_util.copy_file(fpath, INSTALL_PATH + dpath)
            print('Copied file `%s` to `%s`.' % (fpath, INSTALL_PATH+dpath))
        except:
            print('Cannot copy file `%s` to `%s`!' % (fpath, INSTALL_PATH+dpath))
    else:
        print('Path `{}` does not exist and will not be copied!'.format(fpath))
# Create cache and logs folders (best-effort: ignore "already exists").
try: os.mkdir(INSTALL_PATH + '/.twister_cache')
except: pass
try: os.mkdir(INSTALL_PATH + '/logs')
except: pass
try: os.mkdir(INSTALL_PATH + '/config/sut')
except: pass
try: os.mkdir(INSTALL_PATH + '/config/predefined')
except: pass
try: os.mkdir(INSTALL_PATH + '/config/test_config')
except: pass

# Delete Server config files... (this is a client-only install)
try: os.remove(INSTALL_PATH +os.sep+ 'config/resources.json')
except: pass
try: os.remove(INSTALL_PATH +os.sep+ 'config/services.ini')
except: pass
try: os.remove(INSTALL_PATH +os.sep+ 'config/server_init.ini')
except: pass
try: os.remove(INSTALL_PATH +os.sep+ 'config/users_and_groups.ini')
except: pass
try: os.remove(INSTALL_PATH +os.sep+ 'config/shared_db.xml')
except: pass

# Restore CONFIG folder, if any
if os.path.exists(tmp_config):
    print('\nMoving `config` folder back (from `{}` to `{}`)...'.format(tmp_config, cfg_path))
    for xname in os.listdir(tmp_config):
        src_name = tmp_config + xname
        dst_name = cfg_path + xname
        # Overwrite whatever the fresh copy placed there with the backup.
        if os.path.isfile(dst_name):
            os.remove(dst_name)
        elif os.path.isdir(dst_name):
            shutil.rmtree(dst_name)
        print('Restoring config `{}`.'.format(dst_name))
        shutil.move(src_name, cfg_path)

# Change owner for install folder... (only possible when running as root)
if os.getuid() == 0:
    tcr_proc = subprocess.Popen(['chown', user_name+':'+user_name, INSTALL_PATH, '-R'],)
    tcr_proc.wait()
    tcr_proc = subprocess.Popen(['chmod', '775', INSTALL_PATH, '-R'],)
    tcr_proc.wait()
    try:
        tcr_proc = subprocess.Popen(['chmod', '777', INSTALL_PATH +os.sep+ 'logs', '-R'],)
        tcr_proc.wait()
    except:
        print('Cannot CHMOD 777 the logs folder!')

# Normalize permissions of all data/text files to 664.
for ext in ['txt', 'xml', 'py', 'tcl', 'plx', 'json', 'ini', 'htm', 'js', 'css']:
    os.system('find %s -name "*.%s" -exec chmod 664 {} \;' % (INSTALL_PATH, ext))

# Make executables
os.system('find %s -name "cli.py" -exec chmod +x {} \;' % INSTALL_PATH)
os.system('find %s -name "start_client" -exec chmod +x {} \;' % INSTALL_PATH)

# Fix FWM Config XML: substitute $HOME with the real user's home dir.
fwm = Template( open(INSTALL_PATH + 'config/fwmconfig.xml', 'r').read() )
open(INSTALL_PATH + 'config/fwmconfig.xml', 'w').write( fwm.substitute(HOME=userHome(user_name)) )
del fwm

# Check user's encr key; generate a random one if missing or empty.
user_key = '{}config/twister.key'.format(INSTALL_PATH)
if os.path.isfile(user_key) and open(user_key).read():
    print('User key ok.')
else:
    print('Generating new user key...')
    with open(user_key, 'w') as f:
        f.write(binascii.hexlify(os.urandom(16)))
    print('User key saved in "config/twister.key". Don\'t change this file!')

print('\nTwister installation done!\n')
| apache-2.0 |
kennedyshead/home-assistant | homeassistant/components/almond/config_flow.py | 2 | 4060 | """Config flow to connect with Home Assistant."""
import asyncio
import logging
from aiohttp import ClientError
import async_timeout
from pyalmond import AlmondLocalAuth, WebAlmondAPI
import voluptuous as vol
from yarl import URL
from homeassistant import config_entries, core, data_entry_flow
from homeassistant.helpers import aiohttp_client, config_entry_oauth2_flow
from .const import DOMAIN as ALMOND_DOMAIN, TYPE_LOCAL, TYPE_OAUTH2
async def async_verify_local_connection(hass: core.HomeAssistant, host: str):
    """Verify that a local connection works.

    Returns True when the Almond API at *host* answers a listing call
    within 10 seconds, False on timeout or client error.
    """
    session = aiohttp_client.async_get_clientsession(hass)
    api = WebAlmondAPI(AlmondLocalAuth(host, session))

    try:
        with async_timeout.timeout(10):
            await api.async_list_apps()
    except (asyncio.TimeoutError, ClientError):
        return False

    return True
@config_entries.HANDLERS.register(ALMOND_DOMAIN)
class AlmondFlowHandler(config_entry_oauth2_flow.AbstractOAuth2FlowHandler):
    """Implementation of the Almond OAuth2 config flow."""

    DOMAIN = ALMOND_DOMAIN

    # Almond API host, captured during the OAuth authorize step.
    host = None
    # Stored Hass.io discovery payload, consumed by the confirm step.
    hassio_discovery = None

    @property
    def logger(self) -> logging.Logger:
        """Return logger."""
        return logging.getLogger(__name__)

    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        return {"scope": "profile user-read user-read-results user-exec-command"}

    async def async_step_user(self, user_input=None):
        """Handle a flow start."""
        # Only allow 1 instance.
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")

        return await super().async_step_user(user_input)

    async def async_step_auth(self, user_input=None):
        """Handle authorize step."""
        result = await super().async_step_auth(user_input)

        if result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP:
            # Derive the API host from the external authorize URL:
            # same origin, path replaced with "me".
            self.host = str(URL(result["url"]).with_path("me"))

        return result

    async def async_oauth_create_entry(self, data: dict) -> dict:
        """Create an entry for the flow.

        Ok to override if you want to fetch extra info or even add another step.
        """
        data["type"] = TYPE_OAUTH2
        data["host"] = self.host
        return self.async_create_entry(title=self.flow_impl.name, data=data)

    async def async_step_import(self, user_input: dict = None) -> dict:
        """Import data."""
        # Only allow 1 instance.
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")

        # Reject the YAML import if the configured host is unreachable.
        if not await async_verify_local_connection(self.hass, user_input["host"]):
            self.logger.warning(
                "Aborting import of Almond because we're unable to connect"
            )
            return self.async_abort(reason="cannot_connect")

        return self.async_create_entry(
            title="Configuration.yaml",
            data={"type": TYPE_LOCAL, "host": user_input["host"]},
        )

    async def async_step_hassio(self, discovery_info):
        """Receive a Hass.io discovery."""
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")

        self.hassio_discovery = discovery_info

        return await self.async_step_hassio_confirm()

    async def async_step_hassio_confirm(self, user_input=None):
        """Confirm a Hass.io discovery."""
        data = self.hassio_discovery

        if user_input is not None:
            return self.async_create_entry(
                title=data["addon"],
                data={
                    "is_hassio": True,
                    "type": TYPE_LOCAL,
                    "host": f"http://{data['host']}:{data['port']}",
                },
            )

        # No input yet: show an empty confirmation form naming the add-on.
        return self.async_show_form(
            step_id="hassio_confirm",
            description_placeholders={"addon": data["addon"]},
            data_schema=vol.Schema({}),
        )
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.